From 1d0d6d36700a22013cb19e08508bb952b0d71800 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 17:09:22 -0700 Subject: [PATCH 0001/1035] refac(exchange) bitswap -> exchange/bitswap Move go-ipfs/bitswap package to go-ipfs/exchange/bitswap * Delineates the difference between the generic exchange interface and implementations (eg. BitSwap protocol) Thus, the bitswap protocol can be refined without having to overthink how future exchanges will work. Aspects common to BitSwap and other exchanges can be extracted out to the exchange package in piecemeal. Future exchange implementations can be placed in sibling packages next to exchange/bitswap. (eg. exchange/multilateral) This commit was moved from ipfs/go-bitswap@7cb2f524f323069d5a5dd8f98138762181b226a7 --- bitswap/bitswap.go | 182 ++++++++++++++++++++ bitswap/message/message.go | 81 +++++++++ bitswap/message/message.pb.go | 48 ++++++ bitswap/message/message.proto | 6 + bitswap/message/message_test.go | 72 ++++++++ bitswap/network/forwarder.go | 28 +++ bitswap/network/forwarder_test.go | 26 +++ bitswap/network/interface.go | 43 +++++ bitswap/network/network_adapter.go | 93 ++++++++++ bitswap/notifications/notifications.go | 55 ++++++ bitswap/notifications/notifications_test.go | 58 +++++++ bitswap/offline.go | 31 ++++ bitswap/offline_test.go | 27 +++ bitswap/strategy/interface.go | 45 +++++ bitswap/strategy/ledger.go | 93 ++++++++++ bitswap/strategy/ledger_test.go | 23 +++ bitswap/strategy/math.go | 31 ++++ bitswap/strategy/math_test.go | 17 ++ bitswap/strategy/strategy.go | 87 ++++++++++ bitswap/strategy/strategy_test.go | 70 ++++++++ 20 files changed, 1116 insertions(+) create mode 100644 bitswap/bitswap.go create mode 100644 bitswap/message/message.go create mode 100644 bitswap/message/message.pb.go create mode 100644 bitswap/message/message.proto create mode 100644 bitswap/message/message_test.go create mode 100644 bitswap/network/forwarder.go create mode 100644 
bitswap/network/forwarder_test.go create mode 100644 bitswap/network/interface.go create mode 100644 bitswap/network/network_adapter.go create mode 100644 bitswap/notifications/notifications.go create mode 100644 bitswap/notifications/notifications_test.go create mode 100644 bitswap/offline.go create mode 100644 bitswap/offline_test.go create mode 100644 bitswap/strategy/interface.go create mode 100644 bitswap/strategy/ledger.go create mode 100644 bitswap/strategy/ledger_test.go create mode 100644 bitswap/strategy/math.go create mode 100644 bitswap/strategy/math_test.go create mode 100644 bitswap/strategy/strategy.go create mode 100644 bitswap/strategy/strategy_test.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go new file mode 100644 index 000000000..71b879f98 --- /dev/null +++ b/bitswap/bitswap.go @@ -0,0 +1,182 @@ +package bitswap + +import ( + "errors" + "time" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + + blocks "github.com/jbenet/go-ipfs/blocks" + blockstore "github.com/jbenet/go-ipfs/blockstore" + exchange "github.com/jbenet/go-ipfs/exchange" + bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" + bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" + notifications "github.com/jbenet/go-ipfs/exchange/bitswap/notifications" + strategy "github.com/jbenet/go-ipfs/exchange/bitswap/strategy" + peer "github.com/jbenet/go-ipfs/peer" + u "github.com/jbenet/go-ipfs/util" +) + +// TODO(brian): ensure messages are being received + +// PartnerWantListMax is the bound for the number of keys we'll store per +// partner. These are usually taken from the top of the Partner's WantList +// advertisements. WantLists are sorted in terms of priority. +const PartnerWantListMax = 10 + +// bitswap instances implement the bitswap protocol. +type bitswap struct { + // peer is the identity of this (local) node. 
+ peer *peer.Peer + + // sender delivers messages on behalf of the session + sender bsnet.NetworkAdapter + + // blockstore is the local database + // NB: ensure threadsafety + blockstore blockstore.Blockstore + + // routing interface for communication + routing exchange.Directory + + notifications notifications.PubSub + + // strategy listens to network traffic and makes decisions about how to + // interact with partners. + // TODO(brian): save the strategy's state to the datastore + strategy strategy.Strategy +} + +// NewSession initializes a bitswap session. +func NewSession(parent context.Context, s bsnet.NetworkService, p *peer.Peer, d ds.Datastore, directory exchange.Directory) exchange.Exchange { + + // FIXME(brian): instantiate a concrete Strategist + receiver := bsnet.Forwarder{} + bs := &bitswap{ + blockstore: blockstore.NewBlockstore(d), + notifications: notifications.New(), + strategy: strategy.New(), + peer: p, + routing: directory, + sender: bsnet.NewNetworkAdapter(s, &receiver), + } + receiver.Delegate(bs) + + return bs +} + +// GetBlock attempts to retrieve a particular block from peers, within timeout. 
+func (bs *bitswap) Block(k u.Key, timeout time.Duration) ( + *blocks.Block, error) { + begin := time.Now() + tleft := timeout - time.Now().Sub(begin) + provs_ch := bs.routing.FindProvidersAsync(k, 20, timeout) + + blockChannel := make(chan blocks.Block) + after := time.After(tleft) + + // TODO: when the data is received, shut down this for loop ASAP + go func() { + for p := range provs_ch { + go func(pr *peer.Peer) { + blk, err := bs.getBlock(k, pr, tleft) + if err != nil { + return + } + select { + case blockChannel <- *blk: + default: + } + }(p) + } + }() + + select { + case block := <-blockChannel: + close(blockChannel) + return &block, nil + case <-after: + return nil, u.ErrTimeout + } +} + +func (bs *bitswap) getBlock(k u.Key, p *peer.Peer, timeout time.Duration) (*blocks.Block, error) { + + ctx, _ := context.WithTimeout(context.Background(), timeout) + blockChannel := bs.notifications.Subscribe(ctx, k) + + message := bsmsg.New() + message.AppendWanted(k) + + // FIXME(brian): register the accountant on the service wrapper to ensure + // that accounting is _always_ performed when SendMessage and + // ReceiveMessage are called + bs.sender.SendMessage(ctx, p, message) + bs.strategy.MessageSent(p, message) + + block, ok := <-blockChannel + if !ok { + return nil, u.ErrTimeout + } + return &block, nil +} + +func (bs *bitswap) sendToPeersThatWant(block blocks.Block) { + for _, p := range bs.strategy.Peers() { + if bs.strategy.BlockIsWantedByPeer(block.Key(), p) { + if bs.strategy.ShouldSendBlockToPeer(block.Key(), p) { + go bs.send(p, block) + } + } + } +} + +// HasBlock announces the existance of a block to bitswap, potentially sending +// it to peers (Partners) whose WantLists include it. 
+func (bs *bitswap) HasBlock(blk blocks.Block) error { + go bs.sendToPeersThatWant(blk) + return bs.routing.Provide(blk.Key()) +} + +// TODO(brian): get a return value +func (bs *bitswap) send(p *peer.Peer, b blocks.Block) { + message := bsmsg.New() + message.AppendBlock(b) + // FIXME(brian): pass ctx + bs.sender.SendMessage(context.Background(), p, message) + bs.strategy.MessageSent(p, message) +} + +// TODO(brian): handle errors +func (bs *bitswap) ReceiveMessage( + ctx context.Context, sender *peer.Peer, incoming bsmsg.BitSwapMessage) ( + *peer.Peer, bsmsg.BitSwapMessage, error) { + + bs.strategy.MessageReceived(sender, incoming) + + if incoming.Blocks() != nil { + for _, block := range incoming.Blocks() { + go bs.blockstore.Put(block) // FIXME(brian): err ignored + go bs.notifications.Publish(block) + } + } + + if incoming.Wantlist() != nil { + for _, key := range incoming.Wantlist() { + if bs.strategy.ShouldSendBlockToPeer(key, sender) { + block, errBlockNotFound := bs.blockstore.Get(key) + if errBlockNotFound != nil { + // TODO(brian): log/return the error + continue + } + go bs.send(sender, *block) + } + } + } + return nil, nil, errors.New("TODO implement") +} + +func numBytes(b blocks.Block) int { + return len(b.Data) +} diff --git a/bitswap/message/message.go b/bitswap/message/message.go new file mode 100644 index 000000000..dc6506313 --- /dev/null +++ b/bitswap/message/message.go @@ -0,0 +1,81 @@ +package message + +import ( + "errors" + + netmsg "github.com/jbenet/go-ipfs/net/message" + + blocks "github.com/jbenet/go-ipfs/blocks" + nm "github.com/jbenet/go-ipfs/net/message" + peer "github.com/jbenet/go-ipfs/peer" + u "github.com/jbenet/go-ipfs/util" +) + +type BitSwapMessage interface { + Wantlist() []u.Key + Blocks() []blocks.Block + AppendWanted(k u.Key) + AppendBlock(b blocks.Block) + Exportable +} + +type Exportable interface { + ToProto() *PBMessage + ToNet(p *peer.Peer) (nm.NetMessage, error) +} + +// message wraps a proto message for convenience 
+type message struct { + pb PBMessage +} + +func newMessageFromProto(pb PBMessage) *message { + return &message{pb: pb} +} + +func New() *message { + return new(message) +} + +// TODO(brian): convert these into keys +func (m *message) Wantlist() []u.Key { + wl := make([]u.Key, len(m.pb.Wantlist)) + for _, str := range m.pb.Wantlist { + wl = append(wl, u.Key(str)) + } + return wl +} + +// TODO(brian): convert these into blocks +func (m *message) Blocks() []blocks.Block { + bs := make([]blocks.Block, len(m.pb.Blocks)) + for _, data := range m.pb.Blocks { + b, err := blocks.NewBlock(data) + if err != nil { + continue + } + bs = append(bs, *b) + } + return bs +} + +func (m *message) AppendWanted(k u.Key) { + m.pb.Wantlist = append(m.pb.Wantlist, string(k)) +} + +func (m *message) AppendBlock(b blocks.Block) { + m.pb.Blocks = append(m.pb.Blocks, b.Data) +} + +func FromNet(nmsg netmsg.NetMessage) (BitSwapMessage, error) { + return nil, errors.New("TODO implement") +} + +func (m *message) ToProto() *PBMessage { + cp := m.pb + return &cp +} + +func (m *message) ToNet(p *peer.Peer) (nm.NetMessage, error) { + return nm.FromObject(p, m.ToProto()) +} diff --git a/bitswap/message/message.pb.go b/bitswap/message/message.pb.go new file mode 100644 index 000000000..d1089f5c9 --- /dev/null +++ b/bitswap/message/message.pb.go @@ -0,0 +1,48 @@ +// Code generated by protoc-gen-go. +// source: message.proto +// DO NOT EDIT! + +/* +Package bitswap is a generated protocol buffer package. + +It is generated from these files: + message.proto + +It has these top-level messages: + PBMessage +*/ +package message + +import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = math.Inf + +type PBMessage struct { + Wantlist []string `protobuf:"bytes,1,rep,name=wantlist" json:"wantlist,omitempty"` + Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks" json:"blocks,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PBMessage) Reset() { *m = PBMessage{} } +func (m *PBMessage) String() string { return proto.CompactTextString(m) } +func (*PBMessage) ProtoMessage() {} + +func (m *PBMessage) GetWantlist() []string { + if m != nil { + return m.Wantlist + } + return nil +} + +func (m *PBMessage) GetBlocks() [][]byte { + if m != nil { + return m.Blocks + } + return nil +} + +func init() { +} diff --git a/bitswap/message/message.proto b/bitswap/message/message.proto new file mode 100644 index 000000000..a0e4d1997 --- /dev/null +++ b/bitswap/message/message.proto @@ -0,0 +1,6 @@ +package message; + +message PBMessage { + repeated string wantlist = 1; + repeated bytes blocks = 2; +} diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go new file mode 100644 index 000000000..8ff345f1c --- /dev/null +++ b/bitswap/message/message_test.go @@ -0,0 +1,72 @@ +package message + +import ( + "bytes" + "testing" + + u "github.com/jbenet/go-ipfs/util" + testutil "github.com/jbenet/go-ipfs/util/testutil" +) + +func TestAppendWanted(t *testing.T) { + const str = "foo" + m := New() + m.AppendWanted(u.Key(str)) + + if !contains(m.ToProto().GetWantlist(), str) { + t.Fail() + } +} + +func TestNewMessageFromProto(t *testing.T) { + const str = "a_key" + protoMessage := new(PBMessage) + protoMessage.Wantlist = []string{string(str)} + if !contains(protoMessage.Wantlist, str) { + t.Fail() + } + m := newMessageFromProto(*protoMessage) + if !contains(m.ToProto().GetWantlist(), str) { + t.Fail() + } +} + +func TestAppendBlock(t *testing.T) { + + strs := make([]string, 2) + strs = append(strs, "Celeritas") + strs = append(strs, "Incendia") + + m := New() + for _, str := range strs { + block := 
testutil.NewBlockOrFail(t, str) + m.AppendBlock(block) + } + + // assert strings are in proto message + for _, blockbytes := range m.ToProto().GetBlocks() { + s := bytes.NewBuffer(blockbytes).String() + if !contains(strs, s) { + t.Fail() + } + } +} + +func TestCopyProtoByValue(t *testing.T) { + const str = "foo" + m := New() + protoBeforeAppend := m.ToProto() + m.AppendWanted(u.Key(str)) + if contains(protoBeforeAppend.GetWantlist(), str) { + t.Fail() + } +} + +func contains(s []string, x string) bool { + for _, a := range s { + if a == x { + return true + } + } + return false +} diff --git a/bitswap/network/forwarder.go b/bitswap/network/forwarder.go new file mode 100644 index 000000000..603cd0123 --- /dev/null +++ b/bitswap/network/forwarder.go @@ -0,0 +1,28 @@ +package network + +import ( + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" + peer "github.com/jbenet/go-ipfs/peer" +) + +// Forwarder receives messages and forwards them to the delegate. +// +// Forwarder breaks the circular dependency between the BitSwap Session and the +// Network Service. 
+type Forwarder struct { + delegate Receiver +} + +func (r *Forwarder) ReceiveMessage( + ctx context.Context, sender *peer.Peer, incoming bsmsg.BitSwapMessage) ( + *peer.Peer, bsmsg.BitSwapMessage, error) { + if r.delegate == nil { + return nil, nil, nil + } + return r.delegate.ReceiveMessage(ctx, sender, incoming) +} + +func (r *Forwarder) Delegate(delegate Receiver) { + r.delegate = delegate +} diff --git a/bitswap/network/forwarder_test.go b/bitswap/network/forwarder_test.go new file mode 100644 index 000000000..accc2c781 --- /dev/null +++ b/bitswap/network/forwarder_test.go @@ -0,0 +1,26 @@ +package network + +import ( + "testing" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + bsmsg "github.com/jbenet/go-ipfs/bitswap/message" + peer "github.com/jbenet/go-ipfs/peer" +) + +func TestDoesntPanicIfDelegateNotPresent(t *testing.T) { + fwdr := Forwarder{} + fwdr.ReceiveMessage(context.Background(), &peer.Peer{}, bsmsg.New()) +} + +func TestForwardsMessageToDelegate(t *testing.T) { + fwdr := Forwarder{delegate: &EchoDelegate{}} + fwdr.ReceiveMessage(context.Background(), &peer.Peer{}, bsmsg.New()) +} + +type EchoDelegate struct{} + +func (d *EchoDelegate) ReceiveMessage(ctx context.Context, p *peer.Peer, + incoming bsmsg.BitSwapMessage) (*peer.Peer, bsmsg.BitSwapMessage, error) { + return p, incoming, nil +} diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go new file mode 100644 index 000000000..703398354 --- /dev/null +++ b/bitswap/network/interface.go @@ -0,0 +1,43 @@ +package network + +import ( + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + netservice "github.com/jbenet/go-ipfs/net/service" + + bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" + netmsg "github.com/jbenet/go-ipfs/net/message" + peer "github.com/jbenet/go-ipfs/peer" +) + +// NetworkAdapter mediates the exchange's communication with the network. 
+type NetworkAdapter interface { + + // SendMessage sends a BitSwap message to a peer. + SendMessage( + context.Context, + *peer.Peer, + bsmsg.BitSwapMessage) error + + // SendRequest sends a BitSwap message to a peer and waits for a response. + SendRequest( + context.Context, + *peer.Peer, + bsmsg.BitSwapMessage) (incoming bsmsg.BitSwapMessage, err error) + + // SetDelegate registers the Reciver to handle messages received from the + // network. + SetDelegate(Receiver) +} + +type Receiver interface { + ReceiveMessage( + ctx context.Context, sender *peer.Peer, incoming bsmsg.BitSwapMessage) ( + destination *peer.Peer, outgoing bsmsg.BitSwapMessage, err error) +} + +// TODO(brian): move this to go-ipfs/net package +type NetworkService interface { + SendRequest(ctx context.Context, m netmsg.NetMessage) (netmsg.NetMessage, error) + SendMessage(ctx context.Context, m netmsg.NetMessage) error + SetHandler(netservice.Handler) +} diff --git a/bitswap/network/network_adapter.go b/bitswap/network/network_adapter.go new file mode 100644 index 000000000..8914101bc --- /dev/null +++ b/bitswap/network/network_adapter.go @@ -0,0 +1,93 @@ +package network + +import ( + "errors" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + + bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" + netmsg "github.com/jbenet/go-ipfs/net/message" + peer "github.com/jbenet/go-ipfs/peer" +) + +// NewSender wraps a network Service to perform translation between +// BitSwapMessage and NetMessage formats. This allows the BitSwap session to +// ignore these details. 
+func NewNetworkAdapter(s NetworkService, r Receiver) NetworkAdapter { + adapter := networkAdapter{ + networkService: s, + receiver: r, + } + s.SetHandler(&adapter) + return &adapter +} + +// networkAdapter implements NetworkAdapter +type networkAdapter struct { + networkService NetworkService + receiver Receiver +} + +// HandleMessage marshals and unmarshals net messages, forwarding them to the +// BitSwapMessage receiver +func (adapter *networkAdapter) HandleMessage( + ctx context.Context, incoming netmsg.NetMessage) (netmsg.NetMessage, error) { + + if adapter.receiver == nil { + return nil, errors.New("No receiver. NetMessage dropped") + } + + received, err := bsmsg.FromNet(incoming) + if err != nil { + return nil, err + } + + p, bsmsg, err := adapter.receiver.ReceiveMessage(ctx, incoming.Peer(), received) + if err != nil { + return nil, err + } + + // TODO(brian): put this in a helper function + if bsmsg == nil || p == nil { + return nil, nil + } + + outgoing, err := bsmsg.ToNet(p) + if err != nil { + return nil, err + } + + return outgoing, nil +} + +func (adapter *networkAdapter) SendMessage( + ctx context.Context, + p *peer.Peer, + outgoing bsmsg.BitSwapMessage) error { + + nmsg, err := outgoing.ToNet(p) + if err != nil { + return err + } + return adapter.networkService.SendMessage(ctx, nmsg) +} + +func (adapter *networkAdapter) SendRequest( + ctx context.Context, + p *peer.Peer, + outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) { + + outgoingMsg, err := outgoing.ToNet(p) + if err != nil { + return nil, err + } + incomingMsg, err := adapter.networkService.SendRequest(ctx, outgoingMsg) + if err != nil { + return nil, err + } + return bsmsg.FromNet(incomingMsg) +} + +func (adapter *networkAdapter) SetDelegate(r Receiver) { + adapter.receiver = r +} diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go new file mode 100644 index 000000000..2da2b7fad --- /dev/null +++ b/bitswap/notifications/notifications.go @@ 
-0,0 +1,55 @@ +package notifications + +import ( + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + pubsub "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/tuxychandru/pubsub" + + blocks "github.com/jbenet/go-ipfs/blocks" + u "github.com/jbenet/go-ipfs/util" +) + +type PubSub interface { + Publish(block blocks.Block) + Subscribe(ctx context.Context, k u.Key) <-chan blocks.Block + Shutdown() +} + +func New() PubSub { + const bufferSize = 16 + return &impl{*pubsub.New(bufferSize)} +} + +type impl struct { + wrapped pubsub.PubSub +} + +func (ps *impl) Publish(block blocks.Block) { + topic := string(block.Key()) + ps.wrapped.Pub(block, topic) +} + +// Subscribe returns a one-time use |blockChannel|. |blockChannel| returns nil +// if the |ctx| times out or is cancelled. Then channel is closed after the +// block given by |k| is sent. +func (ps *impl) Subscribe(ctx context.Context, k u.Key) <-chan blocks.Block { + topic := string(k) + subChan := ps.wrapped.SubOnce(topic) + blockChannel := make(chan blocks.Block) + go func() { + defer close(blockChannel) + select { + case val := <-subChan: + block, ok := val.(blocks.Block) + if ok { + blockChannel <- block + } + case <-ctx.Done(): + ps.wrapped.Unsub(subChan, topic) + } + }() + return blockChannel +} + +func (ps *impl) Shutdown() { + ps.wrapped.Shutdown() +} diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go new file mode 100644 index 000000000..b12cc7d83 --- /dev/null +++ b/bitswap/notifications/notifications_test.go @@ -0,0 +1,58 @@ +package notifications + +import ( + "bytes" + "testing" + "time" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + testutil "github.com/jbenet/go-ipfs/util/testutil" + + blocks "github.com/jbenet/go-ipfs/blocks" +) + +func TestPublishSubscribe(t *testing.T) { + blockSent := testutil.NewBlockOrFail(t, "Greetings from The Interval") + + n := 
New() + defer n.Shutdown() + ch := n.Subscribe(context.Background(), blockSent.Key()) + + n.Publish(blockSent) + blockRecvd, ok := <-ch + if !ok { + t.Fail() + } + + assertBlocksEqual(t, blockRecvd, blockSent) + +} + +func TestCarryOnWhenDeadlineExpires(t *testing.T) { + + impossibleDeadline := time.Nanosecond + fastExpiringCtx, _ := context.WithTimeout(context.Background(), impossibleDeadline) + + n := New() + defer n.Shutdown() + block := testutil.NewBlockOrFail(t, "A Missed Connection") + blockChannel := n.Subscribe(fastExpiringCtx, block.Key()) + + assertBlockChannelNil(t, blockChannel) +} + +func assertBlockChannelNil(t *testing.T, blockChannel <-chan blocks.Block) { + _, ok := <-blockChannel + if ok { + t.Fail() + } +} + +func assertBlocksEqual(t *testing.T, a, b blocks.Block) { + if !bytes.Equal(a.Data, b.Data) { + t.Fail() + } + if a.Key() != b.Key() { + t.Fail() + } +} diff --git a/bitswap/offline.go b/bitswap/offline.go new file mode 100644 index 000000000..46b71d27b --- /dev/null +++ b/bitswap/offline.go @@ -0,0 +1,31 @@ +package bitswap + +import ( + "errors" + "time" + + blocks "github.com/jbenet/go-ipfs/blocks" + exchange "github.com/jbenet/go-ipfs/exchange" + u "github.com/jbenet/go-ipfs/util" +) + +func NewOfflineExchange() exchange.Exchange { + return &offlineExchange{} +} + +// offlineExchange implements the Exchange interface but doesn't return blocks. +// For use in offline mode. +type offlineExchange struct { +} + +// Block returns nil to signal that a block could not be retrieved for the +// given key. +// NB: This function may return before the timeout expires. +func (_ *offlineExchange) Block(k u.Key, timeout time.Duration) (*blocks.Block, error) { + return nil, errors.New("Block unavailable. Operating in offline mode") +} + +// HasBlock always returns nil. 
+func (_ *offlineExchange) HasBlock(blocks.Block) error { + return nil +} diff --git a/bitswap/offline_test.go b/bitswap/offline_test.go new file mode 100644 index 000000000..2b40ac5e2 --- /dev/null +++ b/bitswap/offline_test.go @@ -0,0 +1,27 @@ +package bitswap + +import ( + "testing" + "time" + + u "github.com/jbenet/go-ipfs/util" + testutil "github.com/jbenet/go-ipfs/util/testutil" +) + +func TestBlockReturnsErr(t *testing.T) { + off := NewOfflineExchange() + _, err := off.Block(u.Key("foo"), time.Second) + if err != nil { + return // as desired + } + t.Fail() +} + +func TestHasBlockReturnsNil(t *testing.T) { + off := NewOfflineExchange() + block := testutil.NewBlockOrFail(t, "data") + err := off.HasBlock(block) + if err != nil { + t.Fatal("") + } +} diff --git a/bitswap/strategy/interface.go b/bitswap/strategy/interface.go new file mode 100644 index 000000000..8608c52ce --- /dev/null +++ b/bitswap/strategy/interface.go @@ -0,0 +1,45 @@ +package strategy + +import ( + bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" + peer "github.com/jbenet/go-ipfs/peer" + u "github.com/jbenet/go-ipfs/util" +) + +type Strategy interface { + // Returns a slice of Peers that + Peers() []*peer.Peer + + // WantList returns the WantList for the given Peer + BlockIsWantedByPeer(u.Key, *peer.Peer) bool + + // ShouldSendTo(Peer) decides whether to send data to this Peer + ShouldSendBlockToPeer(u.Key, *peer.Peer) bool + + // Seed initializes the decider to a deterministic state + Seed(int64) + + // MessageReceived records receipt of message for accounting purposes + MessageReceived(*peer.Peer, bsmsg.BitSwapMessage) error + + // MessageSent records sending of message for accounting purposes + MessageSent(*peer.Peer, bsmsg.BitSwapMessage) error +} + +type WantList interface { + // Peer returns the owner of the WantList + Peer() *peer.Peer + + // Intersection returns the keys common to both WantLists + Intersection(WantList) WantList + + KeySet +} + +// TODO(brian): potentially 
move this somewhere more generic. For now, it's +// useful in BitSwap operations. + +type KeySet interface { + Contains(u.Key) bool + Keys() []u.Key +} diff --git a/bitswap/strategy/ledger.go b/bitswap/strategy/ledger.go new file mode 100644 index 000000000..34f301055 --- /dev/null +++ b/bitswap/strategy/ledger.go @@ -0,0 +1,93 @@ +package strategy + +import ( + "sync" + "time" + + peer "github.com/jbenet/go-ipfs/peer" + u "github.com/jbenet/go-ipfs/util" +) + +// keySet is just a convenient alias for maps of keys, where we only care +// access/lookups. +type keySet map[u.Key]struct{} + +func newLedger(p *peer.Peer, strategy strategyFunc) *ledger { + return &ledger{ + wantList: keySet{}, + Strategy: strategy, + Partner: p, + } +} + +// ledger stores the data exchange relationship between two peers. +type ledger struct { + lock sync.RWMutex + + // Partner is the remote Peer. + Partner *peer.Peer + + // Accounting tracks bytes sent and recieved. + Accounting debtRatio + + // firstExchnage is the time of the first data exchange. + firstExchange time.Time + + // lastExchange is the time of the last data exchange. + lastExchange time.Time + + // exchangeCount is the number of exchanges with this peer + exchangeCount uint64 + + // wantList is a (bounded, small) set of keys that Partner desires. + wantList keySet + + Strategy strategyFunc +} + +func (l *ledger) ShouldSend() bool { + l.lock.Lock() + defer l.lock.Unlock() + + return l.Strategy(l) +} + +func (l *ledger) SentBytes(n int) { + l.lock.Lock() + defer l.lock.Unlock() + + l.exchangeCount++ + l.lastExchange = time.Now() + l.Accounting.BytesSent += uint64(n) +} + +func (l *ledger) ReceivedBytes(n int) { + l.lock.Lock() + defer l.lock.Unlock() + + l.exchangeCount++ + l.lastExchange = time.Now() + l.Accounting.BytesRecv += uint64(n) +} + +// TODO: this needs to be different. We need timeouts. 
+func (l *ledger) Wants(k u.Key) { + l.lock.Lock() + defer l.lock.Unlock() + + l.wantList[k] = struct{}{} +} + +func (l *ledger) WantListContains(k u.Key) bool { + l.lock.RLock() + defer l.lock.RUnlock() + + _, ok := l.wantList[k] + return ok +} + +func (l *ledger) ExchangeCount() uint64 { + l.lock.RLock() + defer l.lock.RUnlock() + return l.exchangeCount +} diff --git a/bitswap/strategy/ledger_test.go b/bitswap/strategy/ledger_test.go new file mode 100644 index 000000000..0fdfae0cc --- /dev/null +++ b/bitswap/strategy/ledger_test.go @@ -0,0 +1,23 @@ +package strategy + +import ( + "sync" + "testing" +) + +func TestRaceConditions(t *testing.T) { + const numberOfExpectedExchanges = 10000 + l := new(ledger) + var wg sync.WaitGroup + for i := 0; i < numberOfExpectedExchanges; i++ { + wg.Add(1) + go func() { + defer wg.Done() + l.ReceivedBytes(1) + }() + } + wg.Wait() + if l.ExchangeCount() != numberOfExpectedExchanges { + t.Fail() + } +} diff --git a/bitswap/strategy/math.go b/bitswap/strategy/math.go new file mode 100644 index 000000000..21b1ff163 --- /dev/null +++ b/bitswap/strategy/math.go @@ -0,0 +1,31 @@ +package strategy + +import ( + "math" + "math/rand" +) + +type strategyFunc func(*ledger) bool + +func standardStrategy(l *ledger) bool { + return rand.Float64() <= probabilitySend(l.Accounting.Value()) +} + +func yesManStrategy(l *ledger) bool { + return true +} + +func probabilitySend(ratio float64) float64 { + x := 1 + math.Exp(6-3*ratio) + y := 1 / x + return 1 - y +} + +type debtRatio struct { + BytesSent uint64 + BytesRecv uint64 +} + +func (dr *debtRatio) Value() float64 { + return float64(dr.BytesSent) / float64(dr.BytesRecv+1) +} diff --git a/bitswap/strategy/math_test.go b/bitswap/strategy/math_test.go new file mode 100644 index 000000000..58092bc09 --- /dev/null +++ b/bitswap/strategy/math_test.go @@ -0,0 +1,17 @@ +package strategy + +import ( + "testing" +) + +func TestProbabilitySendDecreasesAsRatioIncreases(t *testing.T) { + grateful := 
debtRatio{BytesSent: 0, BytesRecv: 10000} + pWhenGrateful := probabilitySend(grateful.Value()) + + abused := debtRatio{BytesSent: 10000, BytesRecv: 0} + pWhenAbused := probabilitySend(abused.Value()) + + if pWhenGrateful < pWhenAbused { + t.Fail() + } +} diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go new file mode 100644 index 000000000..208811561 --- /dev/null +++ b/bitswap/strategy/strategy.go @@ -0,0 +1,87 @@ +package strategy + +import ( + "errors" + + bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" + "github.com/jbenet/go-ipfs/peer" + u "github.com/jbenet/go-ipfs/util" +) + +// TODO declare thread-safe datastore +func New() Strategy { + return &strategist{ + ledgerMap: ledgerMap{}, + strategyFunc: yesManStrategy, + } +} + +type strategist struct { + ledgerMap + strategyFunc +} + +// LedgerMap lists Ledgers by their Partner key. +type ledgerMap map[peerKey]*ledger + +// FIXME share this externally +type peerKey u.Key + +// Peers returns a list of peers +func (s *strategist) Peers() []*peer.Peer { + response := make([]*peer.Peer, 0) + for _, ledger := range s.ledgerMap { + response = append(response, ledger.Partner) + } + return response +} + +func (s *strategist) BlockIsWantedByPeer(k u.Key, p *peer.Peer) bool { + ledger := s.ledger(p) + return ledger.WantListContains(k) +} + +func (s *strategist) ShouldSendBlockToPeer(k u.Key, p *peer.Peer) bool { + ledger := s.ledger(p) + return ledger.ShouldSend() +} + +func (s *strategist) Seed(int64) { + // TODO +} + +func (s *strategist) MessageReceived(p *peer.Peer, m bsmsg.BitSwapMessage) error { + l := s.ledger(p) + for _, key := range m.Wantlist() { + l.Wants(key) + } + for _, block := range m.Blocks() { + // FIXME extract blocks.NumBytes(block) or block.NumBytes() method + l.ReceivedBytes(len(block.Data)) + } + return errors.New("TODO") +} + +// TODO add contents of m.WantList() to my local wantlist? 
NB: could introduce +// race conditions where I send a message, but MessageSent gets handled after +// MessageReceived. The information in the local wantlist could become +// inconsistent. Would need to ensure that Sends and acknowledgement of the +// send happen atomically + +func (s *strategist) MessageSent(p *peer.Peer, m bsmsg.BitSwapMessage) error { + l := s.ledger(p) + for _, block := range m.Blocks() { + l.SentBytes(len(block.Data)) + } + return nil +} + +// ledger lazily instantiates a ledger +func (s *strategist) ledger(p *peer.Peer) *ledger { + l, ok := s.ledgerMap[peerKey(p.Key())] + if !ok { + l = newLedger(p, s.strategyFunc) + s.ledgerMap[peerKey(p.Key())] = l + } + return l +} diff --git a/bitswap/strategy/strategy_test.go b/bitswap/strategy/strategy_test.go new file mode 100644 index 000000000..4adff29a0 --- /dev/null +++ b/bitswap/strategy/strategy_test.go @@ -0,0 +1,70 @@ +package strategy + +import ( + "testing" + + message "github.com/jbenet/go-ipfs/bitswap/message" + "github.com/jbenet/go-ipfs/peer" + "github.com/jbenet/go-ipfs/util/testutil" +) + +type peerAndStrategist struct { + *peer.Peer + Strategist +} + +func newPeerAndStrategist(idStr string) peerAndStrategist { + return peerAndStrategist{ + Peer: &peer.Peer{ID: peer.ID(idStr)}, + Strategist: New(), + } +} + +func TestBlockRecordedAsWantedAfterMessageReceived(t *testing.T) { + beggar := newPeerAndStrategist("can't be chooser") + chooser := newPeerAndStrategist("chooses JIF") + + block := testutil.NewBlockOrFail(t, "data wanted by beggar") + + messageFromBeggarToChooser := message.New() + messageFromBeggarToChooser.AppendWanted(block.Key()) + + chooser.MessageReceived(beggar.Peer, messageFromBeggarToChooser) + // for this test, doesn't matter if you record that beggar sent + + if !chooser.IsWantedByPeer(block.Key(), beggar.Peer) { + t.Fatal("chooser failed to record that beggar wants block") + } +} + +func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { + + sanfrancisco := 
newPeerAndStrategist("sf") + seattle := newPeerAndStrategist("sea") + + m := message.New() + + sanfrancisco.MessageSent(seattle.Peer, m) + seattle.MessageReceived(sanfrancisco.Peer, m) + + if seattle.Peer.Key() == sanfrancisco.Peer.Key() { + t.Fatal("Sanity Check: Peers have same Key!") + } + + if !peerIsPartner(seattle.Peer, sanfrancisco.Strategist) { + t.Fatal("Peer wasn't added as a Partner") + } + + if !peerIsPartner(sanfrancisco.Peer, seattle.Strategist) { + t.Fatal("Peer wasn't added as a Partner") + } +} + +func peerIsPartner(p *peer.Peer, s Strategist) bool { + for _, partner := range s.Peers() { + if partner.Key() == p.Key() { + return true + } + } + return false +} From f9d704be9034249c7019f072a653a973c9cab296 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 17:30:06 -0700 Subject: [PATCH 0002/1035] refac(exchange) rename exchange.Interface to match golang conventions examples: http://golang.org/pkg/container/heap/#Interface http://golang.org/pkg/net/#Interface http://golang.org/pkg/sort/#Interface This commit was moved from ipfs/go-bitswap@f6e8d9584530aeed31f7fb1df7a4de6a928d12f1 --- bitswap/bitswap.go | 14 ++++++++++++-- bitswap/offline.go | 2 +- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 71b879f98..dcf095b02 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -18,6 +18,16 @@ import ( u "github.com/jbenet/go-ipfs/util" ) +// TODO rename -> Router? 
+type Routing interface { + // FindProvidersAsync returns a channel of providers for the given key + // TODO replace with timeout with context + FindProvidersAsync(u.Key, int, time.Duration) <-chan *peer.Peer + + // Provide provides the key to the network + Provide(key u.Key) error +} + // TODO(brian): ensure messages are being received // PartnerWantListMax is the bound for the number of keys we'll store per @@ -38,7 +48,7 @@ type bitswap struct { blockstore blockstore.Blockstore // routing interface for communication - routing exchange.Directory + routing Routing notifications notifications.PubSub @@ -49,7 +59,7 @@ type bitswap struct { } // NewSession initializes a bitswap session. -func NewSession(parent context.Context, s bsnet.NetworkService, p *peer.Peer, d ds.Datastore, directory exchange.Directory) exchange.Exchange { +func NewSession(parent context.Context, s bsnet.NetworkService, p *peer.Peer, d ds.Datastore, directory Routing) exchange.Interface { // FIXME(brian): instantiate a concrete Strategist receiver := bsnet.Forwarder{} diff --git a/bitswap/offline.go b/bitswap/offline.go index 46b71d27b..a8dbd0f8e 100644 --- a/bitswap/offline.go +++ b/bitswap/offline.go @@ -9,7 +9,7 @@ import ( u "github.com/jbenet/go-ipfs/util" ) -func NewOfflineExchange() exchange.Exchange { +func NewOfflineExchange() exchange.Interface { return &offlineExchange{} } From 47c5a4aec8365ff9d430e7845d5d044561a3cb3a Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 17:43:13 -0700 Subject: [PATCH 0003/1035] fix(bitswap) compiler errors didn't run tests after the refactor. apologies. 
This commit was moved from ipfs/go-bitswap@ff4b979d391f3120624c8eedbe39f9b6a72e8dbc --- bitswap/network/forwarder_test.go | 2 +- bitswap/strategy/strategy_test.go | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/bitswap/network/forwarder_test.go b/bitswap/network/forwarder_test.go index accc2c781..73604e110 100644 --- a/bitswap/network/forwarder_test.go +++ b/bitswap/network/forwarder_test.go @@ -4,7 +4,7 @@ import ( "testing" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - bsmsg "github.com/jbenet/go-ipfs/bitswap/message" + bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" peer "github.com/jbenet/go-ipfs/peer" ) diff --git a/bitswap/strategy/strategy_test.go b/bitswap/strategy/strategy_test.go index 4adff29a0..dfa216849 100644 --- a/bitswap/strategy/strategy_test.go +++ b/bitswap/strategy/strategy_test.go @@ -3,20 +3,20 @@ package strategy import ( "testing" - message "github.com/jbenet/go-ipfs/bitswap/message" + message "github.com/jbenet/go-ipfs/exchange/bitswap/message" "github.com/jbenet/go-ipfs/peer" "github.com/jbenet/go-ipfs/util/testutil" ) type peerAndStrategist struct { *peer.Peer - Strategist + Strategy } func newPeerAndStrategist(idStr string) peerAndStrategist { return peerAndStrategist{ - Peer: &peer.Peer{ID: peer.ID(idStr)}, - Strategist: New(), + Peer: &peer.Peer{ID: peer.ID(idStr)}, + Strategy: New(), } } @@ -32,7 +32,7 @@ func TestBlockRecordedAsWantedAfterMessageReceived(t *testing.T) { chooser.MessageReceived(beggar.Peer, messageFromBeggarToChooser) // for this test, doesn't matter if you record that beggar sent - if !chooser.IsWantedByPeer(block.Key(), beggar.Peer) { + if !chooser.BlockIsWantedByPeer(block.Key(), beggar.Peer) { t.Fatal("chooser failed to record that beggar wants block") } } @@ -51,16 +51,16 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { t.Fatal("Sanity Check: Peers have same Key!") } - if 
!peerIsPartner(seattle.Peer, sanfrancisco.Strategist) { + if !peerIsPartner(seattle.Peer, sanfrancisco.Strategy) { t.Fatal("Peer wasn't added as a Partner") } - if !peerIsPartner(sanfrancisco.Peer, seattle.Strategist) { + if !peerIsPartner(sanfrancisco.Peer, seattle.Strategy) { t.Fatal("Peer wasn't added as a Partner") } } -func peerIsPartner(p *peer.Peer, s Strategist) bool { +func peerIsPartner(p *peer.Peer, s Strategy) bool { for _, partner := range s.Peers() { if partner.Key() == p.Key() { return true From dbb9063c1481a1c2d99b1a0044699c554f4a6e1b Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 18:13:25 -0700 Subject: [PATCH 0004/1035] test(exch:bs:strategy) test accounting consistency > Why expose num bytes sent and received? Makes it easy to test consistency of the ledgers > Got a better reason? Makes it possible to expose metrics to the people-facing API This commit was moved from ipfs/go-bitswap@d2ea3d2543d9d5093be9d35b919cb9c72c15db36 --- bitswap/strategy/interface.go | 4 ++++ bitswap/strategy/strategy.go | 8 +++++++ bitswap/strategy/strategy_test.go | 37 +++++++++++++++++++++++++++++-- 3 files changed, 47 insertions(+), 2 deletions(-) diff --git a/bitswap/strategy/interface.go b/bitswap/strategy/interface.go index 8608c52ce..a95ea8bd2 100644 --- a/bitswap/strategy/interface.go +++ b/bitswap/strategy/interface.go @@ -24,6 +24,10 @@ type Strategy interface { // MessageSent records sending of message for accounting purposes MessageSent(*peer.Peer, bsmsg.BitSwapMessage) error + + NumBytesSentTo(*peer.Peer) uint64 + + NumBytesReceivedFrom(*peer.Peer) uint64 } type WantList interface { diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index 208811561..406508d6e 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -76,6 +76,14 @@ func (s *strategist) MessageSent(p *peer.Peer, m bsmsg.BitSwapMessage) error { return nil } +func (s *strategist) NumBytesSentTo(p *peer.Peer) uint64 { + return 
s.ledger(p).Accounting.BytesSent +} + +func (s *strategist) NumBytesReceivedFrom(p *peer.Peer) uint64 { + return s.ledger(p).Accounting.BytesRecv +} + // ledger lazily instantiates a ledger func (s *strategist) ledger(p *peer.Peer) *ledger { l, ok := s.ledgerMap[peerKey(p.Key())] diff --git a/bitswap/strategy/strategy_test.go b/bitswap/strategy/strategy_test.go index dfa216849..e90bcd4ec 100644 --- a/bitswap/strategy/strategy_test.go +++ b/bitswap/strategy/strategy_test.go @@ -1,11 +1,12 @@ package strategy import ( + "strings" "testing" message "github.com/jbenet/go-ipfs/exchange/bitswap/message" - "github.com/jbenet/go-ipfs/peer" - "github.com/jbenet/go-ipfs/util/testutil" + peer "github.com/jbenet/go-ipfs/peer" + testutil "github.com/jbenet/go-ipfs/util/testutil" ) type peerAndStrategist struct { @@ -20,6 +21,38 @@ func newPeerAndStrategist(idStr string) peerAndStrategist { } } +func TestConsistentAccounting(t *testing.T) { + sender := newPeerAndStrategist("Ernie") + receiver := newPeerAndStrategist("Bert") + + // Send messages from Ernie to Bert + for i := 0; i < 1000; i++ { + + m := message.New() + content := []string{"this", "is", "message", "i"} + m.AppendBlock(testutil.NewBlockOrFail(t, strings.Join(content, " "))) + + sender.MessageSent(receiver.Peer, m) + receiver.MessageReceived(sender.Peer, m) + } + + // Ensure sender records the change + if sender.NumBytesSentTo(receiver.Peer) == 0 { + t.Fatal("Sent bytes were not recorded") + } + + // Ensure sender and receiver have the same values + if sender.NumBytesSentTo(receiver.Peer) != receiver.NumBytesReceivedFrom(sender.Peer) { + t.Fatal("Inconsistent book-keeping. Strategies don't agree") + } + + // Ensure sender didn't record receving anything. 
And that the receiver + // didn't record sending anything + if receiver.NumBytesSentTo(sender.Peer) != 0 || sender.NumBytesReceivedFrom(receiver.Peer) != 0 { + t.Fatal("Bert didn't send bytes to Ernie") + } +} + func TestBlockRecordedAsWantedAfterMessageReceived(t *testing.T) { beggar := newPeerAndStrategist("can't be chooser") chooser := newPeerAndStrategist("chooses JIF") From cbe83b3f78e58f5cb3664418485f00b6396cd146 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 18:44:46 -0700 Subject: [PATCH 0005/1035] style(ex:bitswap) put public methods at top This commit was moved from ipfs/go-bitswap@7de1c50576744593519079313835f7293bab05d4 --- bitswap/bitswap.go | 54 +++++++++++++++++++++++----------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index dcf095b02..967494625 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -35,6 +35,24 @@ type Routing interface { // advertisements. WantLists are sorted in terms of priority. const PartnerWantListMax = 10 +// NewSession initializes a bitswap session. +func NewSession(parent context.Context, s bsnet.NetworkService, p *peer.Peer, d ds.Datastore, directory Routing) exchange.Interface { + + // FIXME(brian): instantiate a concrete Strategist + receiver := bsnet.Forwarder{} + bs := &bitswap{ + blockstore: blockstore.NewBlockstore(d), + notifications: notifications.New(), + strategy: strategy.New(), + peer: p, + routing: directory, + sender: bsnet.NewNetworkAdapter(s, &receiver), + } + receiver.Delegate(bs) + + return bs +} + // bitswap instances implement the bitswap protocol. type bitswap struct { // peer is the identity of this (local) node. @@ -58,24 +76,6 @@ type bitswap struct { strategy strategy.Strategy } -// NewSession initializes a bitswap session. 
-func NewSession(parent context.Context, s bsnet.NetworkService, p *peer.Peer, d ds.Datastore, directory Routing) exchange.Interface { - - // FIXME(brian): instantiate a concrete Strategist - receiver := bsnet.Forwarder{} - bs := &bitswap{ - blockstore: blockstore.NewBlockstore(d), - notifications: notifications.New(), - strategy: strategy.New(), - peer: p, - routing: directory, - sender: bsnet.NewNetworkAdapter(s, &receiver), - } - receiver.Delegate(bs) - - return bs -} - // GetBlock attempts to retrieve a particular block from peers, within timeout. func (bs *bitswap) Block(k u.Key, timeout time.Duration) ( *blocks.Block, error) { @@ -149,15 +149,6 @@ func (bs *bitswap) HasBlock(blk blocks.Block) error { return bs.routing.Provide(blk.Key()) } -// TODO(brian): get a return value -func (bs *bitswap) send(p *peer.Peer, b blocks.Block) { - message := bsmsg.New() - message.AppendBlock(b) - // FIXME(brian): pass ctx - bs.sender.SendMessage(context.Background(), p, message) - bs.strategy.MessageSent(p, message) -} - // TODO(brian): handle errors func (bs *bitswap) ReceiveMessage( ctx context.Context, sender *peer.Peer, incoming bsmsg.BitSwapMessage) ( @@ -187,6 +178,15 @@ func (bs *bitswap) ReceiveMessage( return nil, nil, errors.New("TODO implement") } +// TODO(brian): get a return value +func (bs *bitswap) send(p *peer.Peer, b blocks.Block) { + message := bsmsg.New() + message.AppendBlock(b) + // FIXME(brian): pass ctx + bs.sender.SendMessage(context.Background(), p, message) + bs.strategy.MessageSent(p, message) +} + func numBytes(b blocks.Block) int { return len(b.Data) } From 61e5fb44d7b8c3687f06aea8c316d0b805a91bc6 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 19:01:06 -0700 Subject: [PATCH 0006/1035] refac(exch:bitswap) always notify strategy when message sent This commit was moved from ipfs/go-bitswap@0abce33fe864b356a4f61e13e6ca944fc20da6ea --- bitswap/bitswap.go | 45 +++++++++++++++++++++++---------------------- 1 file changed, 23 
insertions(+), 22 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 967494625..f012e8042 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -79,6 +79,9 @@ type bitswap struct { // GetBlock attempts to retrieve a particular block from peers, within timeout. func (bs *bitswap) Block(k u.Key, timeout time.Duration) ( *blocks.Block, error) { + ctx, _ := context.WithTimeout(context.Background(), timeout) + + // TODO replace timeout with ctx in routing interface begin := time.Now() tleft := timeout - time.Now().Sub(begin) provs_ch := bs.routing.FindProvidersAsync(k, 20, timeout) @@ -90,7 +93,7 @@ func (bs *bitswap) Block(k u.Key, timeout time.Duration) ( go func() { for p := range provs_ch { go func(pr *peer.Peer) { - blk, err := bs.getBlock(k, pr, tleft) + blk, err := bs.getBlock(ctx, k, pr) if err != nil { return } @@ -111,19 +114,14 @@ func (bs *bitswap) Block(k u.Key, timeout time.Duration) ( } } -func (bs *bitswap) getBlock(k u.Key, p *peer.Peer, timeout time.Duration) (*blocks.Block, error) { +func (bs *bitswap) getBlock(ctx context.Context, k u.Key, p *peer.Peer) (*blocks.Block, error) { - ctx, _ := context.WithTimeout(context.Background(), timeout) blockChannel := bs.notifications.Subscribe(ctx, k) message := bsmsg.New() message.AppendWanted(k) - // FIXME(brian): register the accountant on the service wrapper to ensure - // that accounting is _always_ performed when SendMessage and - // ReceiveMessage are called - bs.sender.SendMessage(ctx, p, message) - bs.strategy.MessageSent(p, message) + bs.send(ctx, p, message) block, ok := <-blockChannel if !ok { @@ -132,11 +130,13 @@ func (bs *bitswap) getBlock(k u.Key, p *peer.Peer, timeout time.Duration) (*bloc return &block, nil } -func (bs *bitswap) sendToPeersThatWant(block blocks.Block) { +func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) { for _, p := range bs.strategy.Peers() { if bs.strategy.BlockIsWantedByPeer(block.Key(), p) { if 
bs.strategy.ShouldSendBlockToPeer(block.Key(), p) { - go bs.send(p, block) + message := bsmsg.New() + message.AppendBlock(block) + go bs.send(ctx, p, message) } } } @@ -145,16 +145,17 @@ func (bs *bitswap) sendToPeersThatWant(block blocks.Block) { // HasBlock announces the existance of a block to bitswap, potentially sending // it to peers (Partners) whose WantLists include it. func (bs *bitswap) HasBlock(blk blocks.Block) error { - go bs.sendToPeersThatWant(blk) + ctx := context.TODO() + go bs.sendToPeersThatWant(ctx, blk) return bs.routing.Provide(blk.Key()) } // TODO(brian): handle errors func (bs *bitswap) ReceiveMessage( - ctx context.Context, sender *peer.Peer, incoming bsmsg.BitSwapMessage) ( + ctx context.Context, p *peer.Peer, incoming bsmsg.BitSwapMessage) ( *peer.Peer, bsmsg.BitSwapMessage, error) { - bs.strategy.MessageReceived(sender, incoming) + bs.strategy.MessageReceived(p, incoming) if incoming.Blocks() != nil { for _, block := range incoming.Blocks() { @@ -165,26 +166,26 @@ func (bs *bitswap) ReceiveMessage( if incoming.Wantlist() != nil { for _, key := range incoming.Wantlist() { - if bs.strategy.ShouldSendBlockToPeer(key, sender) { + if bs.strategy.ShouldSendBlockToPeer(key, p) { block, errBlockNotFound := bs.blockstore.Get(key) if errBlockNotFound != nil { // TODO(brian): log/return the error continue } - go bs.send(sender, *block) + message := bsmsg.New() + message.AppendBlock(*block) + go bs.send(ctx, p, message) } } } return nil, nil, errors.New("TODO implement") } -// TODO(brian): get a return value -func (bs *bitswap) send(p *peer.Peer, b blocks.Block) { - message := bsmsg.New() - message.AppendBlock(b) - // FIXME(brian): pass ctx - bs.sender.SendMessage(context.Background(), p, message) - bs.strategy.MessageSent(p, message) +// send strives to ensure that accounting is always performed when a message is +// sent +func (bs *bitswap) send(ctx context.Context, p *peer.Peer, m bsmsg.BitSwapMessage) { + 
bs.sender.SendMessage(context.Background(), p, m) + bs.strategy.MessageSent(p, m) } func numBytes(b blocks.Block) int { From ee3246e68e8a4b008371732e450f517db5be96c3 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 19:02:55 -0700 Subject: [PATCH 0007/1035] refac(ex:bs) remove local peer ref until shown to be necessary This commit was moved from ipfs/go-bitswap@658c955618985d61a9a1875654a24a9e1de4a6c3 --- bitswap/bitswap.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index f012e8042..b39ef0f12 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -44,7 +44,6 @@ func NewSession(parent context.Context, s bsnet.NetworkService, p *peer.Peer, d blockstore: blockstore.NewBlockstore(d), notifications: notifications.New(), strategy: strategy.New(), - peer: p, routing: directory, sender: bsnet.NewNetworkAdapter(s, &receiver), } @@ -55,8 +54,6 @@ func NewSession(parent context.Context, s bsnet.NetworkService, p *peer.Peer, d // bitswap instances implement the bitswap protocol. type bitswap struct { - // peer is the identity of this (local) node. - peer *peer.Peer // sender delivers messages on behalf of the session sender bsnet.NetworkAdapter From 15dcab8f90fb38a526c471e79d252f7f3d9e7fff Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 19:03:33 -0700 Subject: [PATCH 0008/1035] chore(bitswap) remove unused const This commit was moved from ipfs/go-bitswap@bb11184653aa3e8e13c4af84b2c9ebd028a1ca77 --- bitswap/bitswap.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b39ef0f12..82d603176 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -28,13 +28,6 @@ type Routing interface { Provide(key u.Key) error } -// TODO(brian): ensure messages are being received - -// PartnerWantListMax is the bound for the number of keys we'll store per -// partner. 
These are usually taken from the top of the Partner's WantList -// advertisements. WantLists are sorted in terms of priority. -const PartnerWantListMax = 10 - // NewSession initializes a bitswap session. func NewSession(parent context.Context, s bsnet.NetworkService, p *peer.Peer, d ds.Datastore, directory Routing) exchange.Interface { From 7d27b45325953a61bdda14a58267b764c54806ab Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 19:15:15 -0700 Subject: [PATCH 0009/1035] refac(routing) replace timeout -> ctx @jbenet oh hai there! This commit was moved from ipfs/go-bitswap@978a60f76424a11f464a7ba5302e2d8adf325be1 --- bitswap/bitswap.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 82d603176..9cd59af8e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -22,7 +22,7 @@ import ( type Routing interface { // FindProvidersAsync returns a channel of providers for the given key // TODO replace with timeout with context - FindProvidersAsync(u.Key, int, time.Duration) <-chan *peer.Peer + FindProvidersAsync(context.Context, u.Key, int) <-chan *peer.Peer // Provide provides the key to the network Provide(key u.Key) error @@ -74,7 +74,7 @@ func (bs *bitswap) Block(k u.Key, timeout time.Duration) ( // TODO replace timeout with ctx in routing interface begin := time.Now() tleft := timeout - time.Now().Sub(begin) - provs_ch := bs.routing.FindProvidersAsync(k, 20, timeout) + provs_ch := bs.routing.FindProvidersAsync(ctx, k, 20) blockChannel := make(chan blocks.Block) after := time.After(tleft) From 67fe45f08c729ab1c40645ef90d276cd318474b7 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 19:26:27 -0700 Subject: [PATCH 0010/1035] refac(bitswap) let adapter be created with nil delegate yay deleting code. 
This commit was moved from ipfs/go-bitswap@c0ab7e4630812d0a4454e996f6f8067237678615 --- bitswap/bitswap.go | 7 +++---- bitswap/network/forwarder.go | 28 ---------------------------- bitswap/network/forwarder_test.go | 26 -------------------------- 3 files changed, 3 insertions(+), 58 deletions(-) delete mode 100644 bitswap/network/forwarder.go delete mode 100644 bitswap/network/forwarder_test.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 9cd59af8e..d47c96144 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -31,16 +31,15 @@ type Routing interface { // NewSession initializes a bitswap session. func NewSession(parent context.Context, s bsnet.NetworkService, p *peer.Peer, d ds.Datastore, directory Routing) exchange.Interface { - // FIXME(brian): instantiate a concrete Strategist - receiver := bsnet.Forwarder{} + adapter := bsnet.NewNetworkAdapter(s, nil) bs := &bitswap{ blockstore: blockstore.NewBlockstore(d), notifications: notifications.New(), strategy: strategy.New(), routing: directory, - sender: bsnet.NewNetworkAdapter(s, &receiver), + sender: adapter, } - receiver.Delegate(bs) + adapter.SetDelegate(bs) return bs } diff --git a/bitswap/network/forwarder.go b/bitswap/network/forwarder.go deleted file mode 100644 index 603cd0123..000000000 --- a/bitswap/network/forwarder.go +++ /dev/null @@ -1,28 +0,0 @@ -package network - -import ( - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" - peer "github.com/jbenet/go-ipfs/peer" -) - -// Forwarder receives messages and forwards them to the delegate. -// -// Forwarder breaks the circular dependency between the BitSwap Session and the -// Network Service. 
-type Forwarder struct { - delegate Receiver -} - -func (r *Forwarder) ReceiveMessage( - ctx context.Context, sender *peer.Peer, incoming bsmsg.BitSwapMessage) ( - *peer.Peer, bsmsg.BitSwapMessage, error) { - if r.delegate == nil { - return nil, nil, nil - } - return r.delegate.ReceiveMessage(ctx, sender, incoming) -} - -func (r *Forwarder) Delegate(delegate Receiver) { - r.delegate = delegate -} diff --git a/bitswap/network/forwarder_test.go b/bitswap/network/forwarder_test.go deleted file mode 100644 index 73604e110..000000000 --- a/bitswap/network/forwarder_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package network - -import ( - "testing" - - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" - peer "github.com/jbenet/go-ipfs/peer" -) - -func TestDoesntPanicIfDelegateNotPresent(t *testing.T) { - fwdr := Forwarder{} - fwdr.ReceiveMessage(context.Background(), &peer.Peer{}, bsmsg.New()) -} - -func TestForwardsMessageToDelegate(t *testing.T) { - fwdr := Forwarder{delegate: &EchoDelegate{}} - fwdr.ReceiveMessage(context.Background(), &peer.Peer{}, bsmsg.New()) -} - -type EchoDelegate struct{} - -func (d *EchoDelegate) ReceiveMessage(ctx context.Context, p *peer.Peer, - incoming bsmsg.BitSwapMessage) (*peer.Peer, bsmsg.BitSwapMessage, error) { - return p, incoming, nil -} From b7e641ed411b7efb55a5334b917519415a067a8b Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 19:36:18 -0700 Subject: [PATCH 0011/1035] refac(exchange) replace timeout -> context in API This commit was moved from ipfs/go-bitswap@df164fa95b44d975b46db0827af38a5ae9748e89 --- bitswap/bitswap.go | 15 +++++---------- bitswap/offline.go | 5 +++-- bitswap/offline_test.go | 5 +++-- 3 files changed, 11 insertions(+), 14 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d47c96144..173da67e8 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -2,7 +2,6 @@ 
package bitswap import ( "errors" - "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" @@ -65,18 +64,14 @@ type bitswap struct { strategy strategy.Strategy } -// GetBlock attempts to retrieve a particular block from peers, within timeout. -func (bs *bitswap) Block(k u.Key, timeout time.Duration) ( +// GetBlock attempts to retrieve a particular block from peers within the +// deadline enforced by the context +func (bs *bitswap) Block(ctx context.Context, k u.Key) ( *blocks.Block, error) { - ctx, _ := context.WithTimeout(context.Background(), timeout) - // TODO replace timeout with ctx in routing interface - begin := time.Now() - tleft := timeout - time.Now().Sub(begin) provs_ch := bs.routing.FindProvidersAsync(ctx, k, 20) blockChannel := make(chan blocks.Block) - after := time.After(tleft) // TODO: when the data is received, shut down this for loop ASAP go func() { @@ -98,8 +93,8 @@ func (bs *bitswap) Block(k u.Key, timeout time.Duration) ( case block := <-blockChannel: close(blockChannel) return &block, nil - case <-after: - return nil, u.ErrTimeout + case <-ctx.Done(): + return nil, ctx.Err() } } diff --git a/bitswap/offline.go b/bitswap/offline.go index a8dbd0f8e..e35cce2fc 100644 --- a/bitswap/offline.go +++ b/bitswap/offline.go @@ -2,7 +2,8 @@ package bitswap import ( "errors" - "time" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" blocks "github.com/jbenet/go-ipfs/blocks" exchange "github.com/jbenet/go-ipfs/exchange" @@ -21,7 +22,7 @@ type offlineExchange struct { // Block returns nil to signal that a block could not be retrieved for the // given key. // NB: This function may return before the timeout expires. 
-func (_ *offlineExchange) Block(k u.Key, timeout time.Duration) (*blocks.Block, error) { +func (_ *offlineExchange) Block(context.Context, u.Key) (*blocks.Block, error) { return nil, errors.New("Block unavailable. Operating in offline mode") } diff --git a/bitswap/offline_test.go b/bitswap/offline_test.go index 2b40ac5e2..19b040cd5 100644 --- a/bitswap/offline_test.go +++ b/bitswap/offline_test.go @@ -2,7 +2,8 @@ package bitswap import ( "testing" - "time" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" u "github.com/jbenet/go-ipfs/util" testutil "github.com/jbenet/go-ipfs/util/testutil" @@ -10,7 +11,7 @@ import ( func TestBlockReturnsErr(t *testing.T) { off := NewOfflineExchange() - _, err := off.Block(u.Key("foo"), time.Second) + _, err := off.Block(context.TODO(), u.Key("foo")) if err != nil { return // as desired } From 06e2c3314905078e2a6a29a3b2e7b8741294c418 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 19:39:31 -0700 Subject: [PATCH 0012/1035] fix(bitswap) use passed ctx This commit was moved from ipfs/go-bitswap@55b425a84c28d276b320a6628e0d8a48243f976a --- bitswap/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 173da67e8..62ff1cd28 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -168,7 +168,7 @@ func (bs *bitswap) ReceiveMessage( // send strives to ensure that accounting is always performed when a message is // sent func (bs *bitswap) send(ctx context.Context, p *peer.Peer, m bsmsg.BitSwapMessage) { - bs.sender.SendMessage(context.Background(), p, m) + bs.sender.SendMessage(ctx, p, m) bs.strategy.MessageSent(p, m) } From 414bfb75fe1cba8f6ec0eff93d66ae4985c84f8a Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 19:43:03 -0700 Subject: [PATCH 0013/1035] feat(exchange) pass ctx to exchange.HasBlock(...) 
This commit was moved from ipfs/go-bitswap@b62e655908f0b1f091267fa9c27979a57bd7dcb1 --- bitswap/bitswap.go | 3 +-- bitswap/offline.go | 2 +- bitswap/offline_test.go | 4 ++-- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 62ff1cd28..35a1a90b5 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -128,8 +128,7 @@ func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) // HasBlock announces the existance of a block to bitswap, potentially sending // it to peers (Partners) whose WantLists include it. -func (bs *bitswap) HasBlock(blk blocks.Block) error { - ctx := context.TODO() +func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { go bs.sendToPeersThatWant(ctx, blk) return bs.routing.Provide(blk.Key()) } diff --git a/bitswap/offline.go b/bitswap/offline.go index e35cce2fc..9695b0b56 100644 --- a/bitswap/offline.go +++ b/bitswap/offline.go @@ -27,6 +27,6 @@ func (_ *offlineExchange) Block(context.Context, u.Key) (*blocks.Block, error) { } // HasBlock always returns nil. 
-func (_ *offlineExchange) HasBlock(blocks.Block) error { +func (_ *offlineExchange) HasBlock(context.Context, blocks.Block) error { return nil } diff --git a/bitswap/offline_test.go b/bitswap/offline_test.go index 19b040cd5..26821f2c8 100644 --- a/bitswap/offline_test.go +++ b/bitswap/offline_test.go @@ -11,7 +11,7 @@ import ( func TestBlockReturnsErr(t *testing.T) { off := NewOfflineExchange() - _, err := off.Block(context.TODO(), u.Key("foo")) + _, err := off.Block(context.Background(), u.Key("foo")) if err != nil { return // as desired } @@ -21,7 +21,7 @@ func TestBlockReturnsErr(t *testing.T) { func TestHasBlockReturnsNil(t *testing.T) { off := NewOfflineExchange() block := testutil.NewBlockOrFail(t, "data") - err := off.HasBlock(block) + err := off.HasBlock(context.Background(), block) if err != nil { t.Fatal("") } From e4122bdec698ffcec4aca7fa93594efd633067f9 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 19:52:07 -0700 Subject: [PATCH 0014/1035] chore(exch, bitswap) misc trivial cleanup This commit was moved from ipfs/go-bitswap@55e531817e12a7ff268236c1d98cace39c6ae12c --- bitswap/bitswap.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 35a1a90b5..083ca2833 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -1,8 +1,6 @@ package bitswap import ( - "errors" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" @@ -20,7 +18,6 @@ import ( // TODO rename -> Router? 
type Routing interface { // FindProvidersAsync returns a channel of providers for the given key - // TODO replace with timeout with context FindProvidersAsync(context.Context, u.Key, int) <-chan *peer.Peer // Provide provides the key to the network @@ -66,8 +63,7 @@ type bitswap struct { // GetBlock attempts to retrieve a particular block from peers within the // deadline enforced by the context -func (bs *bitswap) Block(ctx context.Context, k u.Key) ( - *blocks.Block, error) { +func (bs *bitswap) Block(ctx context.Context, k u.Key) (*blocks.Block, error) { provs_ch := bs.routing.FindProvidersAsync(ctx, k, 20) @@ -161,7 +157,7 @@ func (bs *bitswap) ReceiveMessage( } } } - return nil, nil, errors.New("TODO implement") + return nil, nil, nil } // send strives to ensure that accounting is always performed when a message is From 813fafef6e4b3ece2972a28a813c3214a7a1a530 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 19:54:30 -0700 Subject: [PATCH 0015/1035] refac(bitswap) extract const This commit was moved from ipfs/go-bitswap@640fa135b82eb016143c1e8ff8006e9fc81bc7a8 --- bitswap/bitswap.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 083ca2833..418d5046e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -63,9 +63,12 @@ type bitswap struct { // GetBlock attempts to retrieve a particular block from peers within the // deadline enforced by the context +// +// TODO ensure only one active request per key func (bs *bitswap) Block(ctx context.Context, k u.Key) (*blocks.Block, error) { - provs_ch := bs.routing.FindProvidersAsync(ctx, k, 20) + const maxProviders = 20 + provs_ch := bs.routing.FindProvidersAsync(ctx, k, maxProviders) blockChannel := make(chan blocks.Block) From 6dd37754c821431fdb22822e2255efc8cf0db921 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 20:30:04 -0700 Subject: [PATCH 0016/1035] feat(exch:bitswap) simply get method This commit 
was moved from ipfs/go-bitswap@ef92b55d8c4c9133fa74643fc0b6ee590f9abcf2 --- bitswap/bitswap.go | 79 +++++++++++++++++++--------------------------- 1 file changed, 33 insertions(+), 46 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 418d5046e..aab1c6f1e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -65,63 +65,38 @@ type bitswap struct { // deadline enforced by the context // // TODO ensure only one active request per key -func (bs *bitswap) Block(ctx context.Context, k u.Key) (*blocks.Block, error) { +func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) { - const maxProviders = 20 - provs_ch := bs.routing.FindProvidersAsync(ctx, k, maxProviders) + ctx, cancelFunc := context.WithCancel(parent) + promise := bs.notifications.Subscribe(ctx, k) - blockChannel := make(chan blocks.Block) - - // TODO: when the data is received, shut down this for loop ASAP go func() { - for p := range provs_ch { - go func(pr *peer.Peer) { - blk, err := bs.getBlock(ctx, k, pr) + const maxProviders = 20 + peersToQuery := bs.routing.FindProvidersAsync(ctx, k, maxProviders) + message := bsmsg.New() + message.AppendWanted(k) + for i := range peersToQuery { + go func(p *peer.Peer) { + response, err := bs.sender.SendRequest(ctx, p, message) if err != nil { return } - select { - case blockChannel <- *blk: - default: - } - }(p) + // FIXME ensure accounting is handled correctly when + // communication fails. May require slightly different API to + // get better guarantees. May need shared sequence numbers. 
+ bs.strategy.MessageSent(p, message) + + bs.ReceiveMessage(ctx, p, response) + }(i) } }() select { - case block := <-blockChannel: - close(blockChannel) + case block := <-promise: + cancelFunc() return &block, nil - case <-ctx.Done(): - return nil, ctx.Err() - } -} - -func (bs *bitswap) getBlock(ctx context.Context, k u.Key, p *peer.Peer) (*blocks.Block, error) { - - blockChannel := bs.notifications.Subscribe(ctx, k) - - message := bsmsg.New() - message.AppendWanted(k) - - bs.send(ctx, p, message) - - block, ok := <-blockChannel - if !ok { - return nil, u.ErrTimeout - } - return &block, nil -} - -func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) { - for _, p := range bs.strategy.Peers() { - if bs.strategy.BlockIsWantedByPeer(block.Key(), p) { - if bs.strategy.ShouldSendBlockToPeer(block.Key(), p) { - message := bsmsg.New() - message.AppendBlock(block) - go bs.send(ctx, p, message) - } - } + case <-parent.Done(): + return nil, parent.Err() } } @@ -173,3 +148,15 @@ func (bs *bitswap) send(ctx context.Context, p *peer.Peer, m bsmsg.BitSwapMessag func numBytes(b blocks.Block) int { return len(b.Data) } + +func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) { + for _, p := range bs.strategy.Peers() { + if bs.strategy.BlockIsWantedByPeer(block.Key(), p) { + if bs.strategy.ShouldSendBlockToPeer(block.Key(), p) { + message := bsmsg.New() + message.AppendBlock(block) + go bs.send(ctx, p, message) + } + } + } +} From 58b850ce030ddf117d56a326857bac85fa339343 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 20:38:01 -0700 Subject: [PATCH 0017/1035] feat(bitswap) broadcast block to routing, peers on receipt This commit was moved from ipfs/go-bitswap@b30eb0e7eefb0b3af7996638b19846765f1ff566 --- bitswap/bitswap.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index aab1c6f1e..ac6ec4536 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -118,6 
+118,7 @@ func (bs *bitswap) ReceiveMessage( for _, block := range incoming.Blocks() { go bs.blockstore.Put(block) // FIXME(brian): err ignored go bs.notifications.Publish(block) + go bs.HasBlock(ctx, block) // FIXME err ignored } } From 2b4770ebbe18a8df0713868662c358b96099daad Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 20:47:28 -0700 Subject: [PATCH 0018/1035] style(exch:bitswap) rename variable This commit was moved from ipfs/go-bitswap@619a9470a0026503c180a8fe9a2e0ae2bbd1a3cd --- bitswap/bitswap.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index ac6ec4536..5b2a63a6c 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -27,15 +27,15 @@ type Routing interface { // NewSession initializes a bitswap session. func NewSession(parent context.Context, s bsnet.NetworkService, p *peer.Peer, d ds.Datastore, directory Routing) exchange.Interface { - adapter := bsnet.NewNetworkAdapter(s, nil) + networkAdapter := bsnet.NewNetworkAdapter(s, nil) bs := &bitswap{ blockstore: blockstore.NewBlockstore(d), notifications: notifications.New(), strategy: strategy.New(), routing: directory, - sender: adapter, + sender: networkAdapter, } - adapter.SetDelegate(bs) + networkAdapter.SetDelegate(bs) return bs } From 35563da10f327b2169690d6955363bb03de69f42 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Sep 2014 22:19:33 -0700 Subject: [PATCH 0019/1035] style(exch:bitswap) rename adapter, session, etc. 
style(exch:bitswap) rename NetMessage adapter impl This commit was moved from ipfs/go-bitswap@893042399158ead67c8b90b384b13fb68a9c7eae --- bitswap/bitswap.go | 9 ++--- bitswap/network/interface.go | 6 ++-- ...work_adapter.go => net_message_adapter.go} | 34 +++++++++---------- 3 files changed, 25 insertions(+), 24 deletions(-) rename bitswap/network/{network_adapter.go => net_message_adapter.go} (65%) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 5b2a63a6c..c223addd0 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -24,10 +24,11 @@ type Routing interface { Provide(key u.Key) error } -// NewSession initializes a bitswap session. -func NewSession(parent context.Context, s bsnet.NetworkService, p *peer.Peer, d ds.Datastore, directory Routing) exchange.Interface { +// NetMessageSession initializes a BitSwap session that communicates over the +// provided NetMessage service +func NetMessageSession(parent context.Context, s bsnet.NetMessageService, p *peer.Peer, d ds.Datastore, directory Routing) exchange.Interface { - networkAdapter := bsnet.NewNetworkAdapter(s, nil) + networkAdapter := bsnet.NetMessageAdapter(s, nil) bs := &bitswap{ blockstore: blockstore.NewBlockstore(d), notifications: notifications.New(), @@ -44,7 +45,7 @@ func NewSession(parent context.Context, s bsnet.NetworkService, p *peer.Peer, d type bitswap struct { // sender delivers messages on behalf of the session - sender bsnet.NetworkAdapter + sender bsnet.Adapter // blockstore is the local database // NB: ensure threadsafety diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 703398354..29bb0da3b 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -9,8 +9,8 @@ import ( peer "github.com/jbenet/go-ipfs/peer" ) -// NetworkAdapter mediates the exchange's communication with the network. 
-type NetworkAdapter interface { +// Adapter provides network connectivity for BitSwap sessions +type Adapter interface { // SendMessage sends a BitSwap message to a peer. SendMessage( @@ -36,7 +36,7 @@ type Receiver interface { } // TODO(brian): move this to go-ipfs/net package -type NetworkService interface { +type NetMessageService interface { SendRequest(ctx context.Context, m netmsg.NetMessage) (netmsg.NetMessage, error) SendMessage(ctx context.Context, m netmsg.NetMessage) error SetHandler(netservice.Handler) diff --git a/bitswap/network/network_adapter.go b/bitswap/network/net_message_adapter.go similarity index 65% rename from bitswap/network/network_adapter.go rename to bitswap/network/net_message_adapter.go index 8914101bc..603317afb 100644 --- a/bitswap/network/network_adapter.go +++ b/bitswap/network/net_message_adapter.go @@ -10,27 +10,27 @@ import ( peer "github.com/jbenet/go-ipfs/peer" ) -// NewSender wraps a network Service to perform translation between -// BitSwapMessage and NetMessage formats. This allows the BitSwap session to -// ignore these details. 
-func NewNetworkAdapter(s NetworkService, r Receiver) NetworkAdapter { - adapter := networkAdapter{ - networkService: s, - receiver: r, +// NetMessageAdapter wraps a NetMessage network service +func NetMessageAdapter(s NetMessageService, r Receiver) Adapter { + adapter := impl{ + nms: s, + receiver: r, } s.SetHandler(&adapter) return &adapter } -// networkAdapter implements NetworkAdapter -type networkAdapter struct { - networkService NetworkService - receiver Receiver +// implements an Adapter that integrates with a NetMessage network service +type impl struct { + nms NetMessageService + + // inbound messages from the network are forwarded to the receiver + receiver Receiver } // HandleMessage marshals and unmarshals net messages, forwarding them to the // BitSwapMessage receiver -func (adapter *networkAdapter) HandleMessage( +func (adapter *impl) HandleMessage( ctx context.Context, incoming netmsg.NetMessage) (netmsg.NetMessage, error) { if adapter.receiver == nil { @@ -60,7 +60,7 @@ func (adapter *networkAdapter) HandleMessage( return outgoing, nil } -func (adapter *networkAdapter) SendMessage( +func (adapter *impl) SendMessage( ctx context.Context, p *peer.Peer, outgoing bsmsg.BitSwapMessage) error { @@ -69,10 +69,10 @@ func (adapter *networkAdapter) SendMessage( if err != nil { return err } - return adapter.networkService.SendMessage(ctx, nmsg) + return adapter.nms.SendMessage(ctx, nmsg) } -func (adapter *networkAdapter) SendRequest( +func (adapter *impl) SendRequest( ctx context.Context, p *peer.Peer, outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) { @@ -81,13 +81,13 @@ func (adapter *networkAdapter) SendRequest( if err != nil { return nil, err } - incomingMsg, err := adapter.networkService.SendRequest(ctx, outgoingMsg) + incomingMsg, err := adapter.nms.SendRequest(ctx, outgoingMsg) if err != nil { return nil, err } return bsmsg.FromNet(incomingMsg) } -func (adapter *networkAdapter) SetDelegate(r Receiver) { +func (adapter *impl) SetDelegate(r 
Receiver) { adapter.receiver = r } From 8fa177bfb9901ab97c6a505faf13f82c69a8032c Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 08:11:15 -0700 Subject: [PATCH 0020/1035] test(bitswap:testnet) misc: * test network client getting more than max * test for find providers * rename factory method * local network * misc test improvements * test bitswap get block timeout * test provider exists but cannot connect to peer * test sending a message async over local network This commit was moved from ipfs/go-bitswap@791637a5541c21c80dd461297570ff98c7fb42de --- bitswap/bitswap_test.go | 81 ++++++++++++++++ bitswap/hash_table.go | 96 +++++++++++++++++++ bitswap/hash_table_test.go | 157 ++++++++++++++++++++++++++++++ bitswap/local_network.go | 174 ++++++++++++++++++++++++++++++++++ bitswap/local_network_test.go | 138 +++++++++++++++++++++++++++ bitswap/strategy/strategy.go | 7 ++ 6 files changed, 653 insertions(+) create mode 100644 bitswap/bitswap_test.go create mode 100644 bitswap/hash_table.go create mode 100644 bitswap/hash_table_test.go create mode 100644 bitswap/local_network.go create mode 100644 bitswap/local_network_test.go diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go new file mode 100644 index 000000000..cc2bb6fa3 --- /dev/null +++ b/bitswap/bitswap_test.go @@ -0,0 +1,81 @@ +package bitswap + +import ( + "testing" + "time" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + bstore "github.com/jbenet/go-ipfs/blockstore" + exchange "github.com/jbenet/go-ipfs/exchange" + notifications "github.com/jbenet/go-ipfs/exchange/bitswap/notifications" + strategy "github.com/jbenet/go-ipfs/exchange/bitswap/strategy" + peer "github.com/jbenet/go-ipfs/peer" + testutil "github.com/jbenet/go-ipfs/util/testutil" +) + +func TestGetBlockTimeout(t *testing.T) { + + net := LocalNetwork() + rs := newRoutingServer() + ipfs 
:= session(net, rs, []byte("peer id")) + ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) + block := testutil.NewBlockOrFail(t, "block") + + _, err := ipfs.exchange.Block(ctx, block.Key()) + if err != context.DeadlineExceeded { + t.Fatal("Expected DeadlineExceeded error") + } +} + +func TestProviderForKeyButNetworkCannotFind(t *testing.T) { + + net := LocalNetwork() + rs := newRoutingServer() + ipfs := session(net, rs, []byte("peer id")) + // ctx := context.Background() + ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) + block := testutil.NewBlockOrFail(t, "block") + + rs.Announce(&peer.Peer{}, block.Key()) // but not on network + + _, err := ipfs.exchange.Block(ctx, block.Key()) + if err != context.DeadlineExceeded { + t.Fatal("Expected DeadlineExceeded error") + } +} + +type ipfs struct { + peer *peer.Peer + exchange exchange.Interface + blockstore bstore.Blockstore +} + +func session(net Network, rs RoutingServer, id peer.ID) ipfs { + p := &peer.Peer{} + + adapter := net.Adapter(p) + htc := rs.Client(p) + + blockstore := bstore.NewBlockstore(ds.NewMapDatastore()) + bs := &bitswap{ + blockstore: blockstore, + notifications: notifications.New(), + strategy: strategy.New(), + routing: htc, + sender: adapter, + } + adapter.SetDelegate(bs) + return ipfs{ + peer: p, + exchange: bs, + blockstore: blockstore, + } +} + +func TestSendToWantingPeer(t *testing.T) { + t.Log("Peer |w| tells me it wants file, but I don't have it") + t.Log("Then another peer |o| sends it to me") + t.Log("After receiving the file from |o|, I send it to the wanting peer |w|") +} diff --git a/bitswap/hash_table.go b/bitswap/hash_table.go new file mode 100644 index 000000000..d030a0f5d --- /dev/null +++ b/bitswap/hash_table.go @@ -0,0 +1,96 @@ +package bitswap + +import ( + "errors" + "sync" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + peer "github.com/jbenet/go-ipfs/peer" + u 
"github.com/jbenet/go-ipfs/util" +) + +type RoutingServer interface { + // TODO + Announce(*peer.Peer, u.Key) error + + // TODO + Providers(u.Key) []*peer.Peer + + // TODO + // Returns a Routing instance configured to query this hash table + Client(*peer.Peer) Routing +} + +func newRoutingServer() RoutingServer { + return &hashTable{ + m: make(map[u.Key]map[*peer.Peer]bool), + } +} + +type hashTable struct { + lock sync.RWMutex + m map[u.Key]map[*peer.Peer]bool +} + +var TODO = errors.New("TODO") + +func (rs *hashTable) Announce(p *peer.Peer, k u.Key) error { + rs.lock.Lock() + defer rs.lock.Unlock() + + _, ok := rs.m[k] + if !ok { + rs.m[k] = make(map[*peer.Peer]bool) + } + rs.m[k][p] = true + return nil +} + +func (rs *hashTable) Providers(k u.Key) []*peer.Peer { + rs.lock.RLock() + defer rs.lock.RUnlock() + ret := make([]*peer.Peer, 0) + peerset, ok := rs.m[k] + if !ok { + return ret + } + for peer, _ := range peerset { + ret = append(ret, peer) + } + return ret +} + +// TODO +func (rs *hashTable) Client(p *peer.Peer) Routing { + return &routingClient{ + peer: p, + hashTable: rs, + } +} + +type routingClient struct { + peer *peer.Peer + hashTable RoutingServer +} + +func (a *routingClient) FindProvidersAsync(ctx context.Context, k u.Key, max int) <-chan *peer.Peer { + out := make(chan *peer.Peer) + go func() { + defer close(out) + for i, p := range a.hashTable.Providers(k) { + if max <= i { + return + } + select { + case out <- p: + case <-ctx.Done(): + return + } + } + }() + return out +} + +func (a *routingClient) Provide(key u.Key) error { + return a.hashTable.Announce(a.peer, key) +} diff --git a/bitswap/hash_table_test.go b/bitswap/hash_table_test.go new file mode 100644 index 000000000..fafc1fd9a --- /dev/null +++ b/bitswap/hash_table_test.go @@ -0,0 +1,157 @@ +package bitswap + +import ( + "bytes" + "testing" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" +) +import ( + "github.com/jbenet/go-ipfs/peer" + u 
"github.com/jbenet/go-ipfs/util" +) + +func TestKeyNotFound(t *testing.T) { + + rs := func() RoutingServer { + // TODO fields + return &hashTable{} + }() + empty := rs.Providers(u.Key("not there")) + if len(empty) != 0 { + t.Fatal("should be empty") + } +} + +func TestSetAndGet(t *testing.T) { + pid := peer.ID([]byte("the peer id")) + p := &peer.Peer{ + ID: pid, + } + k := u.Key("42") + rs := newRoutingServer() + err := rs.Announce(p, k) + if err != nil { + t.Fatal(err) + } + providers := rs.Providers(k) + if len(providers) != 1 { + t.Fatal("should be one") + } + for _, elem := range providers { + if bytes.Equal(elem.ID, pid) { + return + } + } + t.Fatal("ID should have matched") +} + +func TestClientFindProviders(t *testing.T) { + peer := &peer.Peer{ + ID: []byte("42"), + } + rs := newRoutingServer() + client := rs.Client(peer) + k := u.Key("hello") + err := client.Provide(k) + if err != nil { + t.Fatal(err) + } + max := 100 + + providersFromHashTable := rs.Providers(k) + + isInHT := false + for _, p := range providersFromHashTable { + if bytes.Equal(p.ID, peer.ID) { + isInHT = true + } + } + if !isInHT { + t.Fatal("Despite client providing key, peer wasn't in hash table as a provider") + } + providersFromClient := client.FindProvidersAsync(context.Background(), u.Key("hello"), max) + isInClient := false + for p := range providersFromClient { + if bytes.Equal(p.ID, peer.ID) { + isInClient = true + } + } + if !isInClient { + t.Fatal("Despite client providing key, client didn't receive peer when finding providers") + } +} + +func TestClientOverMax(t *testing.T) { + rs := newRoutingServer() + k := u.Key("hello") + numProvidersForHelloKey := 100 + for i := 0; i < numProvidersForHelloKey; i++ { + peer := &peer.Peer{ + ID: []byte(string(i)), + } + err := rs.Announce(peer, k) + if err != nil { + t.Fatal(err) + } + } + providersFromHashTable := rs.Providers(k) + if len(providersFromHashTable) != numProvidersForHelloKey { + t.Log(1 == len(providersFromHashTable)) + 
t.Fatal("not all providers were returned") + } + + max := 10 + client := rs.Client(&peer.Peer{ID: []byte("TODO")}) + providersFromClient := client.FindProvidersAsync(context.Background(), k, max) + i := 0 + for _ = range providersFromClient { + i++ + } + if i != max { + t.Fatal("Too many providers returned") + } +} + +// TODO does dht ensure won't receive self as a provider? probably not. +func TestCanceledContext(t *testing.T) { + rs := newRoutingServer() + k := u.Key("hello") + + t.Log("async'ly announce infinite stream of providers for key") + i := 0 + go func() { // infinite stream + for { + peer := &peer.Peer{ + ID: []byte(string(i)), + } + err := rs.Announce(peer, k) + if err != nil { + t.Fatal(err) + } + i++ + } + }() + + client := rs.Client(&peer.Peer{ID: []byte("peer id doesn't matter")}) + + t.Log("warning: max is finite so this test is non-deterministic") + t.Log("context cancellation could simply take lower priority") + t.Log("and result in receiving the max number of results") + max := 1000 + + t.Log("cancel the context before consuming") + ctx, cancelFunc := context.WithCancel(context.Background()) + cancelFunc() + providers := client.FindProvidersAsync(ctx, k, max) + + numProvidersReturned := 0 + for _ = range providers { + numProvidersReturned++ + } + t.Log(numProvidersReturned) + + if numProvidersReturned == max { + t.Fatal("Context cancel had no effect") + } +} diff --git a/bitswap/local_network.go b/bitswap/local_network.go new file mode 100644 index 000000000..ff8d5de4c --- /dev/null +++ b/bitswap/local_network.go @@ -0,0 +1,174 @@ +package bitswap + +import ( + "bytes" + "errors" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" + bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" + peer "github.com/jbenet/go-ipfs/peer" + "github.com/jbenet/go-ipfs/util" +) + +type Network interface { + Adapter(*peer.Peer) bsnet.Adapter + + 
SendMessage( + ctx context.Context, + from *peer.Peer, + to *peer.Peer, + message bsmsg.BitSwapMessage) error + + SendRequest( + ctx context.Context, + from *peer.Peer, + to *peer.Peer, + message bsmsg.BitSwapMessage) ( + incoming bsmsg.BitSwapMessage, err error) +} + +// network impl + +func LocalNetwork() Network { + return &network{ + clients: make(map[util.Key]bsnet.Receiver), + } +} + +type network struct { + clients map[util.Key]bsnet.Receiver +} + +func (n *network) Adapter(p *peer.Peer) bsnet.Adapter { + client := &networkClient{ + local: p, + network: n, + } + n.clients[p.Key()] = client + return client +} + +// TODO should this be completely asynchronous? +// TODO what does the network layer do with errors received from services? +func (n *network) SendMessage( + ctx context.Context, + from *peer.Peer, + to *peer.Peer, + message bsmsg.BitSwapMessage) error { + + receiver, ok := n.clients[to.Key()] + if !ok { + return errors.New("Cannot locate peer on network") + } + + // nb: terminate the context since the context wouldn't actually be passed + // over the network in a real scenario + + go n.deliver(receiver, from, message) + + return nil +} + +func (n *network) deliver( + r bsnet.Receiver, from *peer.Peer, message bsmsg.BitSwapMessage) error { + if message == nil || from == nil { + return errors.New("Invalid input") + } + + nextPeer, nextMsg, err := r.ReceiveMessage(context.TODO(), from, message) + if err != nil { + + // TODO should this error be returned across network boundary? + + // TODO this raises an interesting question about network contract. How + // can the network be expected to behave under different failure + // conditions? What if peer is unreachable? Will we know if messages + // aren't delivered? 
+ + return err + } + + if (nextPeer == nil && nextMsg != nil) || (nextMsg == nil && nextPeer != nil) { + return errors.New("Malformed client request") + } + + if nextPeer == nil && nextMsg == nil { + return nil + } + + nextReceiver, ok := n.clients[nextPeer.Key()] + if !ok { + return errors.New("Cannot locate peer on network") + } + go n.deliver(nextReceiver, nextPeer, nextMsg) + return nil +} + +var NoResponse = errors.New("No response received from the receiver") + +// TODO +func (n *network) SendRequest( + ctx context.Context, + from *peer.Peer, + to *peer.Peer, + message bsmsg.BitSwapMessage) ( + incoming bsmsg.BitSwapMessage, err error) { + + r, ok := n.clients[to.Key()] + if !ok { + return nil, errors.New("Cannot locate peer on network") + } + nextPeer, nextMsg, err := r.ReceiveMessage(context.TODO(), from, message) + if err != nil { + return nil, err + // TODO return nil, NoResponse + } + + // TODO dedupe code + if (nextPeer == nil && nextMsg != nil) || (nextMsg == nil && nextPeer != nil) { + return nil, errors.New("Malformed client request") + } + + // TODO dedupe code + if nextPeer == nil && nextMsg == nil { + return nil, nil + } + + // TODO test when receiver doesn't immediately respond to the initiator of the request + if !bytes.Equal(nextPeer.ID, from.ID) { + go func() { + nextReceiver, ok := n.clients[nextPeer.Key()] + if !ok { + // TODO log the error? 
+ } + n.deliver(nextReceiver, nextPeer, nextMsg) + }() + return nil, NoResponse + } + return nextMsg, nil +} + +type networkClient struct { + local *peer.Peer + bsnet.Receiver + network Network +} + +func (nc *networkClient) SendMessage( + ctx context.Context, + to *peer.Peer, + message bsmsg.BitSwapMessage) error { + return nc.network.SendMessage(ctx, nc.local, to, message) +} + +func (nc *networkClient) SendRequest( + ctx context.Context, + to *peer.Peer, + message bsmsg.BitSwapMessage) (incoming bsmsg.BitSwapMessage, err error) { + return nc.network.SendRequest(ctx, nc.local, to, message) +} + +func (nc *networkClient) SetDelegate(r bsnet.Receiver) { + nc.Receiver = r +} diff --git a/bitswap/local_network_test.go b/bitswap/local_network_test.go new file mode 100644 index 000000000..e5bbda7a0 --- /dev/null +++ b/bitswap/local_network_test.go @@ -0,0 +1,138 @@ +package bitswap + +import ( + "sync" + "testing" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" + bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" + peer "github.com/jbenet/go-ipfs/peer" + testutil "github.com/jbenet/go-ipfs/util/testutil" +) + +func TestSendRequestToCooperativePeer(t *testing.T) { + net := LocalNetwork() + + idOfRecipient := []byte("recipient") + + t.Log("Get two network adapters") + + initiator := net.Adapter(&peer.Peer{ID: []byte("initiator")}) + recipient := net.Adapter(&peer.Peer{ID: idOfRecipient}) + + expectedStr := "response from recipient" + recipient.SetDelegate(lambda(func( + ctx context.Context, + from *peer.Peer, + incoming bsmsg.BitSwapMessage) ( + *peer.Peer, bsmsg.BitSwapMessage, error) { + + t.Log("Recipient received a message from the network") + + // TODO test contents of incoming message + + m := bsmsg.New() + m.AppendBlock(testutil.NewBlockOrFail(t, expectedStr)) + + return from, m, nil + })) + + t.Log("Build a message and send a synchronous request to 
recipient") + + message := bsmsg.New() + message.AppendBlock(testutil.NewBlockOrFail(t, "data")) + response, err := initiator.SendRequest( + context.Background(), &peer.Peer{ID: idOfRecipient}, message) + if err != nil { + t.Fatal(err) + } + + t.Log("Check the contents of the response from recipient") + + for _, blockFromRecipient := range response.Blocks() { + if string(blockFromRecipient.Data) == expectedStr { + return + } + } + t.Fatal("Should have returned after finding expected block data") +} + +func TestSendMessageAsyncButWaitForResponse(t *testing.T) { + net := LocalNetwork() + idOfResponder := []byte("responder") + waiter := net.Adapter(&peer.Peer{ID: []byte("waiter")}) + responder := net.Adapter(&peer.Peer{ID: idOfResponder}) + + var wg sync.WaitGroup + + wg.Add(1) + + expectedStr := "received async" + + responder.SetDelegate(lambda(func( + ctx context.Context, + fromWaiter *peer.Peer, + msgFromWaiter bsmsg.BitSwapMessage) ( + *peer.Peer, bsmsg.BitSwapMessage, error) { + + msgToWaiter := bsmsg.New() + msgToWaiter.AppendBlock(testutil.NewBlockOrFail(t, expectedStr)) + + return fromWaiter, msgToWaiter, nil + })) + + waiter.SetDelegate(lambda(func( + ctx context.Context, + fromResponder *peer.Peer, + msgFromResponder bsmsg.BitSwapMessage) ( + *peer.Peer, bsmsg.BitSwapMessage, error) { + + // TODO assert that this came from the correct peer and that the message contents are as expected + ok := false + for _, b := range msgFromResponder.Blocks() { + if string(b.Data) == expectedStr { + wg.Done() + ok = true + } + } + + if !ok { + t.Fatal("Message not received from the responder") + + } + return nil, nil, nil + })) + + messageSentAsync := bsmsg.New() + messageSentAsync.AppendBlock(testutil.NewBlockOrFail(t, "data")) + errSending := waiter.SendMessage( + context.Background(), &peer.Peer{ID: idOfResponder}, messageSentAsync) + if errSending != nil { + t.Fatal(errSending) + } + + wg.Wait() // until waiter delegate function is executed +} + +type receiverFunc 
func(ctx context.Context, p *peer.Peer, + incoming bsmsg.BitSwapMessage) (*peer.Peer, bsmsg.BitSwapMessage, error) + +// lambda returns a Receiver instance given a receiver function +func lambda(f receiverFunc) bsnet.Receiver { + return &lambdaImpl{ + f: f, + } +} + +type lambdaImpl struct { + f func(ctx context.Context, p *peer.Peer, + incoming bsmsg.BitSwapMessage) ( + *peer.Peer, bsmsg.BitSwapMessage, error) +} + +func (lam *lambdaImpl) ReceiveMessage(ctx context.Context, + p *peer.Peer, incoming bsmsg.BitSwapMessage) ( + *peer.Peer, bsmsg.BitSwapMessage, error) { + return lam.f(ctx, p, incoming) +} diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index 406508d6e..dc7a8e1b3 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -51,6 +51,13 @@ func (s *strategist) Seed(int64) { } func (s *strategist) MessageReceived(p *peer.Peer, m bsmsg.BitSwapMessage) error { + // TODO find a more elegant way to handle this check + if p == nil { + return errors.New("Strategy received nil peer") + } + if m == nil { + return errors.New("Strategy received nil message") + } l := s.ledger(p) for _, key := range m.Wantlist() { l.Wants(key) From 112e77d9db0246285f4358b1d4b8851fb08dc8a2 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 12:34:50 -0700 Subject: [PATCH 0021/1035] fix(bitswap) check for nil in public interface This commit was moved from ipfs/go-bitswap@c34211a7862f5bd4343a53f2654ae75867e831e6 --- bitswap/bitswap.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index c223addd0..79f4d554b 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -1,6 +1,8 @@ package bitswap import ( + "errors" + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" @@ -87,6 +89,9 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) 
(*blocks.Block, error) // get better guarantees. May need shared sequence numbers. bs.strategy.MessageSent(p, message) + if response == nil { + return + } bs.ReceiveMessage(ctx, p, response) }(i) } @@ -112,6 +117,12 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { func (bs *bitswap) ReceiveMessage( ctx context.Context, p *peer.Peer, incoming bsmsg.BitSwapMessage) ( *peer.Peer, bsmsg.BitSwapMessage, error) { + if p == nil { + return nil, nil, errors.New("Received nil Peer") + } + if incoming == nil { + return nil, nil, errors.New("Received nil Message") + } bs.strategy.MessageReceived(p, incoming) From 6739a45e9964fed1adec560c72df0a8d3f3adab3 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 12:57:45 -0700 Subject: [PATCH 0022/1035] refac(bitswap) less concurrency while testing and iterating This commit was moved from ipfs/go-bitswap@0881636a2e65ab3637e21811f7cbac283250c23a --- bitswap/bitswap.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 79f4d554b..98d8952ed 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -79,7 +79,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) message := bsmsg.New() message.AppendWanted(k) for i := range peersToQuery { - go func(p *peer.Peer) { + func(p *peer.Peer) { response, err := bs.sender.SendRequest(ctx, p, message) if err != nil { return @@ -109,7 +109,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) // HasBlock announces the existance of a block to bitswap, potentially sending // it to peers (Partners) whose WantLists include it. 
func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { - go bs.sendToPeersThatWant(ctx, blk) + bs.sendToPeersThatWant(ctx, blk) return bs.routing.Provide(blk.Key()) } @@ -128,9 +128,9 @@ func (bs *bitswap) ReceiveMessage( if incoming.Blocks() != nil { for _, block := range incoming.Blocks() { - go bs.blockstore.Put(block) // FIXME(brian): err ignored - go bs.notifications.Publish(block) - go bs.HasBlock(ctx, block) // FIXME err ignored + bs.blockstore.Put(block) // FIXME(brian): err ignored + bs.notifications.Publish(block) + bs.HasBlock(ctx, block) // FIXME err ignored } } @@ -139,12 +139,11 @@ func (bs *bitswap) ReceiveMessage( if bs.strategy.ShouldSendBlockToPeer(key, p) { block, errBlockNotFound := bs.blockstore.Get(key) if errBlockNotFound != nil { - // TODO(brian): log/return the error - continue + return nil, nil, errBlockNotFound } message := bsmsg.New() message.AppendBlock(*block) - go bs.send(ctx, p, message) + bs.send(ctx, p, message) } } } @@ -168,7 +167,7 @@ func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) if bs.strategy.ShouldSendBlockToPeer(block.Key(), p) { message := bsmsg.New() message.AppendBlock(block) - go bs.send(ctx, p, message) + bs.send(ctx, p, message) } } } From 9e4faa786d9170b46e04c5c61c9dc8bd70eefd70 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 12:58:12 -0700 Subject: [PATCH 0023/1035] test(bitswap) This commit was moved from ipfs/go-bitswap@c67d48d99de442735d351e5672eb3b4a60890468 --- bitswap/bitswap_test.go | 36 +++++++++++++++++++++++++++++++----- 1 file changed, 31 insertions(+), 5 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index cc2bb6fa3..646a6a7f9 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -33,19 +33,45 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { net := LocalNetwork() rs := newRoutingServer() - ipfs := session(net, rs, []byte("peer id")) - // ctx := context.Background() - ctx, 
_ := context.WithTimeout(context.Background(), time.Nanosecond) - block := testutil.NewBlockOrFail(t, "block") + block := testutil.NewBlockOrFail(t, "block") rs.Announce(&peer.Peer{}, block.Key()) // but not on network - _, err := ipfs.exchange.Block(ctx, block.Key()) + solo := session(net, rs, []byte("peer id")) + + ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) + _, err := solo.exchange.Block(ctx, block.Key()) + if err != context.DeadlineExceeded { t.Fatal("Expected DeadlineExceeded error") } } +// TestGetBlockAfterRequesting... + +func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { + t.Skip("Failing. Work in progress") + + net := LocalNetwork() + rs := newRoutingServer() + block := testutil.NewBlockOrFail(t, "block") + + hasBlock := session(net, rs, []byte("hasBlock")) + + rs.Announce(hasBlock.peer, block.Key()) + hasBlock.blockstore.Put(block) + hasBlock.exchange.HasBlock(context.Background(), block) + + wantsBlock := session(net, rs, []byte("wantsBlock")) + + ctx, _ := context.WithTimeout(context.Background(), time.Second) + _, err := wantsBlock.exchange.Block(ctx, block.Key()) + if err != nil { + t.Log(err) + t.Fatal("Expected to succeed") + } +} + type ipfs struct { peer *peer.Peer exchange exchange.Interface From b80b198272ee54d94a6795838415608b0dbbc0b5 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 13:15:15 -0700 Subject: [PATCH 0024/1035] refac(bitswap:testnet) give testnet its own package This commit was moved from ipfs/go-bitswap@01625e328d059268c0cc385e33b16d003342fb0e --- bitswap/bitswap.go | 13 ++----------- bitswap/bitswap_test.go | 15 ++++++++------- bitswap/network/interface.go | 10 ++++++++++ bitswap/{local_network.go => testnet/network.go} | 2 +- .../network_test.go} | 4 ++-- bitswap/{hash_table.go => testnet/routing.go} | 7 ++++--- .../routing_test.go} | 8 ++++---- 7 files changed, 31 insertions(+), 28 deletions(-) rename bitswap/{local_network.go => testnet/network.go} (99%) rename 
bitswap/{local_network_test.go => testnet/network_test.go} (98%) rename bitswap/{hash_table.go => testnet/routing.go} (89%) rename bitswap/{hash_table_test.go => testnet/routing_test.go} (96%) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 98d8952ed..d42f73889 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -17,18 +17,9 @@ import ( u "github.com/jbenet/go-ipfs/util" ) -// TODO rename -> Router? -type Routing interface { - // FindProvidersAsync returns a channel of providers for the given key - FindProvidersAsync(context.Context, u.Key, int) <-chan *peer.Peer - - // Provide provides the key to the network - Provide(key u.Key) error -} - // NetMessageSession initializes a BitSwap session that communicates over the // provided NetMessage service -func NetMessageSession(parent context.Context, s bsnet.NetMessageService, p *peer.Peer, d ds.Datastore, directory Routing) exchange.Interface { +func NetMessageSession(parent context.Context, s bsnet.NetMessageService, p *peer.Peer, d ds.Datastore, directory bsnet.Routing) exchange.Interface { networkAdapter := bsnet.NetMessageAdapter(s, nil) bs := &bitswap{ @@ -54,7 +45,7 @@ type bitswap struct { blockstore blockstore.Blockstore // routing interface for communication - routing Routing + routing bsnet.Routing notifications notifications.PubSub diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 646a6a7f9..dddcfe2c4 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -11,14 +11,15 @@ import ( exchange "github.com/jbenet/go-ipfs/exchange" notifications "github.com/jbenet/go-ipfs/exchange/bitswap/notifications" strategy "github.com/jbenet/go-ipfs/exchange/bitswap/strategy" + testnet "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" peer "github.com/jbenet/go-ipfs/peer" testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestGetBlockTimeout(t *testing.T) { - net := LocalNetwork() - rs := newRoutingServer() + net := testnet.VirtualNetwork() + rs := 
testnet.VirtualRoutingServer() ipfs := session(net, rs, []byte("peer id")) ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) block := testutil.NewBlockOrFail(t, "block") @@ -31,8 +32,8 @@ func TestGetBlockTimeout(t *testing.T) { func TestProviderForKeyButNetworkCannotFind(t *testing.T) { - net := LocalNetwork() - rs := newRoutingServer() + net := testnet.VirtualNetwork() + rs := testnet.VirtualRoutingServer() block := testutil.NewBlockOrFail(t, "block") rs.Announce(&peer.Peer{}, block.Key()) // but not on network @@ -52,8 +53,8 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { t.Skip("Failing. Work in progress") - net := LocalNetwork() - rs := newRoutingServer() + net := testnet.VirtualNetwork() + rs := testnet.VirtualRoutingServer() block := testutil.NewBlockOrFail(t, "block") hasBlock := session(net, rs, []byte("hasBlock")) @@ -78,7 +79,7 @@ type ipfs struct { blockstore bstore.Blockstore } -func session(net Network, rs RoutingServer, id peer.ID) ipfs { +func session(net testnet.Network, rs testnet.RoutingServer, id peer.ID) ipfs { p := &peer.Peer{} adapter := net.Adapter(p) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 29bb0da3b..a84775c15 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -7,6 +7,7 @@ import ( bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" netmsg "github.com/jbenet/go-ipfs/net/message" peer "github.com/jbenet/go-ipfs/peer" + u "github.com/jbenet/go-ipfs/util" ) // Adapter provides network connectivity for BitSwap sessions @@ -41,3 +42,12 @@ type NetMessageService interface { SendMessage(ctx context.Context, m netmsg.NetMessage) error SetHandler(netservice.Handler) } + +// TODO rename -> Router? 
+type Routing interface { + // FindProvidersAsync returns a channel of providers for the given key + FindProvidersAsync(context.Context, u.Key, int) <-chan *peer.Peer + + // Provide provides the key to the network + Provide(key u.Key) error +} diff --git a/bitswap/local_network.go b/bitswap/testnet/network.go similarity index 99% rename from bitswap/local_network.go rename to bitswap/testnet/network.go index ff8d5de4c..5039e730b 100644 --- a/bitswap/local_network.go +++ b/bitswap/testnet/network.go @@ -30,7 +30,7 @@ type Network interface { // network impl -func LocalNetwork() Network { +func VirtualNetwork() Network { return &network{ clients: make(map[util.Key]bsnet.Receiver), } diff --git a/bitswap/local_network_test.go b/bitswap/testnet/network_test.go similarity index 98% rename from bitswap/local_network_test.go rename to bitswap/testnet/network_test.go index e5bbda7a0..70b0615db 100644 --- a/bitswap/local_network_test.go +++ b/bitswap/testnet/network_test.go @@ -12,7 +12,7 @@ import ( ) func TestSendRequestToCooperativePeer(t *testing.T) { - net := LocalNetwork() + net := VirtualNetwork() idOfRecipient := []byte("recipient") @@ -59,7 +59,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) { } func TestSendMessageAsyncButWaitForResponse(t *testing.T) { - net := LocalNetwork() + net := VirtualNetwork() idOfResponder := []byte("responder") waiter := net.Adapter(&peer.Peer{ID: []byte("waiter")}) responder := net.Adapter(&peer.Peer{ID: idOfResponder}) diff --git a/bitswap/hash_table.go b/bitswap/testnet/routing.go similarity index 89% rename from bitswap/hash_table.go rename to bitswap/testnet/routing.go index d030a0f5d..914623778 100644 --- a/bitswap/hash_table.go +++ b/bitswap/testnet/routing.go @@ -5,6 +5,7 @@ import ( "sync" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" ) @@ 
-18,10 +19,10 @@ type RoutingServer interface { // TODO // Returns a Routing instance configured to query this hash table - Client(*peer.Peer) Routing + Client(*peer.Peer) bsnet.Routing } -func newRoutingServer() RoutingServer { +func VirtualRoutingServer() RoutingServer { return &hashTable{ m: make(map[u.Key]map[*peer.Peer]bool), } @@ -61,7 +62,7 @@ func (rs *hashTable) Providers(k u.Key) []*peer.Peer { } // TODO -func (rs *hashTable) Client(p *peer.Peer) Routing { +func (rs *hashTable) Client(p *peer.Peer) bsnet.Routing { return &routingClient{ peer: p, hashTable: rs, diff --git a/bitswap/hash_table_test.go b/bitswap/testnet/routing_test.go similarity index 96% rename from bitswap/hash_table_test.go rename to bitswap/testnet/routing_test.go index fafc1fd9a..d1015ef9c 100644 --- a/bitswap/hash_table_test.go +++ b/bitswap/testnet/routing_test.go @@ -29,7 +29,7 @@ func TestSetAndGet(t *testing.T) { ID: pid, } k := u.Key("42") - rs := newRoutingServer() + rs := VirtualRoutingServer() err := rs.Announce(p, k) if err != nil { t.Fatal(err) @@ -50,7 +50,7 @@ func TestClientFindProviders(t *testing.T) { peer := &peer.Peer{ ID: []byte("42"), } - rs := newRoutingServer() + rs := VirtualRoutingServer() client := rs.Client(peer) k := u.Key("hello") err := client.Provide(k) @@ -83,7 +83,7 @@ func TestClientFindProviders(t *testing.T) { } func TestClientOverMax(t *testing.T) { - rs := newRoutingServer() + rs := VirtualRoutingServer() k := u.Key("hello") numProvidersForHelloKey := 100 for i := 0; i < numProvidersForHelloKey; i++ { @@ -115,7 +115,7 @@ func TestClientOverMax(t *testing.T) { // TODO does dht ensure won't receive self as a provider? probably not. 
func TestCanceledContext(t *testing.T) { - rs := newRoutingServer() + rs := VirtualRoutingServer() k := u.Key("hello") t.Log("async'ly announce infinite stream of providers for key") From a7f6c31d98489bed4487dc92008fbdb44d35a4e0 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 14:46:15 -0700 Subject: [PATCH 0025/1035] fix(bitswap:testnet) use peer.Map This commit was moved from ipfs/go-bitswap@c1873b897373d9c2c3fe8b6569c19852bf432f01 --- bitswap/testnet/routing.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/bitswap/testnet/routing.go b/bitswap/testnet/routing.go index 914623778..71a5bfeae 100644 --- a/bitswap/testnet/routing.go +++ b/bitswap/testnet/routing.go @@ -24,13 +24,13 @@ type RoutingServer interface { func VirtualRoutingServer() RoutingServer { return &hashTable{ - m: make(map[u.Key]map[*peer.Peer]bool), + providers: make(map[u.Key]peer.Map), } } type hashTable struct { - lock sync.RWMutex - m map[u.Key]map[*peer.Peer]bool + lock sync.RWMutex + providers map[u.Key]peer.Map } var TODO = errors.New("TODO") @@ -39,11 +39,11 @@ func (rs *hashTable) Announce(p *peer.Peer, k u.Key) error { rs.lock.Lock() defer rs.lock.Unlock() - _, ok := rs.m[k] + _, ok := rs.providers[k] if !ok { - rs.m[k] = make(map[*peer.Peer]bool) + rs.providers[k] = make(peer.Map) } - rs.m[k][p] = true + rs.providers[k][p.Key()] = p return nil } @@ -51,11 +51,11 @@ func (rs *hashTable) Providers(k u.Key) []*peer.Peer { rs.lock.RLock() defer rs.lock.RUnlock() ret := make([]*peer.Peer, 0) - peerset, ok := rs.m[k] + peerset, ok := rs.providers[k] if !ok { return ret } - for peer, _ := range peerset { + for _, peer := range peerset { ret = append(ret, peer) } return ret From d1984096d9d867c458464c745c2cdb5c6b97b263 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 15:25:14 -0700 Subject: [PATCH 0026/1035] fix(bitswap:message) don't use proto internally This commit was moved from 
ipfs/go-bitswap@d2e4bad4d16982ab389de8589047db112a13e6ad --- bitswap/message/message.go | 52 ++++++++++++++++++--------------- bitswap/message/message_test.go | 28 +++++++++++++++++- 2 files changed, 56 insertions(+), 24 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index dc6506313..32109b8f0 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -26,45 +26,45 @@ type Exportable interface { // message wraps a proto message for convenience type message struct { - pb PBMessage -} - -func newMessageFromProto(pb PBMessage) *message { - return &message{pb: pb} + wantlist []u.Key + blocks []blocks.Block } func New() *message { return new(message) } +func newMessageFromProto(pbm PBMessage) (BitSwapMessage, error) { + m := New() + for _, s := range pbm.GetWantlist() { + m.AppendWanted(u.Key(s)) + } + for _, d := range pbm.GetBlocks() { + b, err := blocks.NewBlock(d) + if err != nil { + return nil, err + } + m.AppendBlock(*b) + } + return m, nil +} + // TODO(brian): convert these into keys func (m *message) Wantlist() []u.Key { - wl := make([]u.Key, len(m.pb.Wantlist)) - for _, str := range m.pb.Wantlist { - wl = append(wl, u.Key(str)) - } - return wl + return m.wantlist } // TODO(brian): convert these into blocks func (m *message) Blocks() []blocks.Block { - bs := make([]blocks.Block, len(m.pb.Blocks)) - for _, data := range m.pb.Blocks { - b, err := blocks.NewBlock(data) - if err != nil { - continue - } - bs = append(bs, *b) - } - return bs + return m.blocks } func (m *message) AppendWanted(k u.Key) { - m.pb.Wantlist = append(m.pb.Wantlist, string(k)) + m.wantlist = append(m.wantlist, k) } func (m *message) AppendBlock(b blocks.Block) { - m.pb.Blocks = append(m.pb.Blocks, b.Data) + m.blocks = append(m.blocks, b) } func FromNet(nmsg netmsg.NetMessage) (BitSwapMessage, error) { @@ -72,8 +72,14 @@ func FromNet(nmsg netmsg.NetMessage) (BitSwapMessage, error) { } func (m *message) ToProto() *PBMessage { - cp := m.pb - 
return &cp + pb := new(PBMessage) + for _, k := range m.Wantlist() { + pb.Wantlist = append(pb.Wantlist, string(k)) + } + for _, b := range m.Blocks() { + pb.Blocks = append(pb.Blocks, b.Data) + } + return pb } func (m *message) ToNet(p *peer.Peer) (nm.NetMessage, error) { diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 8ff345f1c..e4b9e123f 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -25,7 +25,10 @@ func TestNewMessageFromProto(t *testing.T) { if !contains(protoMessage.Wantlist, str) { t.Fail() } - m := newMessageFromProto(*protoMessage) + m, err := newMessageFromProto(*protoMessage) + if err != nil { + t.Fatal(err) + } if !contains(m.ToProto().GetWantlist(), str) { t.Fail() } @@ -52,6 +55,29 @@ func TestAppendBlock(t *testing.T) { } } +func TestWantlist(t *testing.T) { + keystrs := []string{"foo", "bar", "baz", "bat"} + m := New() + for _, s := range keystrs { + m.AppendWanted(u.Key(s)) + } + exported := m.Wantlist() + + for _, k := range exported { + present := false + for _, s := range keystrs { + + if s == string(k) { + present = true + } + } + if !present { + t.Logf("%v isn't in original list", string(k)) + t.Fail() + } + } +} + func TestCopyProtoByValue(t *testing.T) { const str = "foo" m := New() From 68e401bc0f1d55d065409890dd140d3d07adb478 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 15:31:23 -0700 Subject: [PATCH 0027/1035] test(bitswap) send block from one instance to another This commit was moved from ipfs/go-bitswap@4ba4634795f574728fc0c65cbd8120b9e83346f2 --- bitswap/bitswap.go | 13 ++++++++++--- bitswap/bitswap_test.go | 30 ++++++++++++++++++++---------- 2 files changed, 30 insertions(+), 13 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d42f73889..4c2fe84a4 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -119,9 +119,15 @@ func (bs *bitswap) ReceiveMessage( if incoming.Blocks() != nil { for _, block := range 
incoming.Blocks() { - bs.blockstore.Put(block) // FIXME(brian): err ignored + err := bs.blockstore.Put(block) // FIXME(brian): err ignored + if err != nil { + return nil, nil, err + } bs.notifications.Publish(block) - bs.HasBlock(ctx, block) // FIXME err ignored + err = bs.HasBlock(ctx, block) // FIXME err ignored + if err != nil { + return nil, nil, err + } } } @@ -134,7 +140,8 @@ func (bs *bitswap) ReceiveMessage( } message := bsmsg.New() message.AppendBlock(*block) - bs.send(ctx, p, message) + defer bs.strategy.MessageSent(p, message) + return p, message, nil } } } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index dddcfe2c4..67dfa0719 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -1,6 +1,7 @@ package bitswap import ( + "bytes" "testing" "time" @@ -20,11 +21,13 @@ func TestGetBlockTimeout(t *testing.T) { net := testnet.VirtualNetwork() rs := testnet.VirtualRoutingServer() - ipfs := session(net, rs, []byte("peer id")) + + self := session(net, rs, []byte("peer id")) + ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) block := testutil.NewBlockOrFail(t, "block") + _, err := self.exchange.Block(ctx, block.Key()) - _, err := ipfs.exchange.Block(ctx, block.Key()) if err != context.DeadlineExceeded { t.Fatal("Expected DeadlineExceeded error") } @@ -59,28 +62,35 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { hasBlock := session(net, rs, []byte("hasBlock")) - rs.Announce(hasBlock.peer, block.Key()) - hasBlock.blockstore.Put(block) - hasBlock.exchange.HasBlock(context.Background(), block) + if err := hasBlock.blockstore.Put(block); err != nil { + t.Fatal(err) + } + if err := hasBlock.exchange.HasBlock(context.Background(), block); err != nil { + t.Fatal(err) + } wantsBlock := session(net, rs, []byte("wantsBlock")) ctx, _ := context.WithTimeout(context.Background(), time.Second) - _, err := wantsBlock.exchange.Block(ctx, block.Key()) + received, err := wantsBlock.exchange.Block(ctx, block.Key()) 
if err != nil { t.Log(err) t.Fatal("Expected to succeed") } + + if !bytes.Equal(block.Data, received.Data) { + t.Fatal("Data doesn't match") + } } -type ipfs struct { +type testnetBitSwap struct { peer *peer.Peer exchange exchange.Interface blockstore bstore.Blockstore } -func session(net testnet.Network, rs testnet.RoutingServer, id peer.ID) ipfs { - p := &peer.Peer{} +func session(net testnet.Network, rs testnet.RoutingServer, id peer.ID) testnetBitSwap { + p := &peer.Peer{ID: id} adapter := net.Adapter(p) htc := rs.Client(p) @@ -94,7 +104,7 @@ func session(net testnet.Network, rs testnet.RoutingServer, id peer.ID) ipfs { sender: adapter, } adapter.SetDelegate(bs) - return ipfs{ + return testnetBitSwap{ peer: p, exchange: bs, blockstore: blockstore, From 03e8aef4a70432c57cb15cb5fe1f6d6220c934e0 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 15:35:06 -0700 Subject: [PATCH 0028/1035] refac(exch:offline) move offline exchange to its own package This commit was moved from ipfs/go-bitswap@cedc1c383a46070d47c3007feb5bfb6ae26384cf --- bitswap/offline.go | 32 -------------------------------- bitswap/offline_test.go | 28 ---------------------------- 2 files changed, 60 deletions(-) delete mode 100644 bitswap/offline.go delete mode 100644 bitswap/offline_test.go diff --git a/bitswap/offline.go b/bitswap/offline.go deleted file mode 100644 index 9695b0b56..000000000 --- a/bitswap/offline.go +++ /dev/null @@ -1,32 +0,0 @@ -package bitswap - -import ( - "errors" - - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - - blocks "github.com/jbenet/go-ipfs/blocks" - exchange "github.com/jbenet/go-ipfs/exchange" - u "github.com/jbenet/go-ipfs/util" -) - -func NewOfflineExchange() exchange.Interface { - return &offlineExchange{} -} - -// offlineExchange implements the Exchange interface but doesn't return blocks. -// For use in offline mode. 
-type offlineExchange struct { -} - -// Block returns nil to signal that a block could not be retrieved for the -// given key. -// NB: This function may return before the timeout expires. -func (_ *offlineExchange) Block(context.Context, u.Key) (*blocks.Block, error) { - return nil, errors.New("Block unavailable. Operating in offline mode") -} - -// HasBlock always returns nil. -func (_ *offlineExchange) HasBlock(context.Context, blocks.Block) error { - return nil -} diff --git a/bitswap/offline_test.go b/bitswap/offline_test.go deleted file mode 100644 index 26821f2c8..000000000 --- a/bitswap/offline_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package bitswap - -import ( - "testing" - - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - - u "github.com/jbenet/go-ipfs/util" - testutil "github.com/jbenet/go-ipfs/util/testutil" -) - -func TestBlockReturnsErr(t *testing.T) { - off := NewOfflineExchange() - _, err := off.Block(context.Background(), u.Key("foo")) - if err != nil { - return // as desired - } - t.Fail() -} - -func TestHasBlockReturnsNil(t *testing.T) { - off := NewOfflineExchange() - block := testutil.NewBlockOrFail(t, "data") - err := off.HasBlock(context.Background(), block) - if err != nil { - t.Fatal("") - } -} From e4bd9e95255ccb803c6f2ee3ef2c076bb27dd9ab Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 15:39:14 -0700 Subject: [PATCH 0029/1035] test(bitswap) enable get block test This commit was moved from ipfs/go-bitswap@b40ee0f19f30403efce35cd7e39e3dd22f27bc16 --- bitswap/bitswap_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 67dfa0719..383c1f44c 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -54,7 +54,6 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TestGetBlockAfterRequesting... func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { - t.Skip("Failing. 
Work in progress") net := testnet.VirtualNetwork() rs := testnet.VirtualRoutingServer() From e3edaf3eb46b7c1bd3433e1df315ce5a1b52d0ed Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 15:42:12 -0700 Subject: [PATCH 0030/1035] chore(bitswap) rm unused helper func This commit was moved from ipfs/go-bitswap@554b5a490c1f2f3f180f52476b982babcbb1535b --- bitswap/bitswap.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 4c2fe84a4..3ee871069 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -155,10 +155,6 @@ func (bs *bitswap) send(ctx context.Context, p *peer.Peer, m bsmsg.BitSwapMessag bs.strategy.MessageSent(p, m) } -func numBytes(b blocks.Block) int { - return len(b.Data) -} - func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) { for _, p := range bs.strategy.Peers() { if bs.strategy.BlockIsWantedByPeer(block.Key(), p) { From fc3cad6ba7949b81b0d31497efa1f0582c2cae2d Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 15:44:11 -0700 Subject: [PATCH 0031/1035] refac(bitswap) nil slices are 'range'able This commit was moved from ipfs/go-bitswap@4b4834e5ba83917fdda1b0d9908699aedac2cf67 --- bitswap/bitswap.go | 40 ++++++++++++++++++---------------------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 3ee871069..84cb52eb9 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -117,32 +117,28 @@ func (bs *bitswap) ReceiveMessage( bs.strategy.MessageReceived(p, incoming) - if incoming.Blocks() != nil { - for _, block := range incoming.Blocks() { - err := bs.blockstore.Put(block) // FIXME(brian): err ignored - if err != nil { - return nil, nil, err - } - bs.notifications.Publish(block) - err = bs.HasBlock(ctx, block) // FIXME err ignored - if err != nil { - return nil, nil, err - } + for _, block := range incoming.Blocks() { + err := bs.blockstore.Put(block) // FIXME(brian): err 
ignored + if err != nil { + return nil, nil, err + } + bs.notifications.Publish(block) + err = bs.HasBlock(ctx, block) // FIXME err ignored + if err != nil { + return nil, nil, err } } - if incoming.Wantlist() != nil { - for _, key := range incoming.Wantlist() { - if bs.strategy.ShouldSendBlockToPeer(key, p) { - block, errBlockNotFound := bs.blockstore.Get(key) - if errBlockNotFound != nil { - return nil, nil, errBlockNotFound - } - message := bsmsg.New() - message.AppendBlock(*block) - defer bs.strategy.MessageSent(p, message) - return p, message, nil + for _, key := range incoming.Wantlist() { + if bs.strategy.ShouldSendBlockToPeer(key, p) { + block, errBlockNotFound := bs.blockstore.Get(key) + if errBlockNotFound != nil { + return nil, nil, errBlockNotFound } + message := bsmsg.New() + message.AppendBlock(*block) + defer bs.strategy.MessageSent(p, message) + return p, message, nil } } return nil, nil, nil From a891336ce4f465f4043bae2c49b528c0c1b5e52c Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 16:03:05 -0700 Subject: [PATCH 0032/1035] test(bitswap) add SessionGenerator This commit was moved from ipfs/go-bitswap@666443af26a1d135655c395eae69268c698ee280 --- bitswap/bitswap_test.go | 65 +++++++++++++++++++++++++++++------------ 1 file changed, 47 insertions(+), 18 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 383c1f44c..a68f0667f 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -12,17 +12,18 @@ import ( exchange "github.com/jbenet/go-ipfs/exchange" notifications "github.com/jbenet/go-ipfs/exchange/bitswap/notifications" strategy "github.com/jbenet/go-ipfs/exchange/bitswap/strategy" - testnet "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" + tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" peer "github.com/jbenet/go-ipfs/peer" testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestGetBlockTimeout(t *testing.T) { - net := testnet.VirtualNetwork() - rs := 
testnet.VirtualRoutingServer() + net := tn.VirtualNetwork() + rs := tn.VirtualRoutingServer() + g := NewSessionGenerator(net, rs) - self := session(net, rs, []byte("peer id")) + self := g.Next() ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) block := testutil.NewBlockOrFail(t, "block") @@ -35,13 +36,14 @@ func TestGetBlockTimeout(t *testing.T) { func TestProviderForKeyButNetworkCannotFind(t *testing.T) { - net := testnet.VirtualNetwork() - rs := testnet.VirtualRoutingServer() + net := tn.VirtualNetwork() + rs := tn.VirtualRoutingServer() + g := NewSessionGenerator(net, rs) block := testutil.NewBlockOrFail(t, "block") rs.Announce(&peer.Peer{}, block.Key()) // but not on network - solo := session(net, rs, []byte("peer id")) + solo := g.Next() ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) _, err := solo.exchange.Block(ctx, block.Key()) @@ -55,11 +57,12 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { - net := testnet.VirtualNetwork() - rs := testnet.VirtualRoutingServer() + net := tn.VirtualNetwork() + rs := tn.VirtualRoutingServer() block := testutil.NewBlockOrFail(t, "block") + g := NewSessionGenerator(net, rs) - hasBlock := session(net, rs, []byte("hasBlock")) + hasBlock := g.Next() if err := hasBlock.blockstore.Put(block); err != nil { t.Fatal(err) @@ -68,7 +71,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { t.Fatal(err) } - wantsBlock := session(net, rs, []byte("wantsBlock")) + wantsBlock := g.Next() ctx, _ := context.WithTimeout(context.Background(), time.Second) received, err := wantsBlock.exchange.Block(ctx, block.Key()) @@ -82,13 +85,45 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { } } +func TestSendToWantingPeer(t *testing.T) { + t.Log("I get a file from peer |w|. 
In this message, I receive |w|'s wants") + t.Log("Peer |w| tells me it wants file |f|, but I don't have it") + t.Log("Later, peer |o| sends |f| to me") + t.Log("After receiving |f| from |o|, I send it to the wanting peer |w|") +} + +func NewSessionGenerator( + net tn.Network, rs tn.RoutingServer) SessionGenerator { + return SessionGenerator{ + net: net, + rs: rs, + seq: 0, + } +} + +type SessionGenerator struct { + seq int + net tn.Network + rs tn.RoutingServer +} + +func (g *SessionGenerator) Next() testnetBitSwap { + g.seq++ + return session(g.net, g.rs, []byte(string(g.seq))) +} + type testnetBitSwap struct { peer *peer.Peer exchange exchange.Interface blockstore bstore.Blockstore } -func session(net testnet.Network, rs testnet.RoutingServer, id peer.ID) testnetBitSwap { +// session creates a test bitswap session. +// +// NB: It's easy make mistakes by providing the same peer ID to two different +// sessions. To safeguard, use the SessionGenerator to generate sessions. It's +// just a much better idea. 
+func session(net tn.Network, rs tn.RoutingServer, id peer.ID) testnetBitSwap { p := &peer.Peer{ID: id} adapter := net.Adapter(p) @@ -109,9 +144,3 @@ func session(net testnet.Network, rs testnet.RoutingServer, id peer.ID) testnetB blockstore: blockstore, } } - -func TestSendToWantingPeer(t *testing.T) { - t.Log("Peer |w| tells me it wants file, but I don't have it") - t.Log("Then another peer |o| sends it to me") - t.Log("After receiving the file from |o|, I send it to the wanting peer |w|") -} From 4003fb1588d625d772b6b8ebc32533af96892bc9 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 17:34:13 -0700 Subject: [PATCH 0033/1035] docs(bitswap:strat) interface comments This commit was moved from ipfs/go-bitswap@e040a00ad698e8a3fd9f65fa08a6d1b57c6ff43a --- bitswap/strategy/interface.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/bitswap/strategy/interface.go b/bitswap/strategy/interface.go index a95ea8bd2..1a0e14948 100644 --- a/bitswap/strategy/interface.go +++ b/bitswap/strategy/interface.go @@ -7,10 +7,11 @@ import ( ) type Strategy interface { - // Returns a slice of Peers that + // Returns a slice of Peers with whom the local node has active sessions Peers() []*peer.Peer - // WantList returns the WantList for the given Peer + // BlockIsWantedByPeer returns true if peer wants the block given by this + // key BlockIsWantedByPeer(u.Key, *peer.Peer) bool // ShouldSendTo(Peer) decides whether to send data to this Peer From 59b56af4a823bbca44701ca24b0bb7794dacedfe Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 18:16:02 -0700 Subject: [PATCH 0034/1035] test(bitswap:testnet) shuffle the providers to avoid letting client rely on order for correctness This commit was moved from ipfs/go-bitswap@b7660f5f413e9f2f13b918cfba3a6015aaa438f8 --- bitswap/testnet/routing.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/bitswap/testnet/routing.go b/bitswap/testnet/routing.go index 71a5bfeae..b181e2abc 
100644 --- a/bitswap/testnet/routing.go +++ b/bitswap/testnet/routing.go @@ -2,6 +2,7 @@ package bitswap import ( "errors" + "math/rand" "sync" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" @@ -58,6 +59,12 @@ func (rs *hashTable) Providers(k u.Key) []*peer.Peer { for _, peer := range peerset { ret = append(ret, peer) } + + for i := range ret { + j := rand.Intn(i + 1) + ret[i], ret[j] = ret[j], ret[i] + } + return ret } From 1e99bb333c80b614e1886a2b81b45361705282ca Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 16:12:46 -0700 Subject: [PATCH 0035/1035] feat(bitswap) ACTIVATE FULL CONCURRENCY cap'n fix(bitswap) Put synchronously. Then notify async This commit was moved from ipfs/go-bitswap@fd69a432b3a8004704b7047ebfcd2c5b90d2ff46 --- bitswap/bitswap.go | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 84cb52eb9..0eaab521c 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -62,6 +62,7 @@ type bitswap struct { func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) { ctx, cancelFunc := context.WithCancel(parent) + // TODO add to wantlist promise := bs.notifications.Subscribe(ctx, k) go func() { @@ -69,8 +70,8 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) peersToQuery := bs.routing.FindProvidersAsync(ctx, k, maxProviders) message := bsmsg.New() message.AppendWanted(k) - for i := range peersToQuery { - func(p *peer.Peer) { + for iiiii := range peersToQuery { + go func(p *peer.Peer) { response, err := bs.sender.SendRequest(ctx, p, message) if err != nil { return @@ -84,13 +85,14 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) return } bs.ReceiveMessage(ctx, p, response) - }(i) + }(iiiii) } }() select { case block := <-promise: cancelFunc() + // TODO remove from wantlist return &block, nil case 
<-parent.Done(): return nil, parent.Err() @@ -115,18 +117,17 @@ func (bs *bitswap) ReceiveMessage( return nil, nil, errors.New("Received nil Message") } - bs.strategy.MessageReceived(p, incoming) + bs.strategy.MessageReceived(p, incoming) // FIRST for _, block := range incoming.Blocks() { - err := bs.blockstore.Put(block) // FIXME(brian): err ignored - if err != nil { - return nil, nil, err - } - bs.notifications.Publish(block) - err = bs.HasBlock(ctx, block) // FIXME err ignored - if err != nil { - return nil, nil, err + // TODO verify blocks? + if err := bs.blockstore.Put(block); err != nil { + continue // FIXME(brian): err ignored } + go bs.notifications.Publish(block) + go func() { + _ = bs.HasBlock(ctx, block) // FIXME err ignored + }() } for _, key := range incoming.Wantlist() { @@ -148,7 +149,7 @@ func (bs *bitswap) ReceiveMessage( // sent func (bs *bitswap) send(ctx context.Context, p *peer.Peer, m bsmsg.BitSwapMessage) { bs.sender.SendMessage(ctx, p, m) - bs.strategy.MessageSent(p, m) + go bs.strategy.MessageSent(p, m) } func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) { @@ -157,7 +158,7 @@ func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) if bs.strategy.ShouldSendBlockToPeer(block.Key(), p) { message := bsmsg.New() message.AppendBlock(block) - bs.send(ctx, p, message) + go bs.send(ctx, p, message) } } } From dc35b63bcc7598ce597352b052ad192a9591a0be Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 17:32:53 -0700 Subject: [PATCH 0036/1035] test(bitswap) test with swarm of ~500 instances test(bitswap) run synchronously to aid the scheduler This commit was moved from ipfs/go-bitswap@27386c5c472991f948a927b1bb53ed9c06a23dc3 --- bitswap/bitswap_test.go | 102 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 98 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index a68f0667f..0badc6917 100644 --- a/bitswap/bitswap_test.go +++ 
b/bitswap/bitswap_test.go @@ -2,12 +2,14 @@ package bitswap import ( "bytes" + "sync" "testing" "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + "github.com/jbenet/go-ipfs/blocks" bstore "github.com/jbenet/go-ipfs/blockstore" exchange "github.com/jbenet/go-ipfs/exchange" notifications "github.com/jbenet/go-ipfs/exchange/bitswap/notifications" @@ -85,6 +87,64 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { } } +func TestSwarm(t *testing.T) { + net := tn.VirtualNetwork() + rs := tn.VirtualRoutingServer() + sg := NewSessionGenerator(net, rs) + bg := NewBlockGenerator(t) + + t.Log("Create a ton of instances, and just a few blocks") + + numInstances := 500 + numBlocks := 2 + + instances := sg.Instances(numInstances) + blocks := bg.Blocks(numBlocks) + + t.Log("Give the blocks to the first instance") + + first := instances[0] + for _, b := range blocks { + first.blockstore.Put(*b) + first.exchange.HasBlock(context.Background(), *b) + rs.Announce(first.peer, b.Key()) + } + + t.Log("Distribute!") + + var wg sync.WaitGroup + + for _, inst := range instances { + for _, b := range blocks { + wg.Add(1) + // NB: executing getOrFail concurrently puts tremendous pressure on + // the goroutine scheduler + getOrFail(inst, b, t, &wg) + } + } + wg.Wait() + + t.Log("Verify!") + + for _, inst := range instances { + for _, b := range blocks { + if _, err := inst.blockstore.Get(b.Key()); err != nil { + t.Fatal(err) + } + } + } +} + +func getOrFail(bitswap instance, b *blocks.Block, t *testing.T, wg *sync.WaitGroup) { + if _, err := bitswap.blockstore.Get(b.Key()); err != nil { + _, err := bitswap.exchange.Block(context.Background(), b.Key()) + if err != nil { + t.Fatal(err) + } + } + wg.Done() +} + func TestSendToWantingPeer(t *testing.T) { t.Log("I get a file from peer |w|. 
In this message, I receive |w|'s wants") t.Log("Peer |w| tells me it wants file |f|, but I don't have it") @@ -92,6 +152,31 @@ func TestSendToWantingPeer(t *testing.T) { t.Log("After receiving |f| from |o|, I send it to the wanting peer |w|") } +func NewBlockGenerator(t *testing.T) BlockGenerator { + return BlockGenerator{ + T: t, + } +} + +type BlockGenerator struct { + *testing.T // b/c block generation can fail + seq int +} + +func (bg *BlockGenerator) Next() blocks.Block { + bg.seq++ + return testutil.NewBlockOrFail(bg.T, string(bg.seq)) +} + +func (bg *BlockGenerator) Blocks(n int) []*blocks.Block { + blocks := make([]*blocks.Block, 0) + for i := 0; i < n; i++ { + b := bg.Next() + blocks = append(blocks, &b) + } + return blocks +} + func NewSessionGenerator( net tn.Network, rs tn.RoutingServer) SessionGenerator { return SessionGenerator{ @@ -107,12 +192,21 @@ type SessionGenerator struct { rs tn.RoutingServer } -func (g *SessionGenerator) Next() testnetBitSwap { +func (g *SessionGenerator) Next() instance { g.seq++ return session(g.net, g.rs, []byte(string(g.seq))) } -type testnetBitSwap struct { +func (g *SessionGenerator) Instances(n int) []instance { + instances := make([]instance, 0) + for j := 0; j < n; j++ { + inst := g.Next() + instances = append(instances, inst) + } + return instances +} + +type instance struct { peer *peer.Peer exchange exchange.Interface blockstore bstore.Blockstore @@ -123,7 +217,7 @@ type testnetBitSwap struct { // NB: It's easy make mistakes by providing the same peer ID to two different // sessions. To safeguard, use the SessionGenerator to generate sessions. It's // just a much better idea. 
-func session(net tn.Network, rs tn.RoutingServer, id peer.ID) testnetBitSwap { +func session(net tn.Network, rs tn.RoutingServer, id peer.ID) instance { p := &peer.Peer{ID: id} adapter := net.Adapter(p) @@ -138,7 +232,7 @@ func session(net tn.Network, rs tn.RoutingServer, id peer.ID) testnetBitSwap { sender: adapter, } adapter.SetDelegate(bs) - return testnetBitSwap{ + return instance{ peer: p, exchange: bs, blockstore: blockstore, From ebc952506f220ad3891ba43100493a9aa48cb381 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 19 Sep 2014 19:21:53 -0700 Subject: [PATCH 0037/1035] feat(bitswap:message) implement FromNet This commit was moved from ipfs/go-bitswap@1aaa88fa9a1d4d410b7b8fa4e383947365db2eb3 --- bitswap/message/message.go | 16 ++++--- bitswap/message/message_test.go | 74 +++++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+), 5 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 32109b8f0..22258e17f 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -1,11 +1,9 @@ package message import ( - "errors" - - netmsg "github.com/jbenet/go-ipfs/net/message" - + proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" blocks "github.com/jbenet/go-ipfs/blocks" + netmsg "github.com/jbenet/go-ipfs/net/message" nm "github.com/jbenet/go-ipfs/net/message" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" @@ -68,7 +66,15 @@ func (m *message) AppendBlock(b blocks.Block) { } func FromNet(nmsg netmsg.NetMessage) (BitSwapMessage, error) { - return nil, errors.New("TODO implement") + pb := new(PBMessage) + if err := proto.Unmarshal(nmsg.Data(), pb); err != nil { + return nil, err + } + m, err := newMessageFromProto(*pb) + if err != nil { + return nil, err + } + return m, nil } func (m *message) ToProto() *PBMessage { diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index e4b9e123f..9590f1ff1 100644 --- 
a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -4,6 +4,7 @@ import ( "bytes" "testing" + peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" testutil "github.com/jbenet/go-ipfs/util/testutil" ) @@ -88,6 +89,79 @@ func TestCopyProtoByValue(t *testing.T) { } } +func TestToNetMethodSetsPeer(t *testing.T) { + m := New() + p := &peer.Peer{ID: []byte("X")} + netmsg, err := m.ToNet(p) + if err != nil { + t.Fatal(err) + } + if !(netmsg.Peer().Key() == p.Key()) { + t.Fatal("Peer key is different") + } +} + +func TestToNetFromNetPreservesWantList(t *testing.T) { + original := New() + original.AppendWanted(u.Key("M")) + original.AppendWanted(u.Key("B")) + original.AppendWanted(u.Key("D")) + original.AppendWanted(u.Key("T")) + original.AppendWanted(u.Key("F")) + + netmsg, err := original.ToNet(&peer.Peer{ID: []byte("X")}) + if err != nil { + t.Fatal(err) + } + + copied, err := FromNet(netmsg) + if err != nil { + t.Fatal(err) + } + + keys := make(map[u.Key]bool) + for _, k := range copied.Wantlist() { + keys[k] = true + } + + for _, k := range original.Wantlist() { + if _, ok := keys[k]; !ok { + t.Fatalf("Key Missing: \"%v\"", k) + } + } +} + +func TestToAndFromNetMessage(t *testing.T) { + + original := New() + original.AppendBlock(testutil.NewBlockOrFail(t, "W")) + original.AppendBlock(testutil.NewBlockOrFail(t, "E")) + original.AppendBlock(testutil.NewBlockOrFail(t, "F")) + original.AppendBlock(testutil.NewBlockOrFail(t, "M")) + + p := &peer.Peer{ID: []byte("X")} + netmsg, err := original.ToNet(p) + if err != nil { + t.Fatal(err) + } + + m2, err := FromNet(netmsg) + if err != nil { + t.Fatal(err) + } + + keys := make(map[u.Key]bool) + for _, b := range m2.Blocks() { + keys[b.Key()] = true + } + + for _, b := range original.Blocks() { + if _, ok := keys[b.Key()]; !ok { + t.Fail() + } + } +} + func contains(s []string, x string) bool { for _, a := range s { if a == x { From 18872d25f93d7e9930d8b774d2e295c4e96a9cf7 Mon Sep 17 
00:00:00 2001 From: Juan Batiz-Benet Date: Sun, 21 Sep 2014 18:04:43 -0700 Subject: [PATCH 0038/1035] Routing uses context now @perfmode boom This commit was moved from ipfs/go-bitswap@3696041f0e8f20136a88fd111d71a40b8c3e63d4 --- bitswap/bitswap.go | 2 +- bitswap/network/interface.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 0eaab521c..4f63e6c8c 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -103,7 +103,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) // it to peers (Partners) whose WantLists include it. func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { bs.sendToPeersThatWant(ctx, blk) - return bs.routing.Provide(blk.Key()) + return bs.routing.Provide(ctx, blk.Key()) } // TODO(brian): handle errors diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index a84775c15..15fa9c89e 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -49,5 +49,5 @@ type Routing interface { FindProvidersAsync(context.Context, u.Key, int) <-chan *peer.Peer // Provide provides the key to the network - Provide(key u.Key) error + Provide(context.Context, u.Key) error } From 5bb47251e9c9ec09c73e1519426fd4c48bf73eb6 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Sun, 21 Sep 2014 20:06:30 -0700 Subject: [PATCH 0039/1035] get bitswap working with dht @perfmode using non-async version as apparently there's a bug in async. will look into it. 
This commit was moved from ipfs/go-bitswap@db399638a60281de4512325ed8b953f76054b044 --- bitswap/bitswap.go | 17 ++++++++++++----- bitswap/network/interface.go | 6 +++++- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 4f63e6c8c..b78304a36 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -2,6 +2,7 @@ package bitswap import ( "errors" + "fmt" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" @@ -65,12 +66,18 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) // TODO add to wantlist promise := bs.notifications.Subscribe(ctx, k) + // const maxProviders = 20 + // using non-async version for now. + peersToQuery, err := bs.routing.FindProviders(ctx, k) + if err != nil { + return nil, fmt.Errorf("No providers found for %d (%v)", k, err) + } + go func() { - const maxProviders = 20 - peersToQuery := bs.routing.FindProvidersAsync(ctx, k, maxProviders) message := bsmsg.New() message.AppendWanted(k) - for iiiii := range peersToQuery { + for _, iiiii := range peersToQuery { + // u.DOut("bitswap got peersToQuery: %s\n", iiiii) go func(p *peer.Peer) { response, err := bs.sender.SendRequest(ctx, p, message) if err != nil { @@ -125,9 +132,9 @@ func (bs *bitswap) ReceiveMessage( continue // FIXME(brian): err ignored } go bs.notifications.Publish(block) - go func() { + go func(block blocks.Block) { _ = bs.HasBlock(ctx, block) // FIXME err ignored - }() + }(block) } for _, key := range incoming.Wantlist() { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 15fa9c89e..f3efc8fe4 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -46,7 +46,11 @@ type NetMessageService interface { // TODO rename -> Router? 
type Routing interface { // FindProvidersAsync returns a channel of providers for the given key - FindProvidersAsync(context.Context, u.Key, int) <-chan *peer.Peer + // FindProvidersAsync(context.Context, u.Key, int) <-chan *peer.Peer + // ^--- removed this for now because has some bugs apparently. + + // FindProviders returns the providers for the given key + FindProviders(context.Context, u.Key) ([]*peer.Peer, error) // Provide provides the key to the network Provide(context.Context, u.Key) error From aca7200f06f3c109eade69587f836701a7fd186c Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sat, 20 Sep 2014 15:42:24 -0700 Subject: [PATCH 0040/1035] style(bitswap) make signature more readable This commit was moved from ipfs/go-bitswap@e38bef88da92b148c32a1b58196af1079995110b --- bitswap/bitswap.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b78304a36..3bee217dd 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -114,9 +114,9 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { } // TODO(brian): handle errors -func (bs *bitswap) ReceiveMessage( - ctx context.Context, p *peer.Peer, incoming bsmsg.BitSwapMessage) ( +func (bs *bitswap) ReceiveMessage(ctx context.Context, p *peer.Peer, incoming bsmsg.BitSwapMessage) ( *peer.Peer, bsmsg.BitSwapMessage, error) { + if p == nil { return nil, nil, errors.New("Received nil Peer") } From ca61f1cb6744bf083274b997f497aa69958f0ae0 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 21 Sep 2014 01:46:46 -0700 Subject: [PATCH 0041/1035] chore(bitswap) cleanup This commit was moved from ipfs/go-bitswap@81fb6a7395b77eca65ddb99dcaf8b3c3f1cffbe4 --- bitswap/testnet/routing.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/bitswap/testnet/routing.go b/bitswap/testnet/routing.go index b181e2abc..6adb7cf2e 100644 --- a/bitswap/testnet/routing.go +++ b/bitswap/testnet/routing.go @@ -1,7 +1,6 @@ package bitswap 
import ( - "errors" "math/rand" "sync" @@ -12,13 +11,10 @@ import ( ) type RoutingServer interface { - // TODO Announce(*peer.Peer, u.Key) error - // TODO Providers(u.Key) []*peer.Peer - // TODO // Returns a Routing instance configured to query this hash table Client(*peer.Peer) bsnet.Routing } @@ -34,8 +30,6 @@ type hashTable struct { providers map[u.Key]peer.Map } -var TODO = errors.New("TODO") - func (rs *hashTable) Announce(p *peer.Peer, k u.Key) error { rs.lock.Lock() defer rs.lock.Unlock() @@ -68,7 +62,6 @@ func (rs *hashTable) Providers(k u.Key) []*peer.Peer { return ret } -// TODO func (rs *hashTable) Client(p *peer.Peer) bsnet.Routing { return &routingClient{ peer: p, From f48a532f295599896a848aa39efa876360bf539d Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 21 Sep 2014 02:26:06 -0700 Subject: [PATCH 0042/1035] style(bitswap) swap argument order This commit was moved from ipfs/go-bitswap@d4144bfe4a022a094c30515bc8cd1e35b3928e57 --- bitswap/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 3bee217dd..ce5547d9e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -20,7 +20,7 @@ import ( // NetMessageSession initializes a BitSwap session that communicates over the // provided NetMessage service -func NetMessageSession(parent context.Context, s bsnet.NetMessageService, p *peer.Peer, d ds.Datastore, directory bsnet.Routing) exchange.Interface { +func NetMessageSession(parent context.Context, p *peer.Peer, s bsnet.NetMessageService, directory bsnet.Routing, d ds.Datastore) exchange.Interface { networkAdapter := bsnet.NetMessageAdapter(s, nil) bs := &bitswap{ From 02a0b5f8bb0508f8aa66254e77aa85e344eec512 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 21 Sep 2014 17:04:43 -0700 Subject: [PATCH 0043/1035] test(bitswap) test sending wantlist to peers This commit was moved from ipfs/go-bitswap@d345da7d23eb9b4918a4d82d434fb40f2b9ebf9b --- bitswap/bitswap_test.go | 
54 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 51 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 0badc6917..60ba7bf0b 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -146,10 +146,58 @@ func getOrFail(bitswap instance, b *blocks.Block, t *testing.T, wg *sync.WaitGro } func TestSendToWantingPeer(t *testing.T) { - t.Log("I get a file from peer |w|. In this message, I receive |w|'s wants") - t.Log("Peer |w| tells me it wants file |f|, but I don't have it") - t.Log("Later, peer |o| sends |f| to me") + net := tn.VirtualNetwork() + rs := tn.VirtualRoutingServer() + sg := NewSessionGenerator(net, rs) + bg := NewBlockGenerator(t) + + me := sg.Next() + w := sg.Next() + o := sg.Next() + + alpha := bg.Next() + + const timeout = 100 * time.Millisecond + const wait = 100 * time.Millisecond + + t.Log("Peer |w| attempts to get a file |alpha|. NB: alpha not available") + ctx, _ := context.WithTimeout(context.Background(), timeout) + _, err := w.exchange.Block(ctx, alpha.Key()) + if err == nil { + t.Error("Expected alpha to NOT be available") + } + time.Sleep(wait) + + t.Log("Peer |w| announces availability of a file |beta|") + beta := bg.Next() + ctx, _ = context.WithTimeout(context.Background(), timeout) + w.exchange.HasBlock(ctx, beta) + time.Sleep(wait) + + t.Log("I request and get |beta| from |w|. 
In the message, I receive |w|'s wants [alpha]") + t.Log("I don't have alpha, but I keep it on my wantlist.") + ctx, _ = context.WithTimeout(context.Background(), timeout) + me.exchange.Block(ctx, beta.Key()) + time.Sleep(wait) + + t.Log("Peer |o| announces the availability of |alpha|") + ctx, _ = context.WithTimeout(context.Background(), timeout) + o.exchange.HasBlock(ctx, alpha) + time.Sleep(wait) + + t.Log("I request |alpha| for myself.") + ctx, _ = context.WithTimeout(context.Background(), timeout) + me.exchange.Block(ctx, alpha.Key()) + time.Sleep(wait) + t.Log("After receiving |f| from |o|, I send it to the wanting peer |w|") + block, err := w.blockstore.Get(alpha.Key()) + if err != nil { + t.Fatal("Should not have received an error") + } + if block.Key() != alpha.Key() { + t.Error("Expected to receive alpha from me") + } } func NewBlockGenerator(t *testing.T) BlockGenerator { From 1d26946b8eec9d40e3548d9a3b28598b67e06b05 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 21 Sep 2014 22:00:13 -0700 Subject: [PATCH 0044/1035] fix(bitswap:testnet) Provide takes ctx This commit was moved from ipfs/go-bitswap@51d5dc023dbf1107c6fdf7a7f3ea99f7f693dd4b --- bitswap/testnet/routing.go | 2 +- bitswap/testnet/routing_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/testnet/routing.go b/bitswap/testnet/routing.go index 6adb7cf2e..4e2985a4a 100644 --- a/bitswap/testnet/routing.go +++ b/bitswap/testnet/routing.go @@ -92,6 +92,6 @@ func (a *routingClient) FindProvidersAsync(ctx context.Context, k u.Key, max int return out } -func (a *routingClient) Provide(key u.Key) error { +func (a *routingClient) Provide(_ context.Context, key u.Key) error { return a.hashTable.Announce(a.peer, key) } diff --git a/bitswap/testnet/routing_test.go b/bitswap/testnet/routing_test.go index d1015ef9c..dd6450e5e 100644 --- a/bitswap/testnet/routing_test.go +++ b/bitswap/testnet/routing_test.go @@ -53,7 +53,7 @@ func TestClientFindProviders(t *testing.T) 
{ rs := VirtualRoutingServer() client := rs.Client(peer) k := u.Key("hello") - err := client.Provide(k) + err := client.Provide(context.Background(), k) if err != nil { t.Fatal(err) } From ed14468a351aac4232f84638321f20dc1d9a3d48 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 21 Sep 2014 22:00:43 -0700 Subject: [PATCH 0045/1035] fix(bitswap) keep interface the same changing the bitswap interace breaks tests and makes things a bit difficult going forward. I think I have a temporary solution to replace the async method. this commit partially reverts changes from: ec50703395098f75946f0bad01816cc54ab18a58 https://github.com/jbenet/go-ipfs/commit/ec50703395098f75946f0bad01816cc54ab18a58 This commit was moved from ipfs/go-bitswap@05265fe607bf54bd416ee233d31b1a6317d8109f --- bitswap/bitswap.go | 11 +++-------- bitswap/network/interface.go | 6 +----- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index ce5547d9e..2dc73ca8e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -2,7 +2,6 @@ package bitswap import ( "errors" - "fmt" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" @@ -66,17 +65,13 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) // TODO add to wantlist promise := bs.notifications.Subscribe(ctx, k) - // const maxProviders = 20 - // using non-async version for now. 
- peersToQuery, err := bs.routing.FindProviders(ctx, k) - if err != nil { - return nil, fmt.Errorf("No providers found for %d (%v)", k, err) - } + const maxProviders = 20 + peersToQuery := bs.routing.FindProvidersAsync(ctx, k, maxProviders) go func() { message := bsmsg.New() message.AppendWanted(k) - for _, iiiii := range peersToQuery { + for iiiii := range peersToQuery { // u.DOut("bitswap got peersToQuery: %s\n", iiiii) go func(p *peer.Peer) { response, err := bs.sender.SendRequest(ctx, p, message) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index f3efc8fe4..15fa9c89e 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -46,11 +46,7 @@ type NetMessageService interface { // TODO rename -> Router? type Routing interface { // FindProvidersAsync returns a channel of providers for the given key - // FindProvidersAsync(context.Context, u.Key, int) <-chan *peer.Peer - // ^--- removed this for now because has some bugs apparently. - - // FindProviders returns the providers for the given key - FindProviders(context.Context, u.Key) ([]*peer.Peer, error) + FindProvidersAsync(context.Context, u.Key, int) <-chan *peer.Peer // Provide provides the key to the network Provide(context.Context, u.Key) error From f0432d639acdbbe46431273a1bb25634410fbade Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 21 Sep 2014 21:39:45 -0700 Subject: [PATCH 0046/1035] test(bitswap) send entire wantlist to peers fix(bitswap) pass go vet fixes #97 https://github.com/jbenet/go-ipfs/issues/97 This commit was moved from ipfs/go-bitswap@f96246e119c1710285112342de6405f0cd331c3d --- bitswap/bitswap.go | 70 +++++++++++++++++++++++++++++++++++------ bitswap/bitswap_test.go | 50 ++++++++++++++++++----------- 2 files changed, 93 insertions(+), 27 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 2dc73ca8e..cf5303297 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -2,6 +2,7 @@ package bitswap import ( "errors" 
+ "sync" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" @@ -28,6 +29,9 @@ func NetMessageSession(parent context.Context, p *peer.Peer, s bsnet.NetMessageS strategy: strategy.New(), routing: directory, sender: networkAdapter, + wantlist: WantList{ + data: make(map[u.Key]struct{}), + }, } networkAdapter.SetDelegate(bs) @@ -53,6 +57,39 @@ type bitswap struct { // interact with partners. // TODO(brian): save the strategy's state to the datastore strategy strategy.Strategy + + wantlist WantList +} + +type WantList struct { + lock sync.RWMutex + data map[u.Key]struct{} +} + +func (wl *WantList) Add(k u.Key) { + u.DOut("Adding %v to Wantlist\n", k.Pretty()) + wl.lock.Lock() + defer wl.lock.Unlock() + + wl.data[k] = struct{}{} +} + +func (wl *WantList) Remove(k u.Key) { + u.DOut("Removing %v from Wantlist\n", k.Pretty()) + wl.lock.Lock() + defer wl.lock.Unlock() + + delete(wl.data, k) +} + +func (wl *WantList) Keys() []u.Key { + wl.lock.RLock() + defer wl.lock.RUnlock() + keys := make([]u.Key, 0) + for k, _ := range wl.data { + keys = append(keys, k) + } + return keys } // GetBlock attempts to retrieve a particular block from peers within the @@ -60,9 +97,10 @@ type bitswap struct { // // TODO ensure only one active request per key func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) { + u.DOut("Get Block %v\n", k.Pretty()) ctx, cancelFunc := context.WithCancel(parent) - // TODO add to wantlist + bs.wantlist.Add(k) promise := bs.notifications.Subscribe(ctx, k) const maxProviders = 20 @@ -70,6 +108,9 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) go func() { message := bsmsg.New() + for _, wanted := range bs.wantlist.Keys() { + message.AppendWanted(wanted) + } message.AppendWanted(k) for iiiii := range peersToQuery { // u.DOut("bitswap got peersToQuery: %s\n", iiiii) @@ -94,6 +135,7 
@@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) select { case block := <-promise: cancelFunc() + bs.wantlist.Remove(k) // TODO remove from wantlist return &block, nil case <-parent.Done(): @@ -104,6 +146,8 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) // HasBlock announces the existance of a block to bitswap, potentially sending // it to peers (Partners) whose WantLists include it. func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { + u.DOut("Has Block %v\n", blk.Key().Pretty()) + bs.wantlist.Remove(blk.Key()) bs.sendToPeersThatWant(ctx, blk) return bs.routing.Provide(ctx, blk.Key()) } @@ -111,6 +155,7 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { // TODO(brian): handle errors func (bs *bitswap) ReceiveMessage(ctx context.Context, p *peer.Peer, incoming bsmsg.BitSwapMessage) ( *peer.Peer, bsmsg.BitSwapMessage, error) { + u.DOut("ReceiveMessage from %v\n", p.Key().Pretty()) if p == nil { return nil, nil, errors.New("Received nil Peer") @@ -132,19 +177,21 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p *peer.Peer, incoming bs }(block) } + message := bsmsg.New() + for _, wanted := range bs.wantlist.Keys() { + message.AppendWanted(wanted) + } for _, key := range incoming.Wantlist() { if bs.strategy.ShouldSendBlockToPeer(key, p) { - block, errBlockNotFound := bs.blockstore.Get(key) - if errBlockNotFound != nil { - return nil, nil, errBlockNotFound + if block, errBlockNotFound := bs.blockstore.Get(key); errBlockNotFound != nil { + continue + } else { + message.AppendBlock(*block) } - message := bsmsg.New() - message.AppendBlock(*block) - defer bs.strategy.MessageSent(p, message) - return p, message, nil } } - return nil, nil, nil + defer bs.strategy.MessageSent(p, message) + return p, message, nil } // send strives to ensure that accounting is always performed when a message is @@ -155,11 +202,16 @@ func (bs *bitswap) send(ctx 
context.Context, p *peer.Peer, m bsmsg.BitSwapMessag } func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) { + u.DOut("Sending %v to peers that want it\n", block.Key().Pretty()) for _, p := range bs.strategy.Peers() { if bs.strategy.BlockIsWantedByPeer(block.Key(), p) { + u.DOut("%v wants %v\n", p.Key().Pretty(), block.Key().Pretty()) if bs.strategy.ShouldSendBlockToPeer(block.Key(), p) { message := bsmsg.New() message.AppendBlock(block) + for _, wanted := range bs.wantlist.Keys() { + message.AppendWanted(wanted) + } go bs.send(ctx, p, message) } } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 60ba7bf0b..6ec45f21c 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,6 +16,7 @@ import ( strategy "github.com/jbenet/go-ipfs/exchange/bitswap/strategy" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" peer "github.com/jbenet/go-ipfs/peer" + util "github.com/jbenet/go-ipfs/util" testutil "github.com/jbenet/go-ipfs/util/testutil" ) @@ -145,7 +146,10 @@ func getOrFail(bitswap instance, b *blocks.Block, t *testing.T, wg *sync.WaitGro wg.Done() } +// TODO simplify this test. get to the _essence_! func TestSendToWantingPeer(t *testing.T) { + util.Debug = true + net := tn.VirtualNetwork() rs := tn.VirtualRoutingServer() sg := NewSessionGenerator(net, rs) @@ -155,48 +159,55 @@ func TestSendToWantingPeer(t *testing.T) { w := sg.Next() o := sg.Next() + t.Logf("Session %v\n", me.peer.Key().Pretty()) + t.Logf("Session %v\n", w.peer.Key().Pretty()) + t.Logf("Session %v\n", o.peer.Key().Pretty()) + alpha := bg.Next() - const timeout = 100 * time.Millisecond - const wait = 100 * time.Millisecond + const timeout = 1 * time.Millisecond // FIXME don't depend on time - t.Log("Peer |w| attempts to get a file |alpha|. NB: alpha not available") + t.Logf("Peer %v attempts to get %v. 
NB: not available\n", w.peer.Key().Pretty(), alpha.Key().Pretty()) ctx, _ := context.WithTimeout(context.Background(), timeout) _, err := w.exchange.Block(ctx, alpha.Key()) if err == nil { - t.Error("Expected alpha to NOT be available") + t.Fatalf("Expected %v to NOT be available", alpha.Key().Pretty()) } - time.Sleep(wait) - t.Log("Peer |w| announces availability of a file |beta|") beta := bg.Next() + t.Logf("Peer %v announes availability of %v\n", w.peer.Key().Pretty(), beta.Key().Pretty()) ctx, _ = context.WithTimeout(context.Background(), timeout) + if err := w.blockstore.Put(beta); err != nil { + t.Fatal(err) + } w.exchange.HasBlock(ctx, beta) - time.Sleep(wait) - t.Log("I request and get |beta| from |w|. In the message, I receive |w|'s wants [alpha]") - t.Log("I don't have alpha, but I keep it on my wantlist.") + t.Logf("%v gets %v from %v and discovers it wants %v\n", me.peer.Key().Pretty(), beta.Key().Pretty(), w.peer.Key().Pretty(), alpha.Key().Pretty()) ctx, _ = context.WithTimeout(context.Background(), timeout) - me.exchange.Block(ctx, beta.Key()) - time.Sleep(wait) + if _, err := me.exchange.Block(ctx, beta.Key()); err != nil { + t.Fatal(err) + } - t.Log("Peer |o| announces the availability of |alpha|") + t.Logf("%v announces availability of %v\n", o.peer.Key().Pretty(), alpha.Key().Pretty()) ctx, _ = context.WithTimeout(context.Background(), timeout) + if err := o.blockstore.Put(alpha); err != nil { + t.Fatal(err) + } o.exchange.HasBlock(ctx, alpha) - time.Sleep(wait) - t.Log("I request |alpha| for myself.") + t.Logf("%v requests %v\n", me.peer.Key().Pretty(), alpha.Key().Pretty()) ctx, _ = context.WithTimeout(context.Background(), timeout) - me.exchange.Block(ctx, alpha.Key()) - time.Sleep(wait) + if _, err := me.exchange.Block(ctx, alpha.Key()); err != nil { + t.Fatal(err) + } - t.Log("After receiving |f| from |o|, I send it to the wanting peer |w|") + t.Logf("%v should now have %v\n", w.peer.Key().Pretty(), alpha.Key().Pretty()) block, err := 
w.blockstore.Get(alpha.Key()) if err != nil { t.Fatal("Should not have received an error") } if block.Key() != alpha.Key() { - t.Error("Expected to receive alpha from me") + t.Fatal("Expected to receive alpha from me") } } @@ -278,6 +289,9 @@ func session(net tn.Network, rs tn.RoutingServer, id peer.ID) instance { strategy: strategy.New(), routing: htc, sender: adapter, + wantlist: WantList{ + data: make(map[util.Key]struct{}), + }, } adapter.SetDelegate(bs) return instance{ From 3f6619e5e98e37675853c7080b102a514d3f7101 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 21 Sep 2014 23:04:19 -0700 Subject: [PATCH 0047/1035] refac(bitswap, util) extract KeySet This commit was moved from ipfs/go-bitswap@1afac8dc122ee42eca874707cd0e45669d871bfb --- bitswap/bitswap.go | 38 ++--------------------------------- bitswap/bitswap_test.go | 4 +--- bitswap/strategy/interface.go | 18 ----------------- 3 files changed, 3 insertions(+), 57 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index cf5303297..fcc558a2c 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -2,7 +2,6 @@ package bitswap import ( "errors" - "sync" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" @@ -29,9 +28,7 @@ func NetMessageSession(parent context.Context, p *peer.Peer, s bsnet.NetMessageS strategy: strategy.New(), routing: directory, sender: networkAdapter, - wantlist: WantList{ - data: make(map[u.Key]struct{}), - }, + wantlist: u.NewKeySet(), } networkAdapter.SetDelegate(bs) @@ -58,38 +55,7 @@ type bitswap struct { // TODO(brian): save the strategy's state to the datastore strategy strategy.Strategy - wantlist WantList -} - -type WantList struct { - lock sync.RWMutex - data map[u.Key]struct{} -} - -func (wl *WantList) Add(k u.Key) { - u.DOut("Adding %v to Wantlist\n", k.Pretty()) - wl.lock.Lock() - defer wl.lock.Unlock() - - wl.data[k] = 
struct{}{} -} - -func (wl *WantList) Remove(k u.Key) { - u.DOut("Removing %v from Wantlist\n", k.Pretty()) - wl.lock.Lock() - defer wl.lock.Unlock() - - delete(wl.data, k) -} - -func (wl *WantList) Keys() []u.Key { - wl.lock.RLock() - defer wl.lock.RUnlock() - keys := make([]u.Key, 0) - for k, _ := range wl.data { - keys = append(keys, k) - } - return keys + wantlist u.KeySet } // GetBlock attempts to retrieve a particular block from peers within the diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 6ec45f21c..2173fb57f 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -289,9 +289,7 @@ func session(net tn.Network, rs tn.RoutingServer, id peer.ID) instance { strategy: strategy.New(), routing: htc, sender: adapter, - wantlist: WantList{ - data: make(map[util.Key]struct{}), - }, + wantlist: util.NewKeySet(), } adapter.SetDelegate(bs) return instance{ diff --git a/bitswap/strategy/interface.go b/bitswap/strategy/interface.go index 1a0e14948..48097b027 100644 --- a/bitswap/strategy/interface.go +++ b/bitswap/strategy/interface.go @@ -30,21 +30,3 @@ type Strategy interface { NumBytesReceivedFrom(*peer.Peer) uint64 } - -type WantList interface { - // Peer returns the owner of the WantList - Peer() *peer.Peer - - // Intersection returns the keys common to both WantLists - Intersection(WantList) WantList - - KeySet -} - -// TODO(brian): potentially move this somewhere more generic. For now, it's -// useful in BitSwap operations. 
- -type KeySet interface { - Contains(u.Key) bool - Keys() []u.Key -} From f58ce563fa74f9cd4844fa4d06ae7cd7a3432257 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 21 Sep 2014 23:34:42 -0700 Subject: [PATCH 0048/1035] feat(bitswap) expose ability to toggle "niceness" true -> always send to peer false -> use ledger-based strategy described in IPFS paper draft 3 This commit was moved from ipfs/go-bitswap@cd0cb0b7bf66108a1c860b517bc790e93f855025 --- bitswap/bitswap.go | 4 ++-- bitswap/bitswap_test.go | 3 ++- bitswap/strategy/math.go | 3 +++ bitswap/strategy/strategy.go | 13 +++++++++++-- bitswap/strategy/strategy_test.go | 2 +- 5 files changed, 19 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index fcc558a2c..4f5bb45e7 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -19,13 +19,13 @@ import ( // NetMessageSession initializes a BitSwap session that communicates over the // provided NetMessage service -func NetMessageSession(parent context.Context, p *peer.Peer, s bsnet.NetMessageService, directory bsnet.Routing, d ds.Datastore) exchange.Interface { +func NetMessageSession(parent context.Context, p *peer.Peer, s bsnet.NetMessageService, directory bsnet.Routing, d ds.Datastore, nice bool) exchange.Interface { networkAdapter := bsnet.NetMessageAdapter(s, nil) bs := &bitswap{ blockstore: blockstore.NewBlockstore(d), notifications: notifications.New(), - strategy: strategy.New(), + strategy: strategy.New(nice), routing: directory, sender: networkAdapter, wantlist: u.NewKeySet(), diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 2173fb57f..107180af7 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -283,10 +283,11 @@ func session(net tn.Network, rs tn.RoutingServer, id peer.ID) instance { htc := rs.Client(p) blockstore := bstore.NewBlockstore(ds.NewMapDatastore()) + const alwaysSendToPeer = true bs := &bitswap{ blockstore: blockstore, notifications: notifications.New(), - 
strategy: strategy.New(), + strategy: strategy.New(alwaysSendToPeer), routing: htc, sender: adapter, wantlist: util.NewKeySet(), diff --git a/bitswap/strategy/math.go b/bitswap/strategy/math.go index 21b1ff163..c5339e5b3 100644 --- a/bitswap/strategy/math.go +++ b/bitswap/strategy/math.go @@ -7,6 +7,9 @@ import ( type strategyFunc func(*ledger) bool +// TODO avoid using rand.Float64 method. it uses a singleton lock and may cause +// performance issues. Instead, instantiate a rand struct and use that to call +// Float64() func standardStrategy(l *ledger) bool { return rand.Float64() <= probabilitySend(l.Accounting.Value()) } diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index dc7a8e1b3..1cd4a021f 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -9,10 +9,19 @@ import ( ) // TODO declare thread-safe datastore -func New() Strategy { +// TODO niceness should be on a per-peer basis. Use-case: Certain peers are +// "trusted" and/or controlled by a single human user. 
The user may want for +// these peers to exchange data freely +func New(nice bool) Strategy { + var stratFunc strategyFunc + if nice { + stratFunc = yesManStrategy + } else { + stratFunc = standardStrategy + } return &strategist{ ledgerMap: ledgerMap{}, - strategyFunc: yesManStrategy, + strategyFunc: stratFunc, } } diff --git a/bitswap/strategy/strategy_test.go b/bitswap/strategy/strategy_test.go index e90bcd4ec..21f293c1c 100644 --- a/bitswap/strategy/strategy_test.go +++ b/bitswap/strategy/strategy_test.go @@ -17,7 +17,7 @@ type peerAndStrategist struct { func newPeerAndStrategist(idStr string) peerAndStrategist { return peerAndStrategist{ Peer: &peer.Peer{ID: peer.ID(idStr)}, - Strategy: New(), + Strategy: New(true), } } From 5896acedd85ecde3bc73571a6367391f03f000a8 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 22 Sep 2014 03:15:35 -0700 Subject: [PATCH 0049/1035] doc(bitswap:strat) add note to remove blocks from peer's wantlist after sending This commit was moved from ipfs/go-bitswap@022bf05e58fa8755c7851af56752459d1b0feb41 --- bitswap/strategy/strategy.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index 1cd4a021f..5d09f30b5 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -89,6 +89,9 @@ func (s *strategist) MessageSent(p *peer.Peer, m bsmsg.BitSwapMessage) error { for _, block := range m.Blocks() { l.SentBytes(len(block.Data)) } + + // TODO remove these blocks from peer's want list + return nil } From c5ad8db8a02093cebbf04e66b7ed41b2e080ebff Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 22 Sep 2014 21:11:06 -0700 Subject: [PATCH 0050/1035] implement a mock dht for use in testing This commit was moved from ipfs/go-bitswap@616f776007fccdf916e4c2f2801b759b5d32c2f1 --- bitswap/bitswap_test.go | 20 +++---- bitswap/testnet/routing.go | 96 --------------------------------- bitswap/testnet/routing_test.go | 28 +++++----- 3 files changed, 25 
insertions(+), 119 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 107180af7..fd9808160 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,6 +16,7 @@ import ( strategy "github.com/jbenet/go-ipfs/exchange/bitswap/strategy" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" peer "github.com/jbenet/go-ipfs/peer" + mock "github.com/jbenet/go-ipfs/routing/mock" util "github.com/jbenet/go-ipfs/util" testutil "github.com/jbenet/go-ipfs/util/testutil" ) @@ -23,7 +24,7 @@ import ( func TestGetBlockTimeout(t *testing.T) { net := tn.VirtualNetwork() - rs := tn.VirtualRoutingServer() + rs := mock.VirtualRoutingServer() g := NewSessionGenerator(net, rs) self := g.Next() @@ -40,7 +41,7 @@ func TestGetBlockTimeout(t *testing.T) { func TestProviderForKeyButNetworkCannotFind(t *testing.T) { net := tn.VirtualNetwork() - rs := tn.VirtualRoutingServer() + rs := mock.VirtualRoutingServer() g := NewSessionGenerator(net, rs) block := testutil.NewBlockOrFail(t, "block") @@ -61,7 +62,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { net := tn.VirtualNetwork() - rs := tn.VirtualRoutingServer() + rs := mock.VirtualRoutingServer() block := testutil.NewBlockOrFail(t, "block") g := NewSessionGenerator(net, rs) @@ -90,7 +91,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { func TestSwarm(t *testing.T) { net := tn.VirtualNetwork() - rs := tn.VirtualRoutingServer() + rs := mock.VirtualRoutingServer() sg := NewSessionGenerator(net, rs) bg := NewBlockGenerator(t) @@ -151,7 +152,7 @@ func TestSendToWantingPeer(t *testing.T) { util.Debug = true net := tn.VirtualNetwork() - rs := tn.VirtualRoutingServer() + rs := mock.VirtualRoutingServer() sg := NewSessionGenerator(net, rs) bg := NewBlockGenerator(t) @@ -237,7 +238,7 @@ func (bg *BlockGenerator) Blocks(n int) []*blocks.Block { } func NewSessionGenerator( - net tn.Network, rs tn.RoutingServer) 
SessionGenerator { + net tn.Network, rs mock.RoutingServer) SessionGenerator { return SessionGenerator{ net: net, rs: rs, @@ -248,7 +249,7 @@ func NewSessionGenerator( type SessionGenerator struct { seq int net tn.Network - rs tn.RoutingServer + rs mock.RoutingServer } func (g *SessionGenerator) Next() instance { @@ -276,11 +277,12 @@ type instance struct { // NB: It's easy make mistakes by providing the same peer ID to two different // sessions. To safeguard, use the SessionGenerator to generate sessions. It's // just a much better idea. -func session(net tn.Network, rs tn.RoutingServer, id peer.ID) instance { +func session(net tn.Network, rs mock.RoutingServer, id peer.ID) instance { p := &peer.Peer{ID: id} adapter := net.Adapter(p) - htc := rs.Client(p) + htc := mock.NewMockRouter(p, nil) + htc.SetRoutingServer(rs) blockstore := bstore.NewBlockstore(ds.NewMapDatastore()) const alwaysSendToPeer = true diff --git a/bitswap/testnet/routing.go b/bitswap/testnet/routing.go index 4e2985a4a..67a03afb7 100644 --- a/bitswap/testnet/routing.go +++ b/bitswap/testnet/routing.go @@ -1,97 +1 @@ package bitswap - -import ( - "math/rand" - "sync" - - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" - peer "github.com/jbenet/go-ipfs/peer" - u "github.com/jbenet/go-ipfs/util" -) - -type RoutingServer interface { - Announce(*peer.Peer, u.Key) error - - Providers(u.Key) []*peer.Peer - - // Returns a Routing instance configured to query this hash table - Client(*peer.Peer) bsnet.Routing -} - -func VirtualRoutingServer() RoutingServer { - return &hashTable{ - providers: make(map[u.Key]peer.Map), - } -} - -type hashTable struct { - lock sync.RWMutex - providers map[u.Key]peer.Map -} - -func (rs *hashTable) Announce(p *peer.Peer, k u.Key) error { - rs.lock.Lock() - defer rs.lock.Unlock() - - _, ok := rs.providers[k] - if !ok { - rs.providers[k] = make(peer.Map) - } - 
rs.providers[k][p.Key()] = p - return nil -} - -func (rs *hashTable) Providers(k u.Key) []*peer.Peer { - rs.lock.RLock() - defer rs.lock.RUnlock() - ret := make([]*peer.Peer, 0) - peerset, ok := rs.providers[k] - if !ok { - return ret - } - for _, peer := range peerset { - ret = append(ret, peer) - } - - for i := range ret { - j := rand.Intn(i + 1) - ret[i], ret[j] = ret[j], ret[i] - } - - return ret -} - -func (rs *hashTable) Client(p *peer.Peer) bsnet.Routing { - return &routingClient{ - peer: p, - hashTable: rs, - } -} - -type routingClient struct { - peer *peer.Peer - hashTable RoutingServer -} - -func (a *routingClient) FindProvidersAsync(ctx context.Context, k u.Key, max int) <-chan *peer.Peer { - out := make(chan *peer.Peer) - go func() { - defer close(out) - for i, p := range a.hashTable.Providers(k) { - if max <= i { - return - } - select { - case out <- p: - case <-ctx.Done(): - return - } - } - }() - return out -} - -func (a *routingClient) Provide(_ context.Context, key u.Key) error { - return a.hashTable.Announce(a.peer, key) -} diff --git a/bitswap/testnet/routing_test.go b/bitswap/testnet/routing_test.go index dd6450e5e..30a573f6f 100644 --- a/bitswap/testnet/routing_test.go +++ b/bitswap/testnet/routing_test.go @@ -5,19 +5,15 @@ import ( "testing" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" -) -import ( "github.com/jbenet/go-ipfs/peer" + mock "github.com/jbenet/go-ipfs/routing/mock" u "github.com/jbenet/go-ipfs/util" ) func TestKeyNotFound(t *testing.T) { - rs := func() RoutingServer { - // TODO fields - return &hashTable{} - }() - empty := rs.Providers(u.Key("not there")) + vrs := mock.VirtualRoutingServer() + empty := vrs.Providers(u.Key("not there")) if len(empty) != 0 { t.Fatal("should be empty") } @@ -29,7 +25,7 @@ func TestSetAndGet(t *testing.T) { ID: pid, } k := u.Key("42") - rs := VirtualRoutingServer() + rs := mock.VirtualRoutingServer() err := rs.Announce(p, k) if err != nil { t.Fatal(err) @@ 
-50,8 +46,9 @@ func TestClientFindProviders(t *testing.T) { peer := &peer.Peer{ ID: []byte("42"), } - rs := VirtualRoutingServer() - client := rs.Client(peer) + rs := mock.VirtualRoutingServer() + client := mock.NewMockRouter(peer, nil) + client.SetRoutingServer(rs) k := u.Key("hello") err := client.Provide(context.Background(), k) if err != nil { @@ -83,7 +80,7 @@ func TestClientFindProviders(t *testing.T) { } func TestClientOverMax(t *testing.T) { - rs := VirtualRoutingServer() + rs := mock.VirtualRoutingServer() k := u.Key("hello") numProvidersForHelloKey := 100 for i := 0; i < numProvidersForHelloKey; i++ { @@ -102,7 +99,8 @@ func TestClientOverMax(t *testing.T) { } max := 10 - client := rs.Client(&peer.Peer{ID: []byte("TODO")}) + client := mock.NewMockRouter(&peer.Peer{ID: []byte("TODO")}, nil) + client.SetRoutingServer(rs) providersFromClient := client.FindProvidersAsync(context.Background(), k, max) i := 0 for _ = range providersFromClient { @@ -115,7 +113,7 @@ func TestClientOverMax(t *testing.T) { // TODO does dht ensure won't receive self as a provider? probably not. 
func TestCanceledContext(t *testing.T) { - rs := VirtualRoutingServer() + rs := mock.VirtualRoutingServer() k := u.Key("hello") t.Log("async'ly announce infinite stream of providers for key") @@ -133,7 +131,9 @@ func TestCanceledContext(t *testing.T) { } }() - client := rs.Client(&peer.Peer{ID: []byte("peer id doesn't matter")}) + local := &peer.Peer{ID: []byte("peer id doesn't matter")} + client := mock.NewMockRouter(local, nil) + client.SetRoutingServer(rs) t.Log("warning: max is finite so this test is non-deterministic") t.Log("context cancellation could simply take lower priority") From 2f6e8097b3f959a5c43ff4d7cd0bc737e356122b Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 23 Sep 2014 11:45:02 -0700 Subject: [PATCH 0051/1035] change back to using Client method This commit was moved from ipfs/go-bitswap@7b4222ac228916a06c3a699f540b4f550f6ba034 --- bitswap/bitswap_test.go | 3 +-- bitswap/testnet/routing.go | 1 - bitswap/testnet/routing_test.go | 16 +++++++--------- 3 files changed, 8 insertions(+), 12 deletions(-) delete mode 100644 bitswap/testnet/routing.go diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index fd9808160..a9fc11f82 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -281,8 +281,7 @@ func session(net tn.Network, rs mock.RoutingServer, id peer.ID) instance { p := &peer.Peer{ID: id} adapter := net.Adapter(p) - htc := mock.NewMockRouter(p, nil) - htc.SetRoutingServer(rs) + htc := rs.Client(p) blockstore := bstore.NewBlockstore(ds.NewMapDatastore()) const alwaysSendToPeer = true diff --git a/bitswap/testnet/routing.go b/bitswap/testnet/routing.go deleted file mode 100644 index 67a03afb7..000000000 --- a/bitswap/testnet/routing.go +++ /dev/null @@ -1 +0,0 @@ -package bitswap diff --git a/bitswap/testnet/routing_test.go b/bitswap/testnet/routing_test.go index 30a573f6f..b3cbd385a 100644 --- a/bitswap/testnet/routing_test.go +++ b/bitswap/testnet/routing_test.go @@ -43,12 +43,10 @@ func TestSetAndGet(t *testing.T) { } 
func TestClientFindProviders(t *testing.T) { - peer := &peer.Peer{ - ID: []byte("42"), - } + peer := &peer.Peer{ID: []byte("42")} rs := mock.VirtualRoutingServer() - client := mock.NewMockRouter(peer, nil) - client.SetRoutingServer(rs) + client := rs.Client(peer) + k := u.Key("hello") err := client.Provide(context.Background(), k) if err != nil { @@ -99,8 +97,9 @@ func TestClientOverMax(t *testing.T) { } max := 10 - client := mock.NewMockRouter(&peer.Peer{ID: []byte("TODO")}, nil) - client.SetRoutingServer(rs) + peer := &peer.Peer{ID: []byte("TODO")} + client := rs.Client(peer) + providersFromClient := client.FindProvidersAsync(context.Background(), k, max) i := 0 for _ = range providersFromClient { @@ -132,8 +131,7 @@ func TestCanceledContext(t *testing.T) { }() local := &peer.Peer{ID: []byte("peer id doesn't matter")} - client := mock.NewMockRouter(local, nil) - client.SetRoutingServer(rs) + client := rs.Client(local) t.Log("warning: max is finite so this test is non-deterministic") t.Log("context cancellation could simply take lower priority") From 68d968043016cb953aa5e92f0f115566b1f18ea3 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 23 Sep 2014 14:08:37 -0700 Subject: [PATCH 0052/1035] move mock routing tests to proper directory This commit was moved from ipfs/go-bitswap@c50d177b53adb88cce5dd5ea5a27b9220d5d1970 --- bitswap/testnet/routing_test.go | 155 -------------------------------- 1 file changed, 155 deletions(-) delete mode 100644 bitswap/testnet/routing_test.go diff --git a/bitswap/testnet/routing_test.go b/bitswap/testnet/routing_test.go deleted file mode 100644 index b3cbd385a..000000000 --- a/bitswap/testnet/routing_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package bitswap - -import ( - "bytes" - "testing" - - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - "github.com/jbenet/go-ipfs/peer" - mock "github.com/jbenet/go-ipfs/routing/mock" - u "github.com/jbenet/go-ipfs/util" -) - -func TestKeyNotFound(t 
*testing.T) { - - vrs := mock.VirtualRoutingServer() - empty := vrs.Providers(u.Key("not there")) - if len(empty) != 0 { - t.Fatal("should be empty") - } -} - -func TestSetAndGet(t *testing.T) { - pid := peer.ID([]byte("the peer id")) - p := &peer.Peer{ - ID: pid, - } - k := u.Key("42") - rs := mock.VirtualRoutingServer() - err := rs.Announce(p, k) - if err != nil { - t.Fatal(err) - } - providers := rs.Providers(k) - if len(providers) != 1 { - t.Fatal("should be one") - } - for _, elem := range providers { - if bytes.Equal(elem.ID, pid) { - return - } - } - t.Fatal("ID should have matched") -} - -func TestClientFindProviders(t *testing.T) { - peer := &peer.Peer{ID: []byte("42")} - rs := mock.VirtualRoutingServer() - client := rs.Client(peer) - - k := u.Key("hello") - err := client.Provide(context.Background(), k) - if err != nil { - t.Fatal(err) - } - max := 100 - - providersFromHashTable := rs.Providers(k) - - isInHT := false - for _, p := range providersFromHashTable { - if bytes.Equal(p.ID, peer.ID) { - isInHT = true - } - } - if !isInHT { - t.Fatal("Despite client providing key, peer wasn't in hash table as a provider") - } - providersFromClient := client.FindProvidersAsync(context.Background(), u.Key("hello"), max) - isInClient := false - for p := range providersFromClient { - if bytes.Equal(p.ID, peer.ID) { - isInClient = true - } - } - if !isInClient { - t.Fatal("Despite client providing key, client didn't receive peer when finding providers") - } -} - -func TestClientOverMax(t *testing.T) { - rs := mock.VirtualRoutingServer() - k := u.Key("hello") - numProvidersForHelloKey := 100 - for i := 0; i < numProvidersForHelloKey; i++ { - peer := &peer.Peer{ - ID: []byte(string(i)), - } - err := rs.Announce(peer, k) - if err != nil { - t.Fatal(err) - } - } - providersFromHashTable := rs.Providers(k) - if len(providersFromHashTable) != numProvidersForHelloKey { - t.Log(1 == len(providersFromHashTable)) - t.Fatal("not all providers were returned") - } - - max := 10 - 
peer := &peer.Peer{ID: []byte("TODO")} - client := rs.Client(peer) - - providersFromClient := client.FindProvidersAsync(context.Background(), k, max) - i := 0 - for _ = range providersFromClient { - i++ - } - if i != max { - t.Fatal("Too many providers returned") - } -} - -// TODO does dht ensure won't receive self as a provider? probably not. -func TestCanceledContext(t *testing.T) { - rs := mock.VirtualRoutingServer() - k := u.Key("hello") - - t.Log("async'ly announce infinite stream of providers for key") - i := 0 - go func() { // infinite stream - for { - peer := &peer.Peer{ - ID: []byte(string(i)), - } - err := rs.Announce(peer, k) - if err != nil { - t.Fatal(err) - } - i++ - } - }() - - local := &peer.Peer{ID: []byte("peer id doesn't matter")} - client := rs.Client(local) - - t.Log("warning: max is finite so this test is non-deterministic") - t.Log("context cancellation could simply take lower priority") - t.Log("and result in receiving the max number of results") - max := 1000 - - t.Log("cancel the context before consuming") - ctx, cancelFunc := context.WithCancel(context.Background()) - cancelFunc() - providers := client.FindProvidersAsync(ctx, k, max) - - numProvidersReturned := 0 - for _ = range providers { - numProvidersReturned++ - } - t.Log(numProvidersReturned) - - if numProvidersReturned == max { - t.Fatal("Context cancel had no effect") - } -} From fd1af3c4a2405f8c833785de50f48d3b5ef0f0ab Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 22 Sep 2014 12:34:41 -0400 Subject: [PATCH 0053/1035] feat(bitswap:network) propagate errors up the stack Rather than pushing errors back down to lower layers, propagate the errors upward. This commit adds a `ReceiveError` method to BitSwap's network receiver. Still TODO: rm the error return value from: net.service.handler.HandleMessage This is inspired by delegation patterns in found in the wild. 
This commit was moved from ipfs/go-bitswap@7b1cda70ecb162ba2c68daed0d764ca198fa72cf --- bitswap/bitswap.go | 17 +++++++++++------ bitswap/network/interface.go | 4 +++- bitswap/network/net_message_adapter.go | 15 ++++++--------- bitswap/testnet/network.go | 24 +++++------------------- bitswap/testnet/network_test.go | 25 ++++++++++++++----------- 5 files changed, 39 insertions(+), 46 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 4f5bb45e7..4ba9e179f 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -1,8 +1,6 @@ package bitswap import ( - "errors" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" @@ -120,14 +118,16 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { // TODO(brian): handle errors func (bs *bitswap) ReceiveMessage(ctx context.Context, p *peer.Peer, incoming bsmsg.BitSwapMessage) ( - *peer.Peer, bsmsg.BitSwapMessage, error) { + *peer.Peer, bsmsg.BitSwapMessage) { u.DOut("ReceiveMessage from %v\n", p.Key().Pretty()) if p == nil { - return nil, nil, errors.New("Received nil Peer") + // TODO propagate the error upward + return nil, nil } if incoming == nil { - return nil, nil, errors.New("Received nil Message") + // TODO propagate the error upward + return nil, nil } bs.strategy.MessageReceived(p, incoming) // FIRST @@ -157,7 +157,12 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p *peer.Peer, incoming bs } } defer bs.strategy.MessageSent(p, message) - return p, message, nil + return p, message +} + +func (bs *bitswap) ReceiveError(err error) { + // TODO log the network error + // TODO bubble the network error up to the parent context/error logger } // send strives to ensure that accounting is always performed when a message is diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 15fa9c89e..611dea8cb 100644 --- 
a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -33,7 +33,9 @@ type Adapter interface { type Receiver interface { ReceiveMessage( ctx context.Context, sender *peer.Peer, incoming bsmsg.BitSwapMessage) ( - destination *peer.Peer, outgoing bsmsg.BitSwapMessage, err error) + destination *peer.Peer, outgoing bsmsg.BitSwapMessage) + + ReceiveError(error) } // TODO(brian): move this to go-ipfs/net package diff --git a/bitswap/network/net_message_adapter.go b/bitswap/network/net_message_adapter.go index 603317afb..842f069f1 100644 --- a/bitswap/network/net_message_adapter.go +++ b/bitswap/network/net_message_adapter.go @@ -1,8 +1,6 @@ package network import ( - "errors" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" @@ -34,18 +32,16 @@ func (adapter *impl) HandleMessage( ctx context.Context, incoming netmsg.NetMessage) (netmsg.NetMessage, error) { if adapter.receiver == nil { - return nil, errors.New("No receiver. 
NetMessage dropped") + return nil, nil } received, err := bsmsg.FromNet(incoming) if err != nil { - return nil, err + adapter.receiver.ReceiveError(err) + return nil, nil } - p, bsmsg, err := adapter.receiver.ReceiveMessage(ctx, incoming.Peer(), received) - if err != nil { - return nil, err - } + p, bsmsg := adapter.receiver.ReceiveMessage(ctx, incoming.Peer(), received) // TODO(brian): put this in a helper function if bsmsg == nil || p == nil { @@ -54,7 +50,8 @@ func (adapter *impl) HandleMessage( outgoing, err := bsmsg.ToNet(p) if err != nil { - return nil, err + adapter.receiver.ReceiveError(err) + return nil, nil } return outgoing, nil diff --git a/bitswap/testnet/network.go b/bitswap/testnet/network.go index 5039e730b..4d5f8c35e 100644 --- a/bitswap/testnet/network.go +++ b/bitswap/testnet/network.go @@ -76,18 +76,7 @@ func (n *network) deliver( return errors.New("Invalid input") } - nextPeer, nextMsg, err := r.ReceiveMessage(context.TODO(), from, message) - if err != nil { - - // TODO should this error be returned across network boundary? - - // TODO this raises an interesting question about network contract. How - // can the network be expected to behave under different failure - // conditions? What if peer is unreachable? Will we know if messages - // aren't delivered? 
- - return err - } + nextPeer, nextMsg := r.ReceiveMessage(context.TODO(), from, message) if (nextPeer == nil && nextMsg != nil) || (nextMsg == nil && nextPeer != nil) { return errors.New("Malformed client request") @@ -119,15 +108,12 @@ func (n *network) SendRequest( if !ok { return nil, errors.New("Cannot locate peer on network") } - nextPeer, nextMsg, err := r.ReceiveMessage(context.TODO(), from, message) - if err != nil { - return nil, err - // TODO return nil, NoResponse - } + nextPeer, nextMsg := r.ReceiveMessage(context.TODO(), from, message) // TODO dedupe code if (nextPeer == nil && nextMsg != nil) || (nextMsg == nil && nextPeer != nil) { - return nil, errors.New("Malformed client request") + r.ReceiveError(errors.New("Malformed client request")) + return nil, nil } // TODO dedupe code @@ -144,7 +130,7 @@ func (n *network) SendRequest( } n.deliver(nextReceiver, nextPeer, nextMsg) }() - return nil, NoResponse + return nil, nil } return nextMsg, nil } diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 70b0615db..15502783e 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -26,7 +26,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) { ctx context.Context, from *peer.Peer, incoming bsmsg.BitSwapMessage) ( - *peer.Peer, bsmsg.BitSwapMessage, error) { + *peer.Peer, bsmsg.BitSwapMessage) { t.Log("Recipient received a message from the network") @@ -35,7 +35,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) { m := bsmsg.New() m.AppendBlock(testutil.NewBlockOrFail(t, expectedStr)) - return from, m, nil + return from, m })) t.Log("Build a message and send a synchronous request to recipient") @@ -74,19 +74,19 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { ctx context.Context, fromWaiter *peer.Peer, msgFromWaiter bsmsg.BitSwapMessage) ( - *peer.Peer, bsmsg.BitSwapMessage, error) { + *peer.Peer, bsmsg.BitSwapMessage) { msgToWaiter := bsmsg.New() 
msgToWaiter.AppendBlock(testutil.NewBlockOrFail(t, expectedStr)) - return fromWaiter, msgToWaiter, nil + return fromWaiter, msgToWaiter })) waiter.SetDelegate(lambda(func( ctx context.Context, fromResponder *peer.Peer, msgFromResponder bsmsg.BitSwapMessage) ( - *peer.Peer, bsmsg.BitSwapMessage, error) { + *peer.Peer, bsmsg.BitSwapMessage) { // TODO assert that this came from the correct peer and that the message contents are as expected ok := false @@ -101,7 +101,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { t.Fatal("Message not received from the responder") } - return nil, nil, nil + return nil, nil })) messageSentAsync := bsmsg.New() @@ -116,7 +116,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { } type receiverFunc func(ctx context.Context, p *peer.Peer, - incoming bsmsg.BitSwapMessage) (*peer.Peer, bsmsg.BitSwapMessage, error) + incoming bsmsg.BitSwapMessage) (*peer.Peer, bsmsg.BitSwapMessage) // lambda returns a Receiver instance given a receiver function func lambda(f receiverFunc) bsnet.Receiver { @@ -126,13 +126,16 @@ func lambda(f receiverFunc) bsnet.Receiver { } type lambdaImpl struct { - f func(ctx context.Context, p *peer.Peer, - incoming bsmsg.BitSwapMessage) ( - *peer.Peer, bsmsg.BitSwapMessage, error) + f func(ctx context.Context, p *peer.Peer, incoming bsmsg.BitSwapMessage) ( + *peer.Peer, bsmsg.BitSwapMessage) } func (lam *lambdaImpl) ReceiveMessage(ctx context.Context, p *peer.Peer, incoming bsmsg.BitSwapMessage) ( - *peer.Peer, bsmsg.BitSwapMessage, error) { + *peer.Peer, bsmsg.BitSwapMessage) { return lam.f(ctx, p, incoming) } + +func (lam *lambdaImpl) ReceiveError(err error) { + // TODO log error +} From e31bdd2e25f1357d241d463dd89531b5121f254a Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 22 Sep 2014 14:04:41 -0400 Subject: [PATCH 0054/1035] feat(net:service, routing) remove error return value This commit was moved from ipfs/go-bitswap@e0a9615709b0e442661888eab7883f233163cf59 --- 
bitswap/network/net_message_adapter.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/bitswap/network/net_message_adapter.go b/bitswap/network/net_message_adapter.go index 842f069f1..fe3bd6a36 100644 --- a/bitswap/network/net_message_adapter.go +++ b/bitswap/network/net_message_adapter.go @@ -29,32 +29,32 @@ type impl struct { // HandleMessage marshals and unmarshals net messages, forwarding them to the // BitSwapMessage receiver func (adapter *impl) HandleMessage( - ctx context.Context, incoming netmsg.NetMessage) (netmsg.NetMessage, error) { + ctx context.Context, incoming netmsg.NetMessage) netmsg.NetMessage { if adapter.receiver == nil { - return nil, nil + return nil } received, err := bsmsg.FromNet(incoming) if err != nil { - adapter.receiver.ReceiveError(err) - return nil, nil + go adapter.receiver.ReceiveError(err) + return nil } p, bsmsg := adapter.receiver.ReceiveMessage(ctx, incoming.Peer(), received) // TODO(brian): put this in a helper function if bsmsg == nil || p == nil { - return nil, nil + return nil } outgoing, err := bsmsg.ToNet(p) if err != nil { - adapter.receiver.ReceiveError(err) - return nil, nil + go adapter.receiver.ReceiveError(err) + return nil } - return outgoing, nil + return outgoing } func (adapter *impl) SendMessage( From 3f5756ba48772fbd812e52216aa467384d2af028 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Mon, 6 Oct 2014 04:23:55 -0700 Subject: [PATCH 0055/1035] Obviated need for `.ID.Pretty()` all over the place. 
This commit was moved from ipfs/go-bitswap@3d12baaee50b46bf541b119301c1860f2a8637b7 --- bitswap/bitswap.go | 10 +++++----- bitswap/bitswap_test.go | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 4ba9e179f..e4eaeb4a4 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -61,7 +61,7 @@ type bitswap struct { // // TODO ensure only one active request per key func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) { - u.DOut("Get Block %v\n", k.Pretty()) + u.DOut("Get Block %v\n", k) ctx, cancelFunc := context.WithCancel(parent) bs.wantlist.Add(k) @@ -110,7 +110,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) // HasBlock announces the existance of a block to bitswap, potentially sending // it to peers (Partners) whose WantLists include it. func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { - u.DOut("Has Block %v\n", blk.Key().Pretty()) + u.DOut("Has Block %v\n", blk.Key()) bs.wantlist.Remove(blk.Key()) bs.sendToPeersThatWant(ctx, blk) return bs.routing.Provide(ctx, blk.Key()) @@ -119,7 +119,7 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { // TODO(brian): handle errors func (bs *bitswap) ReceiveMessage(ctx context.Context, p *peer.Peer, incoming bsmsg.BitSwapMessage) ( *peer.Peer, bsmsg.BitSwapMessage) { - u.DOut("ReceiveMessage from %v\n", p.Key().Pretty()) + u.DOut("ReceiveMessage from %v\n", p.Key()) if p == nil { // TODO propagate the error upward @@ -173,10 +173,10 @@ func (bs *bitswap) send(ctx context.Context, p *peer.Peer, m bsmsg.BitSwapMessag } func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) { - u.DOut("Sending %v to peers that want it\n", block.Key().Pretty()) + u.DOut("Sending %v to peers that want it\n", block.Key()) for _, p := range bs.strategy.Peers() { if bs.strategy.BlockIsWantedByPeer(block.Key(), p) { - 
u.DOut("%v wants %v\n", p.Key().Pretty(), block.Key().Pretty()) + u.DOut("%v wants %v\n", p, block.Key()) if bs.strategy.ShouldSendBlockToPeer(block.Key(), p) { message := bsmsg.New() message.AppendBlock(block) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index a9fc11f82..3a9bed97c 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -160,49 +160,49 @@ func TestSendToWantingPeer(t *testing.T) { w := sg.Next() o := sg.Next() - t.Logf("Session %v\n", me.peer.Key().Pretty()) - t.Logf("Session %v\n", w.peer.Key().Pretty()) - t.Logf("Session %v\n", o.peer.Key().Pretty()) + t.Logf("Session %v\n", me.peer) + t.Logf("Session %v\n", w.peer) + t.Logf("Session %v\n", o.peer) alpha := bg.Next() const timeout = 1 * time.Millisecond // FIXME don't depend on time - t.Logf("Peer %v attempts to get %v. NB: not available\n", w.peer.Key().Pretty(), alpha.Key().Pretty()) + t.Logf("Peer %v attempts to get %v. NB: not available\n", w.peer, alpha.Key()) ctx, _ := context.WithTimeout(context.Background(), timeout) _, err := w.exchange.Block(ctx, alpha.Key()) if err == nil { - t.Fatalf("Expected %v to NOT be available", alpha.Key().Pretty()) + t.Fatalf("Expected %v to NOT be available", alpha.Key()) } beta := bg.Next() - t.Logf("Peer %v announes availability of %v\n", w.peer.Key().Pretty(), beta.Key().Pretty()) + t.Logf("Peer %v announes availability of %v\n", w.peer, beta.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) if err := w.blockstore.Put(beta); err != nil { t.Fatal(err) } w.exchange.HasBlock(ctx, beta) - t.Logf("%v gets %v from %v and discovers it wants %v\n", me.peer.Key().Pretty(), beta.Key().Pretty(), w.peer.Key().Pretty(), alpha.Key().Pretty()) + t.Logf("%v gets %v from %v and discovers it wants %v\n", me.peer, beta.Key(), w.peer, alpha.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) if _, err := me.exchange.Block(ctx, beta.Key()); err != nil { t.Fatal(err) } - t.Logf("%v announces availability of %v\n", 
o.peer.Key().Pretty(), alpha.Key().Pretty()) + t.Logf("%v announces availability of %v\n", o.peer, alpha.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) if err := o.blockstore.Put(alpha); err != nil { t.Fatal(err) } o.exchange.HasBlock(ctx, alpha) - t.Logf("%v requests %v\n", me.peer.Key().Pretty(), alpha.Key().Pretty()) + t.Logf("%v requests %v\n", me.peer, alpha.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) if _, err := me.exchange.Block(ctx, alpha.Key()); err != nil { t.Fatal(err) } - t.Logf("%v should now have %v\n", w.peer.Key().Pretty(), alpha.Key().Pretty()) + t.Logf("%v should now have %v\n", w.peer, alpha.Key()) block, err := w.blockstore.Get(alpha.Key()) if err != nil { t.Fatal("Should not have received an error") From 94ae1c849dde4122cbe6602a473c833f5556c5df Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 7 Oct 2014 20:46:01 +0000 Subject: [PATCH 0056/1035] removed error from return type of blocks.NewBlock() This commit was moved from ipfs/go-bitswap@e66cbacab720ac4e0f2e7b4f9e672945ab4b5bf4 --- bitswap/bitswap_test.go | 28 +++++++++------------ bitswap/message/message.go | 14 +++-------- bitswap/message/message_test.go | 19 ++++++-------- bitswap/notifications/notifications_test.go | 10 +++----- bitswap/strategy/strategy_test.go | 6 ++--- bitswap/testnet/network_test.go | 10 ++++---- 6 files changed, 36 insertions(+), 51 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 3a9bed97c..fd01aacd9 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -9,7 +9,7 @@ import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" - "github.com/jbenet/go-ipfs/blocks" + blocks "github.com/jbenet/go-ipfs/blocks" bstore "github.com/jbenet/go-ipfs/blockstore" exchange "github.com/jbenet/go-ipfs/exchange" notifications 
"github.com/jbenet/go-ipfs/exchange/bitswap/notifications" @@ -18,7 +18,6 @@ import ( peer "github.com/jbenet/go-ipfs/peer" mock "github.com/jbenet/go-ipfs/routing/mock" util "github.com/jbenet/go-ipfs/util" - testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestGetBlockTimeout(t *testing.T) { @@ -30,7 +29,7 @@ func TestGetBlockTimeout(t *testing.T) { self := g.Next() ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) - block := testutil.NewBlockOrFail(t, "block") + block := blocks.NewBlock([]byte("block")) _, err := self.exchange.Block(ctx, block.Key()) if err != context.DeadlineExceeded { @@ -44,7 +43,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { rs := mock.VirtualRoutingServer() g := NewSessionGenerator(net, rs) - block := testutil.NewBlockOrFail(t, "block") + block := blocks.NewBlock([]byte("block")) rs.Announce(&peer.Peer{}, block.Key()) // but not on network solo := g.Next() @@ -63,15 +62,15 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { net := tn.VirtualNetwork() rs := mock.VirtualRoutingServer() - block := testutil.NewBlockOrFail(t, "block") + block := blocks.NewBlock([]byte("block")) g := NewSessionGenerator(net, rs) hasBlock := g.Next() - if err := hasBlock.blockstore.Put(block); err != nil { + if err := hasBlock.blockstore.Put(*block); err != nil { t.Fatal(err) } - if err := hasBlock.exchange.HasBlock(context.Background(), block); err != nil { + if err := hasBlock.exchange.HasBlock(context.Background(), *block); err != nil { t.Fatal(err) } @@ -93,7 +92,7 @@ func TestSwarm(t *testing.T) { net := tn.VirtualNetwork() rs := mock.VirtualRoutingServer() sg := NewSessionGenerator(net, rs) - bg := NewBlockGenerator(t) + bg := NewBlockGenerator() t.Log("Create a ton of instances, and just a few blocks") @@ -154,7 +153,7 @@ func TestSendToWantingPeer(t *testing.T) { net := tn.VirtualNetwork() rs := mock.VirtualRoutingServer() sg := NewSessionGenerator(net, rs) - bg := NewBlockGenerator(t) + bg := 
NewBlockGenerator() me := sg.Next() w := sg.Next() @@ -212,20 +211,17 @@ func TestSendToWantingPeer(t *testing.T) { } } -func NewBlockGenerator(t *testing.T) BlockGenerator { - return BlockGenerator{ - T: t, - } +func NewBlockGenerator() BlockGenerator { + return BlockGenerator{} } type BlockGenerator struct { - *testing.T // b/c block generation can fail - seq int + seq int } func (bg *BlockGenerator) Next() blocks.Block { bg.seq++ - return testutil.NewBlockOrFail(bg.T, string(bg.seq)) + return *blocks.NewBlock([]byte(string(bg.seq))) } func (bg *BlockGenerator) Blocks(n int) []*blocks.Block { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 22258e17f..a724f7cc7 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -32,19 +32,16 @@ func New() *message { return new(message) } -func newMessageFromProto(pbm PBMessage) (BitSwapMessage, error) { +func newMessageFromProto(pbm PBMessage) BitSwapMessage { m := New() for _, s := range pbm.GetWantlist() { m.AppendWanted(u.Key(s)) } for _, d := range pbm.GetBlocks() { - b, err := blocks.NewBlock(d) - if err != nil { - return nil, err - } + b := blocks.NewBlock(d) m.AppendBlock(*b) } - return m, nil + return m } // TODO(brian): convert these into keys @@ -70,10 +67,7 @@ func FromNet(nmsg netmsg.NetMessage) (BitSwapMessage, error) { if err := proto.Unmarshal(nmsg.Data(), pb); err != nil { return nil, err } - m, err := newMessageFromProto(*pb) - if err != nil { - return nil, err - } + m := newMessageFromProto(*pb) return m, nil } diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 9590f1ff1..b5954eba8 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -4,9 +4,9 @@ import ( "bytes" "testing" + "github.com/jbenet/go-ipfs/blocks" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" - testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestAppendWanted(t *testing.T) { @@ -26,10 +26,7 @@ func 
TestNewMessageFromProto(t *testing.T) { if !contains(protoMessage.Wantlist, str) { t.Fail() } - m, err := newMessageFromProto(*protoMessage) - if err != nil { - t.Fatal(err) - } + m := newMessageFromProto(*protoMessage) if !contains(m.ToProto().GetWantlist(), str) { t.Fail() } @@ -43,8 +40,8 @@ func TestAppendBlock(t *testing.T) { m := New() for _, str := range strs { - block := testutil.NewBlockOrFail(t, str) - m.AppendBlock(block) + block := blocks.NewBlock([]byte(str)) + m.AppendBlock(*block) } // assert strings are in proto message @@ -134,10 +131,10 @@ func TestToNetFromNetPreservesWantList(t *testing.T) { func TestToAndFromNetMessage(t *testing.T) { original := New() - original.AppendBlock(testutil.NewBlockOrFail(t, "W")) - original.AppendBlock(testutil.NewBlockOrFail(t, "E")) - original.AppendBlock(testutil.NewBlockOrFail(t, "F")) - original.AppendBlock(testutil.NewBlockOrFail(t, "M")) + original.AppendBlock(*blocks.NewBlock([]byte("W"))) + original.AppendBlock(*blocks.NewBlock([]byte("E"))) + original.AppendBlock(*blocks.NewBlock([]byte("F"))) + original.AppendBlock(*blocks.NewBlock([]byte("M"))) p := &peer.Peer{ID: []byte("X")} netmsg, err := original.ToNet(p) diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index b12cc7d83..063634f61 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -6,25 +6,23 @@ import ( "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - testutil "github.com/jbenet/go-ipfs/util/testutil" - blocks "github.com/jbenet/go-ipfs/blocks" ) func TestPublishSubscribe(t *testing.T) { - blockSent := testutil.NewBlockOrFail(t, "Greetings from The Interval") + blockSent := blocks.NewBlock([]byte("Greetings from The Interval")) n := New() defer n.Shutdown() ch := n.Subscribe(context.Background(), blockSent.Key()) - n.Publish(blockSent) + n.Publish(*blockSent) blockRecvd, ok := <-ch if !ok { 
t.Fail() } - assertBlocksEqual(t, blockRecvd, blockSent) + assertBlocksEqual(t, blockRecvd, *blockSent) } @@ -35,7 +33,7 @@ func TestCarryOnWhenDeadlineExpires(t *testing.T) { n := New() defer n.Shutdown() - block := testutil.NewBlockOrFail(t, "A Missed Connection") + block := blocks.NewBlock([]byte("A Missed Connection")) blockChannel := n.Subscribe(fastExpiringCtx, block.Key()) assertBlockChannelNil(t, blockChannel) diff --git a/bitswap/strategy/strategy_test.go b/bitswap/strategy/strategy_test.go index 21f293c1c..dccc4a374 100644 --- a/bitswap/strategy/strategy_test.go +++ b/bitswap/strategy/strategy_test.go @@ -4,9 +4,9 @@ import ( "strings" "testing" + blocks "github.com/jbenet/go-ipfs/blocks" message "github.com/jbenet/go-ipfs/exchange/bitswap/message" peer "github.com/jbenet/go-ipfs/peer" - testutil "github.com/jbenet/go-ipfs/util/testutil" ) type peerAndStrategist struct { @@ -30,7 +30,7 @@ func TestConsistentAccounting(t *testing.T) { m := message.New() content := []string{"this", "is", "message", "i"} - m.AppendBlock(testutil.NewBlockOrFail(t, strings.Join(content, " "))) + m.AppendBlock(*blocks.NewBlock([]byte(strings.Join(content, " ")))) sender.MessageSent(receiver.Peer, m) receiver.MessageReceived(sender.Peer, m) @@ -57,7 +57,7 @@ func TestBlockRecordedAsWantedAfterMessageReceived(t *testing.T) { beggar := newPeerAndStrategist("can't be chooser") chooser := newPeerAndStrategist("chooses JIF") - block := testutil.NewBlockOrFail(t, "data wanted by beggar") + block := blocks.NewBlock([]byte("data wanted by beggar")) messageFromBeggarToChooser := message.New() messageFromBeggarToChooser.AppendWanted(block.Key()) diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 15502783e..fbd7c8893 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -5,10 +5,10 @@ import ( "testing" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + blocks 
"github.com/jbenet/go-ipfs/blocks" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/peer" - testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestSendRequestToCooperativePeer(t *testing.T) { @@ -33,7 +33,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) { // TODO test contents of incoming message m := bsmsg.New() - m.AppendBlock(testutil.NewBlockOrFail(t, expectedStr)) + m.AppendBlock(*blocks.NewBlock([]byte(expectedStr))) return from, m })) @@ -41,7 +41,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) { t.Log("Build a message and send a synchronous request to recipient") message := bsmsg.New() - message.AppendBlock(testutil.NewBlockOrFail(t, "data")) + message.AppendBlock(*blocks.NewBlock([]byte("data"))) response, err := initiator.SendRequest( context.Background(), &peer.Peer{ID: idOfRecipient}, message) if err != nil { @@ -77,7 +77,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { *peer.Peer, bsmsg.BitSwapMessage) { msgToWaiter := bsmsg.New() - msgToWaiter.AppendBlock(testutil.NewBlockOrFail(t, expectedStr)) + msgToWaiter.AppendBlock(*blocks.NewBlock([]byte(expectedStr))) return fromWaiter, msgToWaiter })) @@ -105,7 +105,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { })) messageSentAsync := bsmsg.New() - messageSentAsync.AppendBlock(testutil.NewBlockOrFail(t, "data")) + messageSentAsync.AppendBlock(*blocks.NewBlock([]byte("data"))) errSending := waiter.SendMessage( context.Background(), &peer.Peer{ID: idOfResponder}, messageSentAsync) if errSending != nil { From 7bb51e9e8116a5d28dab794af210f896e7e587bf Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Tue, 7 Oct 2014 21:27:47 -0700 Subject: [PATCH 0057/1035] bugfix: use consistent interface We'll want a `type blocks.Block interface {}` later, but for now, make sure Blockstore uses ptrs for both Get and Put. 
+ fix NewBlock output compile error This commit was moved from ipfs/go-bitswap@23c3ca5140101bd3116b4d3da8e2437c9d7350d7 --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index e4eaeb4a4..20f9d234c 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -134,7 +134,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p *peer.Peer, incoming bs for _, block := range incoming.Blocks() { // TODO verify blocks? - if err := bs.blockstore.Put(block); err != nil { + if err := bs.blockstore.Put(&block); err != nil { continue // FIXME(brian): err ignored } go bs.notifications.Publish(block) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index fd01aacd9..d1c92d8d0 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -67,7 +67,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { hasBlock := g.Next() - if err := hasBlock.blockstore.Put(*block); err != nil { + if err := hasBlock.blockstore.Put(block); err != nil { t.Fatal(err) } if err := hasBlock.exchange.HasBlock(context.Background(), *block); err != nil { @@ -106,7 +106,7 @@ func TestSwarm(t *testing.T) { first := instances[0] for _, b := range blocks { - first.blockstore.Put(*b) + first.blockstore.Put(b) first.exchange.HasBlock(context.Background(), *b) rs.Announce(first.peer, b.Key()) } @@ -177,7 +177,7 @@ func TestSendToWantingPeer(t *testing.T) { beta := bg.Next() t.Logf("Peer %v announes availability of %v\n", w.peer, beta.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) - if err := w.blockstore.Put(beta); err != nil { + if err := w.blockstore.Put(&beta); err != nil { t.Fatal(err) } w.exchange.HasBlock(ctx, beta) @@ -190,7 +190,7 @@ func TestSendToWantingPeer(t *testing.T) { t.Logf("%v announces availability of %v\n", o.peer, alpha.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) - if err := 
o.blockstore.Put(alpha); err != nil { + if err := o.blockstore.Put(&alpha); err != nil { t.Fatal(err) } o.exchange.HasBlock(ctx, alpha) From 268d0563a6ebc9e11ed19d32bba710c559a76601 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Thu, 9 Oct 2014 04:48:13 -0700 Subject: [PATCH 0058/1035] u.DOut -> log.Debug and other logging switches. I kept the u.PErr and u.POut in cli commands, as those do need to write raw output directly. This commit was moved from ipfs/go-bitswap@866f2538991810bc17804c0a05f4f5d4be3bb8b9 --- bitswap/bitswap.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 20f9d234c..819100cfe 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -15,6 +15,8 @@ import ( u "github.com/jbenet/go-ipfs/util" ) +var log = u.Logger("bitswap") + // NetMessageSession initializes a BitSwap session that communicates over the // provided NetMessage service func NetMessageSession(parent context.Context, p *peer.Peer, s bsnet.NetMessageService, directory bsnet.Routing, d ds.Datastore, nice bool) exchange.Interface { @@ -61,7 +63,7 @@ type bitswap struct { // // TODO ensure only one active request per key func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) { - u.DOut("Get Block %v\n", k) + log.Debug("Get Block %v", k) ctx, cancelFunc := context.WithCancel(parent) bs.wantlist.Add(k) @@ -77,7 +79,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) } message.AppendWanted(k) for iiiii := range peersToQuery { - // u.DOut("bitswap got peersToQuery: %s\n", iiiii) + // log.Debug("bitswap got peersToQuery: %s", iiiii) go func(p *peer.Peer) { response, err := bs.sender.SendRequest(ctx, p, message) if err != nil { @@ -110,7 +112,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) // HasBlock announces the existance of a block to bitswap, potentially sending // it to peers (Partners) whose WantLists 
include it. func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { - u.DOut("Has Block %v\n", blk.Key()) + log.Debug("Has Block %v", blk.Key()) bs.wantlist.Remove(blk.Key()) bs.sendToPeersThatWant(ctx, blk) return bs.routing.Provide(ctx, blk.Key()) @@ -119,7 +121,7 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { // TODO(brian): handle errors func (bs *bitswap) ReceiveMessage(ctx context.Context, p *peer.Peer, incoming bsmsg.BitSwapMessage) ( *peer.Peer, bsmsg.BitSwapMessage) { - u.DOut("ReceiveMessage from %v\n", p.Key()) + log.Debug("ReceiveMessage from %v", p.Key()) if p == nil { // TODO propagate the error upward @@ -173,10 +175,10 @@ func (bs *bitswap) send(ctx context.Context, p *peer.Peer, m bsmsg.BitSwapMessag } func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) { - u.DOut("Sending %v to peers that want it\n", block.Key()) + log.Debug("Sending %v to peers that want it", block.Key()) for _, p := range bs.strategy.Peers() { if bs.strategy.BlockIsWantedByPeer(block.Key(), p) { - u.DOut("%v wants %v\n", p, block.Key()) + log.Debug("%v wants %v", p, block.Key()) if bs.strategy.ShouldSendBlockToPeer(block.Key(), p) { message := bsmsg.New() message.AppendBlock(block) From 73d846be9800130b5c6aef475da772d0f9b692c0 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Fri, 10 Oct 2014 05:15:36 -0700 Subject: [PATCH 0059/1035] clean up and add inet.Network to bitswap new Service interface This commit was moved from ipfs/go-bitswap@bd392b81bfd05fa8f145ff1a35c72ae25b8bb9b6 --- bitswap/bitswap.go | 13 ++++++++++--- bitswap/network/interface.go | 9 --------- bitswap/network/net_message_adapter.go | 5 +++-- 3 files changed, 13 insertions(+), 14 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 819100cfe..7eb8870aa 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -11,6 +11,7 @@ import ( bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" notifications 
"github.com/jbenet/go-ipfs/exchange/bitswap/notifications" strategy "github.com/jbenet/go-ipfs/exchange/bitswap/strategy" + inet "github.com/jbenet/go-ipfs/net" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" ) @@ -19,14 +20,17 @@ var log = u.Logger("bitswap") // NetMessageSession initializes a BitSwap session that communicates over the // provided NetMessage service -func NetMessageSession(parent context.Context, p *peer.Peer, s bsnet.NetMessageService, directory bsnet.Routing, d ds.Datastore, nice bool) exchange.Interface { +func NetMessageSession(parent context.Context, p *peer.Peer, + net inet.Network, srv inet.Service, directory bsnet.Routing, + d ds.Datastore, nice bool) exchange.Interface { - networkAdapter := bsnet.NetMessageAdapter(s, nil) + networkAdapter := bsnet.NetMessageAdapter(srv, nil) bs := &bitswap{ blockstore: blockstore.NewBlockstore(d), notifications: notifications.New(), strategy: strategy.New(nice), routing: directory, + network: net, sender: networkAdapter, wantlist: u.NewKeySet(), } @@ -38,6 +42,9 @@ func NetMessageSession(parent context.Context, p *peer.Peer, s bsnet.NetMessageS // bitswap instances implement the bitswap protocol. type bitswap struct { + // network maintains connections to the outside world. 
+ network inet.Network + // sender delivers messages on behalf of the session sender bsnet.Adapter @@ -79,7 +86,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) } message.AppendWanted(k) for iiiii := range peersToQuery { - // log.Debug("bitswap got peersToQuery: %s", iiiii) + log.Debug("bitswap got peersToQuery: %s", iiiii) go func(p *peer.Peer) { response, err := bs.sender.SendRequest(ctx, p, message) if err != nil { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 611dea8cb..8985ecefc 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -2,10 +2,8 @@ package network import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - netservice "github.com/jbenet/go-ipfs/net/service" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" - netmsg "github.com/jbenet/go-ipfs/net/message" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" ) @@ -38,13 +36,6 @@ type Receiver interface { ReceiveError(error) } -// TODO(brian): move this to go-ipfs/net package -type NetMessageService interface { - SendRequest(ctx context.Context, m netmsg.NetMessage) (netmsg.NetMessage, error) - SendMessage(ctx context.Context, m netmsg.NetMessage) error - SetHandler(netservice.Handler) -} - // TODO rename -> Router? 
type Routing interface { // FindProvidersAsync returns a channel of providers for the given key diff --git a/bitswap/network/net_message_adapter.go b/bitswap/network/net_message_adapter.go index fe3bd6a36..a95e566cc 100644 --- a/bitswap/network/net_message_adapter.go +++ b/bitswap/network/net_message_adapter.go @@ -4,12 +4,13 @@ import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" + inet "github.com/jbenet/go-ipfs/net" netmsg "github.com/jbenet/go-ipfs/net/message" peer "github.com/jbenet/go-ipfs/peer" ) // NetMessageAdapter wraps a NetMessage network service -func NetMessageAdapter(s NetMessageService, r Receiver) Adapter { +func NetMessageAdapter(s inet.Service, r Receiver) Adapter { adapter := impl{ nms: s, receiver: r, @@ -20,7 +21,7 @@ func NetMessageAdapter(s NetMessageService, r Receiver) Adapter { // implements an Adapter that integrates with a NetMessage network service type impl struct { - nms NetMessageService + nms inet.Service // inbound messages from the network are forwarded to the receiver receiver Receiver From 4ed7deaa86f2f9ad7e5f951f519449e3b188f9ba Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Sat, 11 Oct 2014 06:31:03 -0700 Subject: [PATCH 0060/1035] bitswap dials peers Important bugfix. 
Otherwise bitswap cannot message peers the node has not connected to yet :( This commit was moved from ipfs/go-bitswap@0ee59e4b04e874cafca924deafd1bb8bd3c47b2e --- bitswap/bitswap.go | 14 +++++++++----- bitswap/network/interface.go | 3 +++ bitswap/network/net_message_adapter.go | 8 +++++++- bitswap/testnet/network.go | 16 ++++++++++++++++ 4 files changed, 35 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 7eb8870aa..2cfff3919 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -24,13 +24,12 @@ func NetMessageSession(parent context.Context, p *peer.Peer, net inet.Network, srv inet.Service, directory bsnet.Routing, d ds.Datastore, nice bool) exchange.Interface { - networkAdapter := bsnet.NetMessageAdapter(srv, nil) + networkAdapter := bsnet.NetMessageAdapter(srv, net, nil) bs := &bitswap{ blockstore: blockstore.NewBlockstore(d), notifications: notifications.New(), strategy: strategy.New(nice), routing: directory, - network: net, sender: networkAdapter, wantlist: u.NewKeySet(), } @@ -42,9 +41,6 @@ func NetMessageSession(parent context.Context, p *peer.Peer, // bitswap instances implement the bitswap protocol. type bitswap struct { - // network maintains connections to the outside world. 
- network inet.Network - // sender delivers messages on behalf of the session sender bsnet.Adapter @@ -88,8 +84,16 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) for iiiii := range peersToQuery { log.Debug("bitswap got peersToQuery: %s", iiiii) go func(p *peer.Peer) { + + err := bs.sender.DialPeer(p) + if err != nil { + log.Error("Error sender.DialPeer(%s)", p) + return + } + response, err := bs.sender.SendRequest(ctx, p, message) if err != nil { + log.Error("Error sender.SendRequest(%s)", p) return } // FIXME ensure accounting is handled correctly when diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 8985ecefc..03d7d3415 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -11,6 +11,9 @@ import ( // Adapter provides network connectivity for BitSwap sessions type Adapter interface { + // DialPeer ensures there is a connection to peer. + DialPeer(*peer.Peer) error + // SendMessage sends a BitSwap message to a peer. 
SendMessage( context.Context, diff --git a/bitswap/network/net_message_adapter.go b/bitswap/network/net_message_adapter.go index a95e566cc..ce0ae41dd 100644 --- a/bitswap/network/net_message_adapter.go +++ b/bitswap/network/net_message_adapter.go @@ -10,9 +10,10 @@ import ( ) // NetMessageAdapter wraps a NetMessage network service -func NetMessageAdapter(s inet.Service, r Receiver) Adapter { +func NetMessageAdapter(s inet.Service, n inet.Network, r Receiver) Adapter { adapter := impl{ nms: s, + net: n, receiver: r, } s.SetHandler(&adapter) @@ -22,6 +23,7 @@ func NetMessageAdapter(s inet.Service, r Receiver) Adapter { // implements an Adapter that integrates with a NetMessage network service type impl struct { nms inet.Service + net inet.Network // inbound messages from the network are forwarded to the receiver receiver Receiver @@ -58,6 +60,10 @@ func (adapter *impl) HandleMessage( return outgoing } +func (adapter *impl) DialPeer(p *peer.Peer) error { + return adapter.DialPeer(p) +} + func (adapter *impl) SendMessage( ctx context.Context, p *peer.Peer, diff --git a/bitswap/testnet/network.go b/bitswap/testnet/network.go index 4d5f8c35e..c3081337d 100644 --- a/bitswap/testnet/network.go +++ b/bitswap/testnet/network.go @@ -3,6 +3,7 @@ package bitswap import ( "bytes" "errors" + "fmt" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" @@ -14,6 +15,8 @@ import ( type Network interface { Adapter(*peer.Peer) bsnet.Adapter + HasPeer(*peer.Peer) bool + SendMessage( ctx context.Context, from *peer.Peer, @@ -49,6 +52,11 @@ func (n *network) Adapter(p *peer.Peer) bsnet.Adapter { return client } +func (n *network) HasPeer(p *peer.Peer) bool { + _, found := n.clients[p.Key()] + return found +} + // TODO should this be completely asynchronous? // TODO what does the network layer do with errors received from services? 
func (n *network) SendMessage( @@ -155,6 +163,14 @@ func (nc *networkClient) SendRequest( return nc.network.SendRequest(ctx, nc.local, to, message) } +func (nc *networkClient) DialPeer(p *peer.Peer) error { + // no need to do anything because dialing isn't a thing in this test net. + if !nc.network.HasPeer(p) { + return fmt.Errorf("Peer not in network: %s", p) + } + return nil +} + func (nc *networkClient) SetDelegate(r bsnet.Receiver) { nc.Receiver = r } From 4a8f03adc8094338ef5fece32ddd53024a719aef Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Mon, 13 Oct 2014 01:31:18 -0700 Subject: [PATCH 0061/1035] meant to call net.DialPeer This commit was moved from ipfs/go-bitswap@b51f66d12191ea4b34e0a730687e1570a98a6035 --- bitswap/network/net_message_adapter.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/network/net_message_adapter.go b/bitswap/network/net_message_adapter.go index ce0ae41dd..52f428076 100644 --- a/bitswap/network/net_message_adapter.go +++ b/bitswap/network/net_message_adapter.go @@ -61,7 +61,7 @@ func (adapter *impl) HandleMessage( } func (adapter *impl) DialPeer(p *peer.Peer) error { - return adapter.DialPeer(p) + return adapter.net.DialPeer(p) } func (adapter *impl) SendMessage( From cb5137d7997f20ff463dce567061ff64c9ea22c0 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Mon, 13 Oct 2014 01:31:51 -0700 Subject: [PATCH 0062/1035] logging + tweaks This commit was moved from ipfs/go-bitswap@897c70982d49de4f16860b0661bad2c091628b63 --- bitswap/bitswap.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 2cfff3919..af513c1de 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -85,6 +85,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) log.Debug("bitswap got peersToQuery: %s", iiiii) go func(p *peer.Peer) { + log.Debug("bitswap dialing peer: %s", p) err := bs.sender.DialPeer(p) if err != nil { log.Error("Error 
sender.DialPeer(%s)", p) From 05c93f6d255aa9012fbcec58377a634e7928814f Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Mon, 13 Oct 2014 05:05:59 -0700 Subject: [PATCH 0063/1035] iiii -> peerToQuery (that wasn't mine :p) This commit was moved from ipfs/go-bitswap@13395cbfd16274f4aa84a0c212e272cc91fc2ba1 --- bitswap/bitswap.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index af513c1de..b93b1a9b8 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -81,8 +81,8 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) message.AppendWanted(wanted) } message.AppendWanted(k) - for iiiii := range peersToQuery { - log.Debug("bitswap got peersToQuery: %s", iiiii) + for peerToQuery := range peersToQuery { + log.Debug("bitswap got peersToQuery: %s", peerToQuery) go func(p *peer.Peer) { log.Debug("bitswap dialing peer: %s", p) @@ -106,7 +106,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) return } bs.ReceiveMessage(ctx, p, response) - }(iiiii) + }(peerToQuery) } }() From 132111957595e38b9e7df01483aceccac5d6cc41 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Mon, 20 Oct 2014 03:26:44 -0700 Subject: [PATCH 0064/1035] peer.Peer is now an interface ![](http://m.memegen.com/77n7dk.jpg) This commit was moved from ipfs/go-bitswap@3ef5ef2e588f8a6b3f280260652addb7d9ade5e4 --- bitswap/bitswap.go | 10 +++---- bitswap/bitswap_test.go | 6 ++--- bitswap/message/message.go | 4 +-- bitswap/message/message_test.go | 7 ++--- bitswap/network/interface.go | 12 ++++----- bitswap/network/net_message_adapter.go | 6 ++--- bitswap/strategy/interface.go | 14 +++++----- bitswap/strategy/ledger.go | 4 +-- bitswap/strategy/strategy.go | 18 ++++++------- bitswap/strategy/strategy_test.go | 6 ++--- bitswap/testnet/network.go | 36 +++++++++++++------------- bitswap/testnet/network_test.go | 36 +++++++++++++------------- 12 files changed, 80 insertions(+), 79 
deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b93b1a9b8..4a3170fac 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -20,7 +20,7 @@ var log = u.Logger("bitswap") // NetMessageSession initializes a BitSwap session that communicates over the // provided NetMessage service -func NetMessageSession(parent context.Context, p *peer.Peer, +func NetMessageSession(parent context.Context, p peer.Peer, net inet.Network, srv inet.Service, directory bsnet.Routing, d ds.Datastore, nice bool) exchange.Interface { @@ -83,7 +83,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) message.AppendWanted(k) for peerToQuery := range peersToQuery { log.Debug("bitswap got peersToQuery: %s", peerToQuery) - go func(p *peer.Peer) { + go func(p peer.Peer) { log.Debug("bitswap dialing peer: %s", p) err := bs.sender.DialPeer(p) @@ -131,8 +131,8 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { } // TODO(brian): handle errors -func (bs *bitswap) ReceiveMessage(ctx context.Context, p *peer.Peer, incoming bsmsg.BitSwapMessage) ( - *peer.Peer, bsmsg.BitSwapMessage) { +func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsmsg.BitSwapMessage) ( + peer.Peer, bsmsg.BitSwapMessage) { log.Debug("ReceiveMessage from %v", p.Key()) if p == nil { @@ -181,7 +181,7 @@ func (bs *bitswap) ReceiveError(err error) { // send strives to ensure that accounting is always performed when a message is // sent -func (bs *bitswap) send(ctx context.Context, p *peer.Peer, m bsmsg.BitSwapMessage) { +func (bs *bitswap) send(ctx context.Context, p peer.Peer, m bsmsg.BitSwapMessage) { bs.sender.SendMessage(ctx, p, m) go bs.strategy.MessageSent(p, m) } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index d1c92d8d0..8a2f1f421 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -44,7 +44,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { g := 
NewSessionGenerator(net, rs) block := blocks.NewBlock([]byte("block")) - rs.Announce(&peer.Peer{}, block.Key()) // but not on network + rs.Announce(peer.WithIDString("testing"), block.Key()) // but not on network solo := g.Next() @@ -263,7 +263,7 @@ func (g *SessionGenerator) Instances(n int) []instance { } type instance struct { - peer *peer.Peer + peer peer.Peer exchange exchange.Interface blockstore bstore.Blockstore } @@ -274,7 +274,7 @@ type instance struct { // sessions. To safeguard, use the SessionGenerator to generate sessions. It's // just a much better idea. func session(net tn.Network, rs mock.RoutingServer, id peer.ID) instance { - p := &peer.Peer{ID: id} + p := peer.WithID(id) adapter := net.Adapter(p) htc := rs.Client(p) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index a724f7cc7..423cc329c 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -19,7 +19,7 @@ type BitSwapMessage interface { type Exportable interface { ToProto() *PBMessage - ToNet(p *peer.Peer) (nm.NetMessage, error) + ToNet(p peer.Peer) (nm.NetMessage, error) } // message wraps a proto message for convenience @@ -82,6 +82,6 @@ func (m *message) ToProto() *PBMessage { return pb } -func (m *message) ToNet(p *peer.Peer) (nm.NetMessage, error) { +func (m *message) ToNet(p peer.Peer) (nm.NetMessage, error) { return nm.FromObject(p, m.ToProto()) } diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index b5954eba8..5aa63ecc3 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -88,7 +88,7 @@ func TestCopyProtoByValue(t *testing.T) { func TestToNetMethodSetsPeer(t *testing.T) { m := New() - p := &peer.Peer{ID: []byte("X")} + p := peer.WithIDString("X") netmsg, err := m.ToNet(p) if err != nil { t.Fatal(err) @@ -106,7 +106,8 @@ func TestToNetFromNetPreservesWantList(t *testing.T) { original.AppendWanted(u.Key("T")) original.AppendWanted(u.Key("F")) - netmsg, err := 
original.ToNet(&peer.Peer{ID: []byte("X")}) + p := peer.WithIDString("X") + netmsg, err := original.ToNet(p) if err != nil { t.Fatal(err) } @@ -136,7 +137,7 @@ func TestToAndFromNetMessage(t *testing.T) { original.AppendBlock(*blocks.NewBlock([]byte("F"))) original.AppendBlock(*blocks.NewBlock([]byte("M"))) - p := &peer.Peer{ID: []byte("X")} + p := peer.WithIDString("X") netmsg, err := original.ToNet(p) if err != nil { t.Fatal(err) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 03d7d3415..467b0f400 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -12,18 +12,18 @@ import ( type Adapter interface { // DialPeer ensures there is a connection to peer. - DialPeer(*peer.Peer) error + DialPeer(peer.Peer) error // SendMessage sends a BitSwap message to a peer. SendMessage( context.Context, - *peer.Peer, + peer.Peer, bsmsg.BitSwapMessage) error // SendRequest sends a BitSwap message to a peer and waits for a response. SendRequest( context.Context, - *peer.Peer, + peer.Peer, bsmsg.BitSwapMessage) (incoming bsmsg.BitSwapMessage, err error) // SetDelegate registers the Reciver to handle messages received from the @@ -33,8 +33,8 @@ type Adapter interface { type Receiver interface { ReceiveMessage( - ctx context.Context, sender *peer.Peer, incoming bsmsg.BitSwapMessage) ( - destination *peer.Peer, outgoing bsmsg.BitSwapMessage) + ctx context.Context, sender peer.Peer, incoming bsmsg.BitSwapMessage) ( + destination peer.Peer, outgoing bsmsg.BitSwapMessage) ReceiveError(error) } @@ -42,7 +42,7 @@ type Receiver interface { // TODO rename -> Router? 
type Routing interface { // FindProvidersAsync returns a channel of providers for the given key - FindProvidersAsync(context.Context, u.Key, int) <-chan *peer.Peer + FindProvidersAsync(context.Context, u.Key, int) <-chan peer.Peer // Provide provides the key to the network Provide(context.Context, u.Key) error diff --git a/bitswap/network/net_message_adapter.go b/bitswap/network/net_message_adapter.go index 52f428076..3ae11a2c6 100644 --- a/bitswap/network/net_message_adapter.go +++ b/bitswap/network/net_message_adapter.go @@ -60,13 +60,13 @@ func (adapter *impl) HandleMessage( return outgoing } -func (adapter *impl) DialPeer(p *peer.Peer) error { +func (adapter *impl) DialPeer(p peer.Peer) error { return adapter.net.DialPeer(p) } func (adapter *impl) SendMessage( ctx context.Context, - p *peer.Peer, + p peer.Peer, outgoing bsmsg.BitSwapMessage) error { nmsg, err := outgoing.ToNet(p) @@ -78,7 +78,7 @@ func (adapter *impl) SendMessage( func (adapter *impl) SendRequest( ctx context.Context, - p *peer.Peer, + p peer.Peer, outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) { outgoingMsg, err := outgoing.ToNet(p) diff --git a/bitswap/strategy/interface.go b/bitswap/strategy/interface.go index 48097b027..ac1f09a1f 100644 --- a/bitswap/strategy/interface.go +++ b/bitswap/strategy/interface.go @@ -8,25 +8,25 @@ import ( type Strategy interface { // Returns a slice of Peers with whom the local node has active sessions - Peers() []*peer.Peer + Peers() []peer.Peer // BlockIsWantedByPeer returns true if peer wants the block given by this // key - BlockIsWantedByPeer(u.Key, *peer.Peer) bool + BlockIsWantedByPeer(u.Key, peer.Peer) bool // ShouldSendTo(Peer) decides whether to send data to this Peer - ShouldSendBlockToPeer(u.Key, *peer.Peer) bool + ShouldSendBlockToPeer(u.Key, peer.Peer) bool // Seed initializes the decider to a deterministic state Seed(int64) // MessageReceived records receipt of message for accounting purposes - MessageReceived(*peer.Peer, 
bsmsg.BitSwapMessage) error + MessageReceived(peer.Peer, bsmsg.BitSwapMessage) error // MessageSent records sending of message for accounting purposes - MessageSent(*peer.Peer, bsmsg.BitSwapMessage) error + MessageSent(peer.Peer, bsmsg.BitSwapMessage) error - NumBytesSentTo(*peer.Peer) uint64 + NumBytesSentTo(peer.Peer) uint64 - NumBytesReceivedFrom(*peer.Peer) uint64 + NumBytesReceivedFrom(peer.Peer) uint64 } diff --git a/bitswap/strategy/ledger.go b/bitswap/strategy/ledger.go index 34f301055..3700c1f43 100644 --- a/bitswap/strategy/ledger.go +++ b/bitswap/strategy/ledger.go @@ -12,7 +12,7 @@ import ( // access/lookups. type keySet map[u.Key]struct{} -func newLedger(p *peer.Peer, strategy strategyFunc) *ledger { +func newLedger(p peer.Peer, strategy strategyFunc) *ledger { return &ledger{ wantList: keySet{}, Strategy: strategy, @@ -25,7 +25,7 @@ type ledger struct { lock sync.RWMutex // Partner is the remote Peer. - Partner *peer.Peer + Partner peer.Peer // Accounting tracks bytes sent and recieved. 
Accounting debtRatio diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index 5d09f30b5..399d7777b 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -37,20 +37,20 @@ type ledgerMap map[peerKey]*ledger type peerKey u.Key // Peers returns a list of peers -func (s *strategist) Peers() []*peer.Peer { - response := make([]*peer.Peer, 0) +func (s *strategist) Peers() []peer.Peer { + response := make([]peer.Peer, 0) for _, ledger := range s.ledgerMap { response = append(response, ledger.Partner) } return response } -func (s *strategist) BlockIsWantedByPeer(k u.Key, p *peer.Peer) bool { +func (s *strategist) BlockIsWantedByPeer(k u.Key, p peer.Peer) bool { ledger := s.ledger(p) return ledger.WantListContains(k) } -func (s *strategist) ShouldSendBlockToPeer(k u.Key, p *peer.Peer) bool { +func (s *strategist) ShouldSendBlockToPeer(k u.Key, p peer.Peer) bool { ledger := s.ledger(p) return ledger.ShouldSend() } @@ -59,7 +59,7 @@ func (s *strategist) Seed(int64) { // TODO } -func (s *strategist) MessageReceived(p *peer.Peer, m bsmsg.BitSwapMessage) error { +func (s *strategist) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { // TODO find a more elegant way to handle this check if p == nil { return errors.New("Strategy received nil peer") @@ -84,7 +84,7 @@ func (s *strategist) MessageReceived(p *peer.Peer, m bsmsg.BitSwapMessage) error // inconsistent. 
Would need to ensure that Sends and acknowledgement of the // send happen atomically -func (s *strategist) MessageSent(p *peer.Peer, m bsmsg.BitSwapMessage) error { +func (s *strategist) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error { l := s.ledger(p) for _, block := range m.Blocks() { l.SentBytes(len(block.Data)) @@ -95,16 +95,16 @@ func (s *strategist) MessageSent(p *peer.Peer, m bsmsg.BitSwapMessage) error { return nil } -func (s *strategist) NumBytesSentTo(p *peer.Peer) uint64 { +func (s *strategist) NumBytesSentTo(p peer.Peer) uint64 { return s.ledger(p).Accounting.BytesSent } -func (s *strategist) NumBytesReceivedFrom(p *peer.Peer) uint64 { +func (s *strategist) NumBytesReceivedFrom(p peer.Peer) uint64 { return s.ledger(p).Accounting.BytesRecv } // ledger lazily instantiates a ledger -func (s *strategist) ledger(p *peer.Peer) *ledger { +func (s *strategist) ledger(p peer.Peer) *ledger { l, ok := s.ledgerMap[peerKey(p.Key())] if !ok { l = newLedger(p, s.strategyFunc) diff --git a/bitswap/strategy/strategy_test.go b/bitswap/strategy/strategy_test.go index dccc4a374..e3ffc05ea 100644 --- a/bitswap/strategy/strategy_test.go +++ b/bitswap/strategy/strategy_test.go @@ -10,13 +10,13 @@ import ( ) type peerAndStrategist struct { - *peer.Peer + peer.Peer Strategy } func newPeerAndStrategist(idStr string) peerAndStrategist { return peerAndStrategist{ - Peer: &peer.Peer{ID: peer.ID(idStr)}, + Peer: peer.WithIDString(idStr), Strategy: New(true), } } @@ -93,7 +93,7 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { } } -func peerIsPartner(p *peer.Peer, s Strategy) bool { +func peerIsPartner(p peer.Peer, s Strategy) bool { for _, partner := range s.Peers() { if partner.Key() == p.Key() { return true diff --git a/bitswap/testnet/network.go b/bitswap/testnet/network.go index c3081337d..418f75ce0 100644 --- a/bitswap/testnet/network.go +++ b/bitswap/testnet/network.go @@ -13,20 +13,20 @@ import ( ) type Network interface { - Adapter(*peer.Peer) 
bsnet.Adapter + Adapter(peer.Peer) bsnet.Adapter - HasPeer(*peer.Peer) bool + HasPeer(peer.Peer) bool SendMessage( ctx context.Context, - from *peer.Peer, - to *peer.Peer, + from peer.Peer, + to peer.Peer, message bsmsg.BitSwapMessage) error SendRequest( ctx context.Context, - from *peer.Peer, - to *peer.Peer, + from peer.Peer, + to peer.Peer, message bsmsg.BitSwapMessage) ( incoming bsmsg.BitSwapMessage, err error) } @@ -43,7 +43,7 @@ type network struct { clients map[util.Key]bsnet.Receiver } -func (n *network) Adapter(p *peer.Peer) bsnet.Adapter { +func (n *network) Adapter(p peer.Peer) bsnet.Adapter { client := &networkClient{ local: p, network: n, @@ -52,7 +52,7 @@ func (n *network) Adapter(p *peer.Peer) bsnet.Adapter { return client } -func (n *network) HasPeer(p *peer.Peer) bool { +func (n *network) HasPeer(p peer.Peer) bool { _, found := n.clients[p.Key()] return found } @@ -61,8 +61,8 @@ func (n *network) HasPeer(p *peer.Peer) bool { // TODO what does the network layer do with errors received from services? 
func (n *network) SendMessage( ctx context.Context, - from *peer.Peer, - to *peer.Peer, + from peer.Peer, + to peer.Peer, message bsmsg.BitSwapMessage) error { receiver, ok := n.clients[to.Key()] @@ -79,7 +79,7 @@ func (n *network) SendMessage( } func (n *network) deliver( - r bsnet.Receiver, from *peer.Peer, message bsmsg.BitSwapMessage) error { + r bsnet.Receiver, from peer.Peer, message bsmsg.BitSwapMessage) error { if message == nil || from == nil { return errors.New("Invalid input") } @@ -107,8 +107,8 @@ var NoResponse = errors.New("No response received from the receiver") // TODO func (n *network) SendRequest( ctx context.Context, - from *peer.Peer, - to *peer.Peer, + from peer.Peer, + to peer.Peer, message bsmsg.BitSwapMessage) ( incoming bsmsg.BitSwapMessage, err error) { @@ -130,7 +130,7 @@ func (n *network) SendRequest( } // TODO test when receiver doesn't immediately respond to the initiator of the request - if !bytes.Equal(nextPeer.ID, from.ID) { + if !bytes.Equal(nextPeer.ID(), from.ID()) { go func() { nextReceiver, ok := n.clients[nextPeer.Key()] if !ok { @@ -144,26 +144,26 @@ func (n *network) SendRequest( } type networkClient struct { - local *peer.Peer + local peer.Peer bsnet.Receiver network Network } func (nc *networkClient) SendMessage( ctx context.Context, - to *peer.Peer, + to peer.Peer, message bsmsg.BitSwapMessage) error { return nc.network.SendMessage(ctx, nc.local, to, message) } func (nc *networkClient) SendRequest( ctx context.Context, - to *peer.Peer, + to peer.Peer, message bsmsg.BitSwapMessage) (incoming bsmsg.BitSwapMessage, err error) { return nc.network.SendRequest(ctx, nc.local, to, message) } -func (nc *networkClient) DialPeer(p *peer.Peer) error { +func (nc *networkClient) DialPeer(p peer.Peer) error { // no need to do anything because dialing isn't a thing in this test net. 
if !nc.network.HasPeer(p) { return fmt.Errorf("Peer not in network: %s", p) diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index fbd7c8893..c2cc28f8d 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -18,15 +18,15 @@ func TestSendRequestToCooperativePeer(t *testing.T) { t.Log("Get two network adapters") - initiator := net.Adapter(&peer.Peer{ID: []byte("initiator")}) - recipient := net.Adapter(&peer.Peer{ID: idOfRecipient}) + initiator := net.Adapter(peer.WithIDString("initiator")) + recipient := net.Adapter(peer.WithID(idOfRecipient)) expectedStr := "response from recipient" recipient.SetDelegate(lambda(func( ctx context.Context, - from *peer.Peer, + from peer.Peer, incoming bsmsg.BitSwapMessage) ( - *peer.Peer, bsmsg.BitSwapMessage) { + peer.Peer, bsmsg.BitSwapMessage) { t.Log("Recipient received a message from the network") @@ -43,7 +43,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) { message := bsmsg.New() message.AppendBlock(*blocks.NewBlock([]byte("data"))) response, err := initiator.SendRequest( - context.Background(), &peer.Peer{ID: idOfRecipient}, message) + context.Background(), peer.WithID(idOfRecipient), message) if err != nil { t.Fatal(err) } @@ -61,8 +61,8 @@ func TestSendRequestToCooperativePeer(t *testing.T) { func TestSendMessageAsyncButWaitForResponse(t *testing.T) { net := VirtualNetwork() idOfResponder := []byte("responder") - waiter := net.Adapter(&peer.Peer{ID: []byte("waiter")}) - responder := net.Adapter(&peer.Peer{ID: idOfResponder}) + waiter := net.Adapter(peer.WithIDString("waiter")) + responder := net.Adapter(peer.WithID(idOfResponder)) var wg sync.WaitGroup @@ -72,9 +72,9 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { responder.SetDelegate(lambda(func( ctx context.Context, - fromWaiter *peer.Peer, + fromWaiter peer.Peer, msgFromWaiter bsmsg.BitSwapMessage) ( - *peer.Peer, bsmsg.BitSwapMessage) { + peer.Peer, bsmsg.BitSwapMessage) { msgToWaiter 
:= bsmsg.New() msgToWaiter.AppendBlock(*blocks.NewBlock([]byte(expectedStr))) @@ -84,9 +84,9 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { waiter.SetDelegate(lambda(func( ctx context.Context, - fromResponder *peer.Peer, + fromResponder peer.Peer, msgFromResponder bsmsg.BitSwapMessage) ( - *peer.Peer, bsmsg.BitSwapMessage) { + peer.Peer, bsmsg.BitSwapMessage) { // TODO assert that this came from the correct peer and that the message contents are as expected ok := false @@ -107,7 +107,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { messageSentAsync := bsmsg.New() messageSentAsync.AppendBlock(*blocks.NewBlock([]byte("data"))) errSending := waiter.SendMessage( - context.Background(), &peer.Peer{ID: idOfResponder}, messageSentAsync) + context.Background(), peer.WithID(idOfResponder), messageSentAsync) if errSending != nil { t.Fatal(errSending) } @@ -115,8 +115,8 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { wg.Wait() // until waiter delegate function is executed } -type receiverFunc func(ctx context.Context, p *peer.Peer, - incoming bsmsg.BitSwapMessage) (*peer.Peer, bsmsg.BitSwapMessage) +type receiverFunc func(ctx context.Context, p peer.Peer, + incoming bsmsg.BitSwapMessage) (peer.Peer, bsmsg.BitSwapMessage) // lambda returns a Receiver instance given a receiver function func lambda(f receiverFunc) bsnet.Receiver { @@ -126,13 +126,13 @@ func lambda(f receiverFunc) bsnet.Receiver { } type lambdaImpl struct { - f func(ctx context.Context, p *peer.Peer, incoming bsmsg.BitSwapMessage) ( - *peer.Peer, bsmsg.BitSwapMessage) + f func(ctx context.Context, p peer.Peer, incoming bsmsg.BitSwapMessage) ( + peer.Peer, bsmsg.BitSwapMessage) } func (lam *lambdaImpl) ReceiveMessage(ctx context.Context, - p *peer.Peer, incoming bsmsg.BitSwapMessage) ( - *peer.Peer, bsmsg.BitSwapMessage) { + p peer.Peer, incoming bsmsg.BitSwapMessage) ( + peer.Peer, bsmsg.BitSwapMessage) { return lam.f(ctx, p, incoming) } From 
a8edb61272c6957f1c28cd09ea94deecbdf1ca9f Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Tue, 21 Oct 2014 15:10:58 -0700 Subject: [PATCH 0065/1035] renamed datastore.go -> go-datastore This commit was moved from ipfs/go-bitswap@43ecec8520589b59981608e24dd808889f4116d2 --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 4a3170fac..19ee6e2fc 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -2,7 +2,7 @@ package bitswap import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" blocks "github.com/jbenet/go-ipfs/blocks" blockstore "github.com/jbenet/go-ipfs/blockstore" diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 8a2f1f421..4c881a04e 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -8,7 +8,7 @@ import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" blocks "github.com/jbenet/go-ipfs/blocks" bstore "github.com/jbenet/go-ipfs/blockstore" exchange "github.com/jbenet/go-ipfs/exchange" From f2338d98d09dd7be7f2570b24f6b00de866b5834 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 22 Oct 2014 05:09:01 -0700 Subject: [PATCH 0066/1035] refactor(exchange/bitswap) move proto to internal pb package This commit was moved from ipfs/go-bitswap@a49858105947f8da2e38b028d0c30d4d2820db2d --- bitswap/message/{ => internal/pb}/message.pb.go | 2 +- bitswap/message/{ => internal/pb}/message.proto | 2 +- bitswap/message/message.go | 11 ++++++----- bitswap/message/message_test.go | 5 
+++-- 4 files changed, 11 insertions(+), 9 deletions(-) rename bitswap/message/{ => internal/pb}/message.pb.go (98%) rename bitswap/message/{ => internal/pb}/message.proto (82%) diff --git a/bitswap/message/message.pb.go b/bitswap/message/internal/pb/message.pb.go similarity index 98% rename from bitswap/message/message.pb.go rename to bitswap/message/internal/pb/message.pb.go index d1089f5c9..1ee209151 100644 --- a/bitswap/message/message.pb.go +++ b/bitswap/message/internal/pb/message.pb.go @@ -11,7 +11,7 @@ It is generated from these files: It has these top-level messages: PBMessage */ -package message +package pb import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" import math "math" diff --git a/bitswap/message/message.proto b/bitswap/message/internal/pb/message.proto similarity index 82% rename from bitswap/message/message.proto rename to bitswap/message/internal/pb/message.proto index a0e4d1997..5e61bd9d7 100644 --- a/bitswap/message/message.proto +++ b/bitswap/message/internal/pb/message.proto @@ -1,4 +1,4 @@ -package message; +package pb; message PBMessage { repeated string wantlist = 1; diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 423cc329c..3717353dd 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -3,6 +3,7 @@ package message import ( proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" blocks "github.com/jbenet/go-ipfs/blocks" + pb "github.com/jbenet/go-ipfs/exchange/bitswap/message/internal/pb" netmsg "github.com/jbenet/go-ipfs/net/message" nm "github.com/jbenet/go-ipfs/net/message" peer "github.com/jbenet/go-ipfs/peer" @@ -18,7 +19,7 @@ type BitSwapMessage interface { } type Exportable interface { - ToProto() *PBMessage + ToProto() *pb.PBMessage ToNet(p peer.Peer) (nm.NetMessage, error) } @@ -32,7 +33,7 @@ func New() *message { return new(message) } -func newMessageFromProto(pbm PBMessage) BitSwapMessage { +func 
newMessageFromProto(pbm pb.PBMessage) BitSwapMessage { m := New() for _, s := range pbm.GetWantlist() { m.AppendWanted(u.Key(s)) @@ -63,7 +64,7 @@ func (m *message) AppendBlock(b blocks.Block) { } func FromNet(nmsg netmsg.NetMessage) (BitSwapMessage, error) { - pb := new(PBMessage) + pb := new(pb.PBMessage) if err := proto.Unmarshal(nmsg.Data(), pb); err != nil { return nil, err } @@ -71,8 +72,8 @@ func FromNet(nmsg netmsg.NetMessage) (BitSwapMessage, error) { return m, nil } -func (m *message) ToProto() *PBMessage { - pb := new(PBMessage) +func (m *message) ToProto() *pb.PBMessage { + pb := new(pb.PBMessage) for _, k := range m.Wantlist() { pb.Wantlist = append(pb.Wantlist, string(k)) } diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 5aa63ecc3..33174b2e2 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -4,7 +4,8 @@ import ( "bytes" "testing" - "github.com/jbenet/go-ipfs/blocks" + blocks "github.com/jbenet/go-ipfs/blocks" + pb "github.com/jbenet/go-ipfs/exchange/bitswap/message/internal/pb" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" ) @@ -21,7 +22,7 @@ func TestAppendWanted(t *testing.T) { func TestNewMessageFromProto(t *testing.T) { const str = "a_key" - protoMessage := new(PBMessage) + protoMessage := new(pb.PBMessage) protoMessage.Wantlist = []string{string(str)} if !contains(protoMessage.Wantlist, str) { t.Fail() From 32bbd31977592d4a1e824c9596f1ff8c7cc5ab09 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 22 Oct 2014 13:59:24 -0700 Subject: [PATCH 0067/1035] fix(exch/bs/pb) rename proto package -> bitswap_message_pb This commit was moved from ipfs/go-bitswap@39136e02ee8a5fc6f0c533321c0eee652227d72c --- bitswap/message/internal/pb/message.pb.go | 8 ++++---- bitswap/message/internal/pb/message.proto | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/message/internal/pb/message.pb.go 
b/bitswap/message/internal/pb/message.pb.go index 1ee209151..bd08e84bd 100644 --- a/bitswap/message/internal/pb/message.pb.go +++ b/bitswap/message/internal/pb/message.pb.go @@ -1,9 +1,9 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-gogo. // source: message.proto // DO NOT EDIT! /* -Package bitswap is a generated protocol buffer package. +Package bitswap_message_pb is a generated protocol buffer package. It is generated from these files: message.proto @@ -11,9 +11,9 @@ It is generated from these files: It has these top-level messages: PBMessage */ -package pb +package bitswap_message_pb -import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" +import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" import math "math" // Reference imports to suppress errors if they are not otherwise used. diff --git a/bitswap/message/internal/pb/message.proto b/bitswap/message/internal/pb/message.proto index 5e61bd9d7..36cdbfd6e 100644 --- a/bitswap/message/internal/pb/message.proto +++ b/bitswap/message/internal/pb/message.proto @@ -1,4 +1,4 @@ -package pb; +package bitswap.message.pb; message PBMessage { repeated string wantlist = 1; From 168245480a15127a5bf9ba340ca84358ad502815 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 22 Oct 2014 14:41:17 -0700 Subject: [PATCH 0068/1035] misc(exch/bitswap) add TODOs This commit was moved from ipfs/go-bitswap@ae109ad1612db75a5b04bc30430902b264be0fe7 --- bitswap/message/message.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 3717353dd..1f9f1a4bd 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -10,6 +10,9 @@ import ( u "github.com/jbenet/go-ipfs/util" ) +// TODO move message.go into the bitswap package +// TODO move bs/msg/internal/pb to bs/internal/pb and rename pb package to bitswap_pb + type BitSwapMessage interface { 
Wantlist() []u.Key Blocks() []blocks.Block From 19c1a1c0d0ba0e6b139f7c65335315afa4d9ba2f Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 22 Oct 2014 21:44:37 -0700 Subject: [PATCH 0069/1035] refactor(bitswap) mv proto PBMessage -> Message This commit was moved from ipfs/go-bitswap@ed584b1e1ac75e02509a7c17f744147fe2a6cbc8 --- bitswap/message/internal/pb/message.pb.go | 14 +++++++------- bitswap/message/internal/pb/message.proto | 2 +- bitswap/message/message.go | 10 +++++----- bitswap/message/message_test.go | 2 +- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/bitswap/message/internal/pb/message.pb.go b/bitswap/message/internal/pb/message.pb.go index bd08e84bd..f6f8a9bbc 100644 --- a/bitswap/message/internal/pb/message.pb.go +++ b/bitswap/message/internal/pb/message.pb.go @@ -9,7 +9,7 @@ It is generated from these files: message.proto It has these top-level messages: - PBMessage + Message */ package bitswap_message_pb @@ -20,24 +20,24 @@ import math "math" var _ = proto.Marshal var _ = math.Inf -type PBMessage struct { +type Message struct { Wantlist []string `protobuf:"bytes,1,rep,name=wantlist" json:"wantlist,omitempty"` Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks" json:"blocks,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *PBMessage) Reset() { *m = PBMessage{} } -func (m *PBMessage) String() string { return proto.CompactTextString(m) } -func (*PBMessage) ProtoMessage() {} +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} -func (m *PBMessage) GetWantlist() []string { +func (m *Message) GetWantlist() []string { if m != nil { return m.Wantlist } return nil } -func (m *PBMessage) GetBlocks() [][]byte { +func (m *Message) GetBlocks() [][]byte { if m != nil { return m.Blocks } diff --git a/bitswap/message/internal/pb/message.proto b/bitswap/message/internal/pb/message.proto index 36cdbfd6e..a8c6c7252 100644 --- 
a/bitswap/message/internal/pb/message.proto +++ b/bitswap/message/internal/pb/message.proto @@ -1,6 +1,6 @@ package bitswap.message.pb; -message PBMessage { +message Message { repeated string wantlist = 1; repeated bytes blocks = 2; } diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 1f9f1a4bd..b7216b024 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -22,7 +22,7 @@ type BitSwapMessage interface { } type Exportable interface { - ToProto() *pb.PBMessage + ToProto() *pb.Message ToNet(p peer.Peer) (nm.NetMessage, error) } @@ -36,7 +36,7 @@ func New() *message { return new(message) } -func newMessageFromProto(pbm pb.PBMessage) BitSwapMessage { +func newMessageFromProto(pbm pb.Message) BitSwapMessage { m := New() for _, s := range pbm.GetWantlist() { m.AppendWanted(u.Key(s)) @@ -67,7 +67,7 @@ func (m *message) AppendBlock(b blocks.Block) { } func FromNet(nmsg netmsg.NetMessage) (BitSwapMessage, error) { - pb := new(pb.PBMessage) + pb := new(pb.Message) if err := proto.Unmarshal(nmsg.Data(), pb); err != nil { return nil, err } @@ -75,8 +75,8 @@ func FromNet(nmsg netmsg.NetMessage) (BitSwapMessage, error) { return m, nil } -func (m *message) ToProto() *pb.PBMessage { - pb := new(pb.PBMessage) +func (m *message) ToProto() *pb.Message { + pb := new(pb.Message) for _, k := range m.Wantlist() { pb.Wantlist = append(pb.Wantlist, string(k)) } diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 33174b2e2..932c14e9b 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -22,7 +22,7 @@ func TestAppendWanted(t *testing.T) { func TestNewMessageFromProto(t *testing.T) { const str = "a_key" - protoMessage := new(pb.PBMessage) + protoMessage := new(pb.Message) protoMessage.Wantlist = []string{string(str)} if !contains(protoMessage.Wantlist, str) { t.Fail() From 39c5a7773021e5fd3e35874721f73a1d6a3f2a0b Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 24 Oct 2014 
16:14:12 -0700 Subject: [PATCH 0070/1035] fix(blockstore, bitswap) enforce threadsafety in blockstore fixes data race detected in a testnet test This commit was moved from ipfs/go-bitswap@a1ca02ea979c2853e85ca7296e9d6035327d8588 --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 19ee6e2fc..89ddbc821 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -22,7 +22,7 @@ var log = u.Logger("bitswap") // provided NetMessage service func NetMessageSession(parent context.Context, p peer.Peer, net inet.Network, srv inet.Service, directory bsnet.Routing, - d ds.Datastore, nice bool) exchange.Interface { + d ds.ThreadSafeDatastore, nice bool) exchange.Interface { networkAdapter := bsnet.NetMessageAdapter(srv, net, nil) bs := &bitswap{ diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 4c881a04e..f34ea3c84 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -9,6 +9,7 @@ import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + ds_sync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" blocks "github.com/jbenet/go-ipfs/blocks" bstore "github.com/jbenet/go-ipfs/blockstore" exchange "github.com/jbenet/go-ipfs/exchange" @@ -279,7 +280,7 @@ func session(net tn.Network, rs mock.RoutingServer, id peer.ID) instance { adapter := net.Adapter(p) htc := rs.Client(p) - blockstore := bstore.NewBlockstore(ds.NewMapDatastore()) + blockstore := bstore.NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore())) const alwaysSendToPeer = true bs := &bitswap{ blockstore: blockstore, From 3951b01cbe1ddc9a61b4cc6b7e7fc7f8dd0e23e8 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 24 Oct 2014 16:15:48 -0700 Subject: [PATCH 0071/1035] fix(bitswap) move mutex up to strategy from 
ledger addresses concurrent access in bitswap session This commit was moved from ipfs/go-bitswap@4b21e4db40a6a22ec596ad0dc6e0524ff936d914 --- bitswap/strategy/ledger.go | 21 +-------------------- bitswap/strategy/ledger_test.go | 22 ---------------------- bitswap/strategy/strategy.go | 26 ++++++++++++++++++++++++++ 3 files changed, 27 insertions(+), 42 deletions(-) diff --git a/bitswap/strategy/ledger.go b/bitswap/strategy/ledger.go index 3700c1f43..9f33b1aba 100644 --- a/bitswap/strategy/ledger.go +++ b/bitswap/strategy/ledger.go @@ -1,7 +1,6 @@ package strategy import ( - "sync" "time" peer "github.com/jbenet/go-ipfs/peer" @@ -21,9 +20,8 @@ func newLedger(p peer.Peer, strategy strategyFunc) *ledger { } // ledger stores the data exchange relationship between two peers. +// NOT threadsafe type ledger struct { - lock sync.RWMutex - // Partner is the remote Peer. Partner peer.Peer @@ -46,25 +44,16 @@ type ledger struct { } func (l *ledger) ShouldSend() bool { - l.lock.Lock() - defer l.lock.Unlock() - return l.Strategy(l) } func (l *ledger) SentBytes(n int) { - l.lock.Lock() - defer l.lock.Unlock() - l.exchangeCount++ l.lastExchange = time.Now() l.Accounting.BytesSent += uint64(n) } func (l *ledger) ReceivedBytes(n int) { - l.lock.Lock() - defer l.lock.Unlock() - l.exchangeCount++ l.lastExchange = time.Now() l.Accounting.BytesRecv += uint64(n) @@ -72,22 +61,14 @@ func (l *ledger) ReceivedBytes(n int) { // TODO: this needs to be different. We need timeouts. 
func (l *ledger) Wants(k u.Key) { - l.lock.Lock() - defer l.lock.Unlock() - l.wantList[k] = struct{}{} } func (l *ledger) WantListContains(k u.Key) bool { - l.lock.RLock() - defer l.lock.RUnlock() - _, ok := l.wantList[k] return ok } func (l *ledger) ExchangeCount() uint64 { - l.lock.RLock() - defer l.lock.RUnlock() return l.exchangeCount } diff --git a/bitswap/strategy/ledger_test.go b/bitswap/strategy/ledger_test.go index 0fdfae0cc..4271d525c 100644 --- a/bitswap/strategy/ledger_test.go +++ b/bitswap/strategy/ledger_test.go @@ -1,23 +1 @@ package strategy - -import ( - "sync" - "testing" -) - -func TestRaceConditions(t *testing.T) { - const numberOfExpectedExchanges = 10000 - l := new(ledger) - var wg sync.WaitGroup - for i := 0; i < numberOfExpectedExchanges; i++ { - wg.Add(1) - go func() { - defer wg.Done() - l.ReceivedBytes(1) - }() - } - wg.Wait() - if l.ExchangeCount() != numberOfExpectedExchanges { - t.Fail() - } -} diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index 399d7777b..42cbe7773 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -2,6 +2,7 @@ package strategy import ( "errors" + "sync" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" "github.com/jbenet/go-ipfs/peer" @@ -26,6 +27,7 @@ func New(nice bool) Strategy { } type strategist struct { + lock sync.RWMutex ledgerMap strategyFunc } @@ -38,6 +40,9 @@ type peerKey u.Key // Peers returns a list of peers func (s *strategist) Peers() []peer.Peer { + s.lock.RLock() + defer s.lock.RUnlock() + response := make([]peer.Peer, 0) for _, ledger := range s.ledgerMap { response = append(response, ledger.Partner) @@ -46,20 +51,32 @@ func (s *strategist) Peers() []peer.Peer { } func (s *strategist) BlockIsWantedByPeer(k u.Key, p peer.Peer) bool { + s.lock.RLock() + defer s.lock.RUnlock() + ledger := s.ledger(p) return ledger.WantListContains(k) } func (s *strategist) ShouldSendBlockToPeer(k u.Key, p peer.Peer) bool { + s.lock.RLock() + defer 
s.lock.RUnlock() + ledger := s.ledger(p) return ledger.ShouldSend() } func (s *strategist) Seed(int64) { + s.lock.Lock() + defer s.lock.Unlock() + // TODO } func (s *strategist) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { + s.lock.Lock() + defer s.lock.Unlock() + // TODO find a more elegant way to handle this check if p == nil { return errors.New("Strategy received nil peer") @@ -85,6 +102,9 @@ func (s *strategist) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error // send happen atomically func (s *strategist) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error { + s.lock.Lock() + defer s.lock.Unlock() + l := s.ledger(p) for _, block := range m.Blocks() { l.SentBytes(len(block.Data)) @@ -96,10 +116,16 @@ func (s *strategist) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error { } func (s *strategist) NumBytesSentTo(p peer.Peer) uint64 { + s.lock.RLock() + defer s.lock.RUnlock() + return s.ledger(p).Accounting.BytesSent } func (s *strategist) NumBytesReceivedFrom(p peer.Peer) uint64 { + s.lock.RLock() + defer s.lock.RUnlock() + return s.ledger(p).Accounting.BytesRecv } From b59f1c02d6d79669c7d8ead243785c22f3bb5272 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Sat, 25 Oct 2014 03:17:14 -0700 Subject: [PATCH 0072/1035] go-vet friendly codebase - distinguish log.Error and log.Errorf functions - Initialize structs with field names - A bit of unreachable code (defers) This commit was moved from ipfs/go-bitswap@fc71f990b92815e85f21bd679c231c112a702734 --- bitswap/bitswap.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 89ddbc821..64dcf96a8 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -88,13 +88,13 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) log.Debug("bitswap dialing peer: %s", p) err := bs.sender.DialPeer(p) if err != nil { - log.Error("Error sender.DialPeer(%s)", p) + log.Errorf("Error sender.DialPeer(%s)", 
p) return } response, err := bs.sender.SendRequest(ctx, p, message) if err != nil { - log.Error("Error sender.SendRequest(%s)", p) + log.Errorf("Error sender.SendRequest(%s)", p) return } // FIXME ensure accounting is handled correctly when From 23e51f2600b86ac54bab3a33c77c8c5ce238422d Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 24 Oct 2014 17:16:57 -0700 Subject: [PATCH 0073/1035] fix(bitswap) rm todo This commit was moved from ipfs/go-bitswap@090a205626960993179801e77794b57be2bc501b --- bitswap/strategy/strategy.go | 1 - 1 file changed, 1 deletion(-) diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index 42cbe7773..1f1bd9049 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -9,7 +9,6 @@ import ( u "github.com/jbenet/go-ipfs/util" ) -// TODO declare thread-safe datastore // TODO niceness should be on a per-peer basis. Use-case: Certain peers are // "trusted" and/or controlled by a single human user. The user may want for // these peers to exchange data freely From 66f8ef072fdbcf970d4b8ae44069da9032f593e0 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 24 Oct 2014 17:19:17 -0700 Subject: [PATCH 0074/1035] style(bitswap) import This commit was moved from ipfs/go-bitswap@ba0d68c0e9b41ff3f4b22257855db24e58a6ca06 --- bitswap/strategy/strategy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index 1f1bd9049..b778c7a34 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -5,7 +5,7 @@ import ( "sync" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" - "github.com/jbenet/go-ipfs/peer" + peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" ) From 72bf098d0439d535d150b394dbfcea662e670896 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 25 Oct 2014 03:36:00 -0700 Subject: [PATCH 0075/1035] add in dag removal This commit was moved from 
ipfs/go-bitswap@e5876d9a8ed7e73d00d911790dc5eb5aee0527d4 --- bitswap/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 64dcf96a8..9d3abccc2 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -94,7 +94,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) response, err := bs.sender.SendRequest(ctx, p, message) if err != nil { - log.Errorf("Error sender.SendRequest(%s)", p) + log.Error("Error sender.SendRequest(%s) = %s", p, err) return } // FIXME ensure accounting is handled correctly when From 8fe9fa338d9010aa26301697dda35c3d934d8f82 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 25 Oct 2014 14:50:22 -0700 Subject: [PATCH 0076/1035] logging, logging, and some minor logging This commit was moved from ipfs/go-bitswap@ce2404ec64a5e8dc869581aa749d36a85fbd8280 --- bitswap/bitswap.go | 31 +++++++++++++++++++------- bitswap/network/net_message_adapter.go | 3 +++ 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 9d3abccc2..f631c651c 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -66,7 +66,7 @@ type bitswap struct { // // TODO ensure only one active request per key func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) { - log.Debug("Get Block %v", k) + log.Debugf("Get Block %v", k) ctx, cancelFunc := context.WithCancel(parent) bs.wantlist.Add(k) @@ -82,10 +82,10 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) } message.AppendWanted(k) for peerToQuery := range peersToQuery { - log.Debug("bitswap got peersToQuery: %s", peerToQuery) + log.Debugf("bitswap got peersToQuery: %s", peerToQuery) go func(p peer.Peer) { - log.Debug("bitswap dialing peer: %s", p) + log.Debugf("bitswap dialing peer: %s", p) err := bs.sender.DialPeer(p) if err != nil { log.Errorf("Error sender.DialPeer(%s)", p) @@ -124,7 +124,7 @@ func (bs *bitswap) 
Block(parent context.Context, k u.Key) (*blocks.Block, error) // HasBlock announces the existance of a block to bitswap, potentially sending // it to peers (Partners) whose WantLists include it. func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { - log.Debug("Has Block %v", blk.Key()) + log.Debugf("Has Block %v", blk.Key()) bs.wantlist.Remove(blk.Key()) bs.sendToPeersThatWant(ctx, blk) return bs.routing.Provide(ctx, blk.Key()) @@ -133,17 +133,24 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { // TODO(brian): handle errors func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsmsg.BitSwapMessage) ( peer.Peer, bsmsg.BitSwapMessage) { - log.Debug("ReceiveMessage from %v", p.Key()) + log.Debugf("ReceiveMessage from %v", p.Key()) + log.Debugf("Message wantlist: %v", incoming.Wantlist()) + log.Debugf("Message blockset: %v", incoming.Blocks()) if p == nil { + log.Error("Received message from nil peer!") // TODO propagate the error upward return nil, nil } if incoming == nil { + log.Error("Got nil bitswap message!") // TODO propagate the error upward return nil, nil } + // Record message bytes in ledger + // TODO: this is bad, and could be easily abused. 
+ // Should only track *useful* messages in ledger bs.strategy.MessageReceived(p, incoming) // FIRST for _, block := range incoming.Blocks() { @@ -153,7 +160,10 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm } go bs.notifications.Publish(block) go func(block blocks.Block) { - _ = bs.HasBlock(ctx, block) // FIXME err ignored + err := bs.HasBlock(ctx, block) // FIXME err ignored + if err != nil { + log.Errorf("HasBlock errored: %s", err) + } }(block) } @@ -162,6 +172,8 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm message.AppendWanted(wanted) } for _, key := range incoming.Wantlist() { + // TODO: might be better to check if we have the block before checking + // if we should send it to someone if bs.strategy.ShouldSendBlockToPeer(key, p) { if block, errBlockNotFound := bs.blockstore.Get(key); errBlockNotFound != nil { continue @@ -171,10 +183,13 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm } } defer bs.strategy.MessageSent(p, message) + + log.Debug("Returning message.") return p, message } func (bs *bitswap) ReceiveError(err error) { + log.Errorf("Bitswap ReceiveError: %s", err) // TODO log the network error // TODO bubble the network error up to the parent context/error logger } @@ -187,10 +202,10 @@ func (bs *bitswap) send(ctx context.Context, p peer.Peer, m bsmsg.BitSwapMessage } func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) { - log.Debug("Sending %v to peers that want it", block.Key()) + log.Debugf("Sending %v to peers that want it", block.Key()) for _, p := range bs.strategy.Peers() { if bs.strategy.BlockIsWantedByPeer(block.Key(), p) { - log.Debug("%v wants %v", p, block.Key()) + log.Debugf("%v wants %v", p, block.Key()) if bs.strategy.ShouldSendBlockToPeer(block.Key(), p) { message := bsmsg.New() message.AppendBlock(block) diff --git a/bitswap/network/net_message_adapter.go b/bitswap/network/net_message_adapter.go 
index 3ae11a2c6..9f51e9010 100644 --- a/bitswap/network/net_message_adapter.go +++ b/bitswap/network/net_message_adapter.go @@ -1,6 +1,8 @@ package network import ( + "errors" + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" @@ -48,6 +50,7 @@ func (adapter *impl) HandleMessage( // TODO(brian): put this in a helper function if bsmsg == nil || p == nil { + adapter.receiver.ReceiveError(errors.New("ReceiveMessage returned nil peer or message")) return nil } From 3ea53c821773b2444ed06a3bc91c3796142682a0 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 26 Oct 2014 00:45:40 +0000 Subject: [PATCH 0077/1035] lots of logging This commit was moved from ipfs/go-bitswap@96a29940ca1eab0b3edbf315f621245a1000649d --- bitswap/bitswap.go | 1 - bitswap/network/net_message_adapter.go | 4 ++++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index f631c651c..5e00a5888 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -135,7 +135,6 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm peer.Peer, bsmsg.BitSwapMessage) { log.Debugf("ReceiveMessage from %v", p.Key()) log.Debugf("Message wantlist: %v", incoming.Wantlist()) - log.Debugf("Message blockset: %v", incoming.Blocks()) if p == nil { log.Error("Received message from nil peer!") diff --git a/bitswap/network/net_message_adapter.go b/bitswap/network/net_message_adapter.go index 9f51e9010..c7e1a852d 100644 --- a/bitswap/network/net_message_adapter.go +++ b/bitswap/network/net_message_adapter.go @@ -4,6 +4,7 @@ import ( "errors" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + "github.com/jbenet/go-ipfs/util" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" inet "github.com/jbenet/go-ipfs/net" @@ -11,6 +12,8 @@ import ( peer "github.com/jbenet/go-ipfs/peer" ) +var log = 
util.Logger("net_message_adapter") + // NetMessageAdapter wraps a NetMessage network service func NetMessageAdapter(s inet.Service, n inet.Network, r Receiver) Adapter { adapter := impl{ @@ -60,6 +63,7 @@ func (adapter *impl) HandleMessage( return nil } + log.Debugf("Message size: %d", len(outgoing.Data())) return outgoing } From dd721f9d10e50f64fdc2a3ef3f6878f649a67bf0 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 27 Oct 2014 05:45:20 -0700 Subject: [PATCH 0078/1035] fix(bitswap) duplicate key in wantlist @whyrusleeping noticed this a couple days ago potential long-term fix: prevent duplicate entries in the wantlist by using a map/set and iterating over this data structure on export This commit was moved from ipfs/go-bitswap@eb32931f28007e32f894b48f312fbb0c21563a3d --- bitswap/bitswap.go | 1 - 1 file changed, 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 5e00a5888..9e1948030 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -80,7 +80,6 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) for _, wanted := range bs.wantlist.Keys() { message.AppendWanted(wanted) } - message.AppendWanted(k) for peerToQuery := range peersToQuery { log.Debugf("bitswap got peersToQuery: %s", peerToQuery) go func(p peer.Peer) { From 7942a9bfdbc16d5756770bf27fb5edc9745b242a Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 27 Oct 2014 05:54:50 -0700 Subject: [PATCH 0079/1035] style(bitswap/message) rename AppendWanted -> AddWanted implementation will be patched to ensure bitswap messages cannot contain duplicate blocks or keys This commit was moved from ipfs/go-bitswap@6db7212797d05a6984c92a00c6028a093878d082 --- bitswap/bitswap.go | 6 +++--- bitswap/message/message.go | 6 +++--- bitswap/message/message_test.go | 16 ++++++++-------- bitswap/strategy/strategy_test.go | 2 +- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 9e1948030..ec004da43 
100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -78,7 +78,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) go func() { message := bsmsg.New() for _, wanted := range bs.wantlist.Keys() { - message.AppendWanted(wanted) + message.AddWanted(wanted) } for peerToQuery := range peersToQuery { log.Debugf("bitswap got peersToQuery: %s", peerToQuery) @@ -167,7 +167,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm message := bsmsg.New() for _, wanted := range bs.wantlist.Keys() { - message.AppendWanted(wanted) + message.AddWanted(wanted) } for _, key := range incoming.Wantlist() { // TODO: might be better to check if we have the block before checking @@ -208,7 +208,7 @@ func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) message := bsmsg.New() message.AppendBlock(block) for _, wanted := range bs.wantlist.Keys() { - message.AppendWanted(wanted) + message.AddWanted(wanted) } go bs.send(ctx, p, message) } diff --git a/bitswap/message/message.go b/bitswap/message/message.go index b7216b024..d2ebd74b3 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -16,7 +16,7 @@ import ( type BitSwapMessage interface { Wantlist() []u.Key Blocks() []blocks.Block - AppendWanted(k u.Key) + AddWanted(k u.Key) AppendBlock(b blocks.Block) Exportable } @@ -39,7 +39,7 @@ func New() *message { func newMessageFromProto(pbm pb.Message) BitSwapMessage { m := New() for _, s := range pbm.GetWantlist() { - m.AppendWanted(u.Key(s)) + m.AddWanted(u.Key(s)) } for _, d := range pbm.GetBlocks() { b := blocks.NewBlock(d) @@ -58,7 +58,7 @@ func (m *message) Blocks() []blocks.Block { return m.blocks } -func (m *message) AppendWanted(k u.Key) { +func (m *message) AddWanted(k u.Key) { m.wantlist = append(m.wantlist, k) } diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 932c14e9b..4b385791c 100644 --- a/bitswap/message/message_test.go +++ 
b/bitswap/message/message_test.go @@ -13,7 +13,7 @@ import ( func TestAppendWanted(t *testing.T) { const str = "foo" m := New() - m.AppendWanted(u.Key(str)) + m.AddWanted(u.Key(str)) if !contains(m.ToProto().GetWantlist(), str) { t.Fail() @@ -58,7 +58,7 @@ func TestWantlist(t *testing.T) { keystrs := []string{"foo", "bar", "baz", "bat"} m := New() for _, s := range keystrs { - m.AppendWanted(u.Key(s)) + m.AddWanted(u.Key(s)) } exported := m.Wantlist() @@ -81,7 +81,7 @@ func TestCopyProtoByValue(t *testing.T) { const str = "foo" m := New() protoBeforeAppend := m.ToProto() - m.AppendWanted(u.Key(str)) + m.AddWanted(u.Key(str)) if contains(protoBeforeAppend.GetWantlist(), str) { t.Fail() } @@ -101,11 +101,11 @@ func TestToNetMethodSetsPeer(t *testing.T) { func TestToNetFromNetPreservesWantList(t *testing.T) { original := New() - original.AppendWanted(u.Key("M")) - original.AppendWanted(u.Key("B")) - original.AppendWanted(u.Key("D")) - original.AppendWanted(u.Key("T")) - original.AppendWanted(u.Key("F")) + original.AddWanted(u.Key("M")) + original.AddWanted(u.Key("B")) + original.AddWanted(u.Key("D")) + original.AddWanted(u.Key("T")) + original.AddWanted(u.Key("F")) p := peer.WithIDString("X") netmsg, err := original.ToNet(p) diff --git a/bitswap/strategy/strategy_test.go b/bitswap/strategy/strategy_test.go index e3ffc05ea..5fc7efb0a 100644 --- a/bitswap/strategy/strategy_test.go +++ b/bitswap/strategy/strategy_test.go @@ -60,7 +60,7 @@ func TestBlockRecordedAsWantedAfterMessageReceived(t *testing.T) { block := blocks.NewBlock([]byte("data wanted by beggar")) messageFromBeggarToChooser := message.New() - messageFromBeggarToChooser.AppendWanted(block.Key()) + messageFromBeggarToChooser.AddWanted(block.Key()) chooser.MessageReceived(beggar.Peer, messageFromBeggarToChooser) // for this test, doesn't matter if you record that beggar sent From 1c5c1a2b134bf7812dab36c10120c848486c5a90 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 27 Oct 2014 06:04:09 -0700 
Subject: [PATCH 0080/1035] refactor(bitswap/message) use map to prevent duplicate entries A nice invariant for bitswap sessions: Senders and receivers can trust that messages do not contain duplicate blocks or duplicate keys. Backing the message with a map enforces this invariant. This comes at the cost of O(n) getters. This commit was moved from ipfs/go-bitswap@a68d93109b43fba1bdabd5cd4f38dd78e659176e --- bitswap/message/message.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index d2ebd74b3..5d3aeb97d 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -28,12 +28,14 @@ type Exportable interface { // message wraps a proto message for convenience type message struct { - wantlist []u.Key + wantlist map[u.Key]struct{} blocks []blocks.Block } -func New() *message { - return new(message) +func New() BitSwapMessage { + return &message{ + wantlist: make(map[u.Key]struct{}), + } } func newMessageFromProto(pbm pb.Message) BitSwapMessage { @@ -50,7 +52,11 @@ func newMessageFromProto(pbm pb.Message) BitSwapMessage { // TODO(brian): convert these into keys func (m *message) Wantlist() []u.Key { - return m.wantlist + wl := make([]u.Key, 0) + for k, _ := range m.wantlist { + wl = append(wl, k) + } + return wl } // TODO(brian): convert these into blocks @@ -59,7 +65,7 @@ func (m *message) Blocks() []blocks.Block { } func (m *message) AddWanted(k u.Key) { - m.wantlist = append(m.wantlist, k) + m.wantlist[k] = struct{}{} } func (m *message) AppendBlock(b blocks.Block) { From f04cbd3463fb7979d09dd933b3724979d162e34b Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 27 Oct 2014 06:06:44 -0700 Subject: [PATCH 0081/1035] style(bitswap/message) rename struct so there's one less name to think about This commit was moved from ipfs/go-bitswap@824a185f4053e62d69405640e0638cebb19a16ab --- bitswap/message/message.go | 16 ++++++++-------- 1 file changed, 8 
insertions(+), 8 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 5d3aeb97d..1914f6c38 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -27,13 +27,13 @@ type Exportable interface { } // message wraps a proto message for convenience -type message struct { +type impl struct { wantlist map[u.Key]struct{} blocks []blocks.Block } func New() BitSwapMessage { - return &message{ + return &impl{ wantlist: make(map[u.Key]struct{}), } } @@ -51,7 +51,7 @@ func newMessageFromProto(pbm pb.Message) BitSwapMessage { } // TODO(brian): convert these into keys -func (m *message) Wantlist() []u.Key { +func (m *impl) Wantlist() []u.Key { wl := make([]u.Key, 0) for k, _ := range m.wantlist { wl = append(wl, k) @@ -60,15 +60,15 @@ func (m *message) Wantlist() []u.Key { } // TODO(brian): convert these into blocks -func (m *message) Blocks() []blocks.Block { +func (m *impl) Blocks() []blocks.Block { return m.blocks } -func (m *message) AddWanted(k u.Key) { +func (m *impl) AddWanted(k u.Key) { m.wantlist[k] = struct{}{} } -func (m *message) AppendBlock(b blocks.Block) { +func (m *impl) AppendBlock(b blocks.Block) { m.blocks = append(m.blocks, b) } @@ -81,7 +81,7 @@ func FromNet(nmsg netmsg.NetMessage) (BitSwapMessage, error) { return m, nil } -func (m *message) ToProto() *pb.Message { +func (m *impl) ToProto() *pb.Message { pb := new(pb.Message) for _, k := range m.Wantlist() { pb.Wantlist = append(pb.Wantlist, string(k)) @@ -92,6 +92,6 @@ func (m *message) ToProto() *pb.Message { return pb } -func (m *message) ToNet(p peer.Peer) (nm.NetMessage, error) { +func (m *impl) ToNet(p peer.Peer) (nm.NetMessage, error) { return nm.FromObject(p, m.ToProto()) } From 3c13a494aa3d641a66255b043f81c57d09cda743 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 27 Oct 2014 06:16:11 -0700 Subject: [PATCH 0082/1035] fix(bitswap/message) impl with map to ensure no duplicate blocks comes at the cost of O(n) Blocks() method. 
This commit was moved from ipfs/go-bitswap@9cebc05a845ff5cf55cc61a6f83710a7b2bd446a --- bitswap/message/message.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 1914f6c38..d39ff821d 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -26,15 +26,15 @@ type Exportable interface { ToNet(p peer.Peer) (nm.NetMessage, error) } -// message wraps a proto message for convenience type impl struct { wantlist map[u.Key]struct{} - blocks []blocks.Block + blocks map[u.Key]blocks.Block } func New() BitSwapMessage { return &impl{ wantlist: make(map[u.Key]struct{}), + blocks: make(map[u.Key]blocks.Block), } } @@ -61,7 +61,11 @@ func (m *impl) Wantlist() []u.Key { // TODO(brian): convert these into blocks func (m *impl) Blocks() []blocks.Block { - return m.blocks + bs := make([]blocks.Block, 0) + for _, block := range m.blocks { + bs = append(bs, block) + } + return bs } func (m *impl) AddWanted(k u.Key) { @@ -69,7 +73,7 @@ func (m *impl) AddWanted(k u.Key) { } func (m *impl) AppendBlock(b blocks.Block) { - m.blocks = append(m.blocks, b) + m.blocks[b.Key()] = b } func FromNet(nmsg netmsg.NetMessage) (BitSwapMessage, error) { From 1773f49b37587c969f745347474a19380bba0316 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 27 Oct 2014 06:18:03 -0700 Subject: [PATCH 0083/1035] style(bitswap/message) rename method -> AddBlock to emphasize idempotence This commit was moved from ipfs/go-bitswap@8cd17e8dc0a27bbd486aefcd58d3b0e5ea7e5610 --- bitswap/bitswap.go | 4 ++-- bitswap/message/message.go | 6 +++--- bitswap/message/message_test.go | 10 +++++----- bitswap/strategy/strategy_test.go | 2 +- bitswap/testnet/network_test.go | 8 ++++---- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index ec004da43..a785b15dc 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -176,7 +176,7 @@ func (bs *bitswap) 
ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm if block, errBlockNotFound := bs.blockstore.Get(key); errBlockNotFound != nil { continue } else { - message.AppendBlock(*block) + message.AddBlock(*block) } } } @@ -206,7 +206,7 @@ func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) log.Debugf("%v wants %v", p, block.Key()) if bs.strategy.ShouldSendBlockToPeer(block.Key(), p) { message := bsmsg.New() - message.AppendBlock(block) + message.AddBlock(block) for _, wanted := range bs.wantlist.Keys() { message.AddWanted(wanted) } diff --git a/bitswap/message/message.go b/bitswap/message/message.go index d39ff821d..f9663c3f3 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -17,7 +17,7 @@ type BitSwapMessage interface { Wantlist() []u.Key Blocks() []blocks.Block AddWanted(k u.Key) - AppendBlock(b blocks.Block) + AddBlock(b blocks.Block) Exportable } @@ -45,7 +45,7 @@ func newMessageFromProto(pbm pb.Message) BitSwapMessage { } for _, d := range pbm.GetBlocks() { b := blocks.NewBlock(d) - m.AppendBlock(*b) + m.AddBlock(*b) } return m } @@ -72,7 +72,7 @@ func (m *impl) AddWanted(k u.Key) { m.wantlist[k] = struct{}{} } -func (m *impl) AppendBlock(b blocks.Block) { +func (m *impl) AddBlock(b blocks.Block) { m.blocks[b.Key()] = b } diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 4b385791c..f98934b37 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -42,7 +42,7 @@ func TestAppendBlock(t *testing.T) { m := New() for _, str := range strs { block := blocks.NewBlock([]byte(str)) - m.AppendBlock(*block) + m.AddBlock(*block) } // assert strings are in proto message @@ -133,10 +133,10 @@ func TestToNetFromNetPreservesWantList(t *testing.T) { func TestToAndFromNetMessage(t *testing.T) { original := New() - original.AppendBlock(*blocks.NewBlock([]byte("W"))) - original.AppendBlock(*blocks.NewBlock([]byte("E"))) - 
original.AppendBlock(*blocks.NewBlock([]byte("F"))) - original.AppendBlock(*blocks.NewBlock([]byte("M"))) + original.AddBlock(*blocks.NewBlock([]byte("W"))) + original.AddBlock(*blocks.NewBlock([]byte("E"))) + original.AddBlock(*blocks.NewBlock([]byte("F"))) + original.AddBlock(*blocks.NewBlock([]byte("M"))) p := peer.WithIDString("X") netmsg, err := original.ToNet(p) diff --git a/bitswap/strategy/strategy_test.go b/bitswap/strategy/strategy_test.go index 5fc7efb0a..ef93d9827 100644 --- a/bitswap/strategy/strategy_test.go +++ b/bitswap/strategy/strategy_test.go @@ -30,7 +30,7 @@ func TestConsistentAccounting(t *testing.T) { m := message.New() content := []string{"this", "is", "message", "i"} - m.AppendBlock(*blocks.NewBlock([]byte(strings.Join(content, " ")))) + m.AddBlock(*blocks.NewBlock([]byte(strings.Join(content, " ")))) sender.MessageSent(receiver.Peer, m) receiver.MessageReceived(sender.Peer, m) diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index c2cc28f8d..3930c2a8c 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -33,7 +33,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) { // TODO test contents of incoming message m := bsmsg.New() - m.AppendBlock(*blocks.NewBlock([]byte(expectedStr))) + m.AddBlock(*blocks.NewBlock([]byte(expectedStr))) return from, m })) @@ -41,7 +41,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) { t.Log("Build a message and send a synchronous request to recipient") message := bsmsg.New() - message.AppendBlock(*blocks.NewBlock([]byte("data"))) + message.AddBlock(*blocks.NewBlock([]byte("data"))) response, err := initiator.SendRequest( context.Background(), peer.WithID(idOfRecipient), message) if err != nil { @@ -77,7 +77,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { peer.Peer, bsmsg.BitSwapMessage) { msgToWaiter := bsmsg.New() - msgToWaiter.AppendBlock(*blocks.NewBlock([]byte(expectedStr))) + 
msgToWaiter.AddBlock(*blocks.NewBlock([]byte(expectedStr))) return fromWaiter, msgToWaiter })) @@ -105,7 +105,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { })) messageSentAsync := bsmsg.New() - messageSentAsync.AppendBlock(*blocks.NewBlock([]byte("data"))) + messageSentAsync.AddBlock(*blocks.NewBlock([]byte("data"))) errSending := waiter.SendMessage( context.Background(), peer.WithID(idOfResponder), messageSentAsync) if errSending != nil { From 3647df9485b3516d73c84721336c1a79eca27f3b Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 27 Oct 2014 06:31:49 -0700 Subject: [PATCH 0084/1035] test(bitswap/message) no duplicates This commit was moved from ipfs/go-bitswap@f758e76d8601f3c72415c3ece8da71aebc16dd33 --- bitswap/message/message_test.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index f98934b37..9c69136cd 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -169,3 +169,20 @@ func contains(s []string, x string) bool { } return false } + +func TestDuplicates(t *testing.T) { + b := blocks.NewBlock([]byte("foo")) + msg := New() + + msg.AddWanted(b.Key()) + msg.AddWanted(b.Key()) + if len(msg.Wantlist()) != 1 { + t.Fatal("Duplicate in BitSwapMessage") + } + + msg.AddBlock(*b) + msg.AddBlock(*b) + if len(msg.Blocks()) != 1 { + t.Fatal("Duplicate in BitSwapMessage") + } +} From faf7f9e4e00233ee9fd53e859c1c77291a59ed15 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 27 Oct 2014 22:39:21 -0700 Subject: [PATCH 0085/1035] fix(bitswap) preserve ordering in bitswap message This commit was moved from ipfs/go-bitswap@2c7761fa730e10e296cc1adaf269d5faa728a90c --- bitswap/message/message.go | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index f9663c3f3..4b5735a9d 100644 --- a/bitswap/message/message.go +++ 
b/bitswap/message/message.go @@ -27,14 +27,16 @@ type Exportable interface { } type impl struct { - wantlist map[u.Key]struct{} - blocks map[u.Key]blocks.Block + existsInWantlist map[u.Key]struct{} // map to detect duplicates + wantlist []u.Key // slice to preserve ordering + blocks map[u.Key]blocks.Block // map to detect duplicates } func New() BitSwapMessage { return &impl{ - wantlist: make(map[u.Key]struct{}), - blocks: make(map[u.Key]blocks.Block), + blocks: make(map[u.Key]blocks.Block), + existsInWantlist: make(map[u.Key]struct{}), + wantlist: make([]u.Key, 0), } } @@ -50,16 +52,10 @@ func newMessageFromProto(pbm pb.Message) BitSwapMessage { return m } -// TODO(brian): convert these into keys func (m *impl) Wantlist() []u.Key { - wl := make([]u.Key, 0) - for k, _ := range m.wantlist { - wl = append(wl, k) - } - return wl + return m.wantlist } -// TODO(brian): convert these into blocks func (m *impl) Blocks() []blocks.Block { bs := make([]blocks.Block, 0) for _, block := range m.blocks { @@ -69,7 +65,12 @@ func (m *impl) Blocks() []blocks.Block { } func (m *impl) AddWanted(k u.Key) { - m.wantlist[k] = struct{}{} + _, exists := m.existsInWantlist[k] + if exists { + return + } + m.existsInWantlist[k] = struct{}{} + m.wantlist = append(m.wantlist, k) } func (m *impl) AddBlock(b blocks.Block) { From 66f31618e1d91eb267029d81b59718948c47f2ec Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 27 Oct 2014 22:39:42 -0700 Subject: [PATCH 0086/1035] docs(bitswap/message) BitSwapMessage interface This commit was moved from ipfs/go-bitswap@5597393da377da72fbddb03ad81a6d840182bbde --- bitswap/message/message.go | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 4b5735a9d..e0aea227d 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -14,10 +14,25 @@ import ( // TODO move bs/msg/internal/pb to bs/internal/pb and rename pb package to bitswap_pb type 
BitSwapMessage interface { + // Wantlist returns a slice of unique keys that represent data wanted by + // the sender. Wantlist() []u.Key + + // Blocks returns a slice of unique blocks Blocks() []blocks.Block - AddWanted(k u.Key) - AddBlock(b blocks.Block) + + // AddWanted adds the key to the Wantlist. + // + // Insertion order determines priority. That is, earlier insertions are + // deemed higher priority than keys inserted later. + // + // t = 0, msg.AddWanted(A) + // t = 1, msg.AddWanted(B) + // + // implies Priority(A) > Priority(B) + AddWanted(u.Key) + + AddBlock(blocks.Block) Exportable } From 2b42885f62db84e424114ee63255ca3c2a304b6d Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 26 Oct 2014 08:01:33 +0000 Subject: [PATCH 0087/1035] benchmark secure channel This commit was moved from ipfs/go-bitswap@128c4a40a68dc01262949c475b5a19d560782bec --- bitswap/bitswap.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index a785b15dc..c8a53ec2b 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -1,6 +1,8 @@ package bitswap import ( + "time" + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" @@ -67,6 +69,10 @@ type bitswap struct { // TODO ensure only one active request per key func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) { log.Debugf("Get Block %v", k) + now := time.Now() + defer func() { + log.Errorf("GetBlock took %f secs", time.Now().Sub(now).Seconds()) + }() ctx, cancelFunc := context.WithCancel(parent) bs.wantlist.Add(k) @@ -160,7 +166,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm go func(block blocks.Block) { err := bs.HasBlock(ctx, block) // FIXME err ignored if err != nil { - log.Errorf("HasBlock errored: %s", err) + log.Warningf("HasBlock errored: %s", err) } }(block) } From 
4c1002a30c684fc935f6f5e4d7fa7be3cbad31ac Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 2 Nov 2014 20:40:25 -0800 Subject: [PATCH 0088/1035] docs: TODO This commit was moved from ipfs/go-bitswap@1baa039e088772faf02e93ed4b21858b44295f16 --- bitswap/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index c8a53ec2b..88ff418c7 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -29,7 +29,7 @@ func NetMessageSession(parent context.Context, p peer.Peer, networkAdapter := bsnet.NetMessageAdapter(srv, net, nil) bs := &bitswap{ blockstore: blockstore.NewBlockstore(d), - notifications: notifications.New(), + notifications: notifications.New(), // TODO Shutdown() strategy: strategy.New(nice), routing: directory, sender: networkAdapter, From 96df222ef7b0dc7675be2df81dcabd23c80e37bb Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 5 Nov 2014 08:43:03 -0800 Subject: [PATCH 0089/1035] fix(bitswap_test) race cond https://github.com/jbenet/go-ipfs/issues/270#issuecomment-61826022 This commit was moved from ipfs/go-bitswap@d3a79ef1519b5bc7ddde43cf9babce02377c36a4 --- bitswap/bitswap_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index f34ea3c84..4a01444e5 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -149,8 +149,6 @@ func getOrFail(bitswap instance, b *blocks.Block, t *testing.T, wg *sync.WaitGro // TODO simplify this test. get to the _essence_! 
func TestSendToWantingPeer(t *testing.T) { - util.Debug = true - net := tn.VirtualNetwork() rs := mock.VirtualRoutingServer() sg := NewSessionGenerator(net, rs) From 85f5c0b889027ee404bc3e53cae3d5b5a993a06a Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 5 Nov 2014 03:00:04 -0800 Subject: [PATCH 0090/1035] fix(exchange) add context to DialPeer This commit was moved from ipfs/go-bitswap@54b7ba45fe19094aa24f49abad748acfb8e1e9a1 --- bitswap/bitswap.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/net_message_adapter.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 88ff418c7..af84caa05 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -91,7 +91,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) go func(p peer.Peer) { log.Debugf("bitswap dialing peer: %s", p) - err := bs.sender.DialPeer(p) + err := bs.sender.DialPeer(ctx, p) if err != nil { log.Errorf("Error sender.DialPeer(%s)", p) return diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 467b0f400..1d3fc63a5 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -12,7 +12,7 @@ import ( type Adapter interface { // DialPeer ensures there is a connection to peer. - DialPeer(peer.Peer) error + DialPeer(context.Context, peer.Peer) error // SendMessage sends a BitSwap message to a peer. 
SendMessage( diff --git a/bitswap/network/net_message_adapter.go b/bitswap/network/net_message_adapter.go index c7e1a852d..1bdf13ae9 100644 --- a/bitswap/network/net_message_adapter.go +++ b/bitswap/network/net_message_adapter.go @@ -67,7 +67,7 @@ func (adapter *impl) HandleMessage( return outgoing } -func (adapter *impl) DialPeer(p peer.Peer) error { +func (adapter *impl) DialPeer(ctx context.Context, p peer.Peer) error { return adapter.net.DialPeer(p) } From 959f3fec89b99d3256bb8d0f7f50394cadcf6ede Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 5 Nov 2014 04:26:30 -0800 Subject: [PATCH 0091/1035] fix(net) pass contexts to dial peer This commit was moved from ipfs/go-bitswap@fc8168f6328d2c4efb227cccd335984e34fd4200 --- bitswap/network/net_message_adapter.go | 2 +- bitswap/testnet/network.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/network/net_message_adapter.go b/bitswap/network/net_message_adapter.go index 1bdf13ae9..f3fe1b257 100644 --- a/bitswap/network/net_message_adapter.go +++ b/bitswap/network/net_message_adapter.go @@ -68,7 +68,7 @@ func (adapter *impl) HandleMessage( } func (adapter *impl) DialPeer(ctx context.Context, p peer.Peer) error { - return adapter.net.DialPeer(p) + return adapter.net.DialPeer(ctx, p) } func (adapter *impl) SendMessage( diff --git a/bitswap/testnet/network.go b/bitswap/testnet/network.go index 418f75ce0..a7864c2a1 100644 --- a/bitswap/testnet/network.go +++ b/bitswap/testnet/network.go @@ -163,7 +163,7 @@ func (nc *networkClient) SendRequest( return nc.network.SendRequest(ctx, nc.local, to, message) } -func (nc *networkClient) DialPeer(p peer.Peer) error { +func (nc *networkClient) DialPeer(ctx context.Context, p peer.Peer) error { // no need to do anything because dialing isn't a thing in this test net. 
if !nc.network.HasPeer(p) { return fmt.Errorf("Peer not in network: %s", p) From 161b6eebffbf591e984181d6c3ef43d7549ef200 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 5 Nov 2014 07:05:12 -0800 Subject: [PATCH 0092/1035] fix(bitswap) don't 'go' local function calls This commit was moved from ipfs/go-bitswap@d18a24cf5504cdfd76575be7371c3cb3a193d8f4 --- bitswap/bitswap.go | 41 ++++++++++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 15 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index af84caa05..843bed4a9 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -21,15 +21,28 @@ import ( var log = u.Logger("bitswap") // NetMessageSession initializes a BitSwap session that communicates over the -// provided NetMessage service -func NetMessageSession(parent context.Context, p peer.Peer, +// provided NetMessage service. +// Runs until context is cancelled +func NetMessageSession(ctx context.Context, p peer.Peer, net inet.Network, srv inet.Service, directory bsnet.Routing, d ds.ThreadSafeDatastore, nice bool) exchange.Interface { networkAdapter := bsnet.NetMessageAdapter(srv, net, nil) + + notif := notifications.New() + + go func() { + for { + select { + case <-ctx.Done(): + notif.Shutdown() + } + } + }() + bs := &bitswap{ blockstore: blockstore.NewBlockstore(d), - notifications: notifications.New(), // TODO Shutdown() + notifications: notif, strategy: strategy.New(nice), routing: directory, sender: networkAdapter, @@ -119,15 +132,14 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) case block := <-promise: cancelFunc() bs.wantlist.Remove(k) - // TODO remove from wantlist return &block, nil case <-parent.Done(): return nil, parent.Err() } } -// HasBlock announces the existance of a block to bitswap, potentially sending -// it to peers (Partners) whose WantLists include it. +// HasBlock announces the existance of a block to this bitswap service. 
The +// service will potentially notify its peers. func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { log.Debugf("Has Block %v", blk.Key()) bs.wantlist.Remove(blk.Key()) @@ -162,13 +174,11 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm if err := bs.blockstore.Put(&block); err != nil { continue // FIXME(brian): err ignored } - go bs.notifications.Publish(block) - go func(block blocks.Block) { - err := bs.HasBlock(ctx, block) // FIXME err ignored - if err != nil { - log.Warningf("HasBlock errored: %s", err) - } - }(block) + bs.notifications.Publish(block) + err := bs.HasBlock(ctx, block) + if err != nil { + log.Warningf("HasBlock errored: %s", err) + } } message := bsmsg.New() @@ -202,11 +212,12 @@ func (bs *bitswap) ReceiveError(err error) { // sent func (bs *bitswap) send(ctx context.Context, p peer.Peer, m bsmsg.BitSwapMessage) { bs.sender.SendMessage(ctx, p, m) - go bs.strategy.MessageSent(p, m) + bs.strategy.MessageSent(p, m) } func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) { log.Debugf("Sending %v to peers that want it", block.Key()) + for _, p := range bs.strategy.Peers() { if bs.strategy.BlockIsWantedByPeer(block.Key(), p) { log.Debugf("%v wants %v", p, block.Key()) @@ -216,7 +227,7 @@ func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) for _, wanted := range bs.wantlist.Keys() { message.AddWanted(wanted) } - go bs.send(ctx, p, message) + bs.send(ctx, p, message) } } } From f4936327c4572abbf6fc0b3dd756ad49fd479e74 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 5 Nov 2014 07:07:49 -0800 Subject: [PATCH 0093/1035] fix(bitswap) always cancel on return This commit was moved from ipfs/go-bitswap@d42ec402a85538390019d4220b4f3df1a34d9c9b --- bitswap/bitswap.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 843bed4a9..3ccab5d97 100644 --- a/bitswap/bitswap.go +++ 
b/bitswap/bitswap.go @@ -88,6 +88,8 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) }() ctx, cancelFunc := context.WithCancel(parent) + defer cancelFunc() + bs.wantlist.Add(k) promise := bs.notifications.Subscribe(ctx, k) @@ -130,7 +132,6 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) select { case block := <-promise: - cancelFunc() bs.wantlist.Remove(k) return &block, nil case <-parent.Done(): From 0cee5476a493dcb3d572bcf4803142ec504f0af5 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 5 Nov 2014 10:13:24 -0800 Subject: [PATCH 0094/1035] fix(bitswap) shut down async This commit was moved from ipfs/go-bitswap@23802fdf9cc2a2bd64b56d232d21d65f2e14a630 --- bitswap/bitswap.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 3ccab5d97..369fcee75 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -32,11 +32,9 @@ func NetMessageSession(ctx context.Context, p peer.Peer, notif := notifications.New() go func() { - for { - select { - case <-ctx.Done(): - notif.Shutdown() - } + select { + case <-ctx.Done(): + notif.Shutdown() } }() From e2bfa6eda7e01514f1902c30a1a3ddb0d2d0a4cc Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Thu, 6 Nov 2014 18:03:10 -0800 Subject: [PATCH 0095/1035] bitswap error -> debug (use IPFS_LOGGING=debug) This commit was moved from ipfs/go-bitswap@7ca6dbade639843c45300b60e0f5fd590d1060a5 --- bitswap/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 369fcee75..ed444b100 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -82,7 +82,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) log.Debugf("Get Block %v", k) now := time.Now() defer func() { - log.Errorf("GetBlock took %f secs", time.Now().Sub(now).Seconds()) + log.Debugf("GetBlock took %f secs", 
time.Now().Sub(now).Seconds()) }() ctx, cancelFunc := context.WithCancel(parent) From 603eb95c059b28a396f6d69fdfae3a43b68e2d7c Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sat, 8 Nov 2014 21:37:56 -0800 Subject: [PATCH 0096/1035] docs(exchange) This commit was moved from ipfs/go-bitswap@7b5a11c939855ef076ebb9276806583fa71309c6 --- bitswap/bitswap.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index ed444b100..d51bd2b87 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -1,3 +1,5 @@ +// package bitswap implements the IPFS Exchange interface with the BitSwap +// bilateral exchange protocol. package bitswap import ( From acb882f818c4fa844e5fff5f73877a644e676b62 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 12 Nov 2014 10:39:11 -0800 Subject: [PATCH 0097/1035] log -> logf This commit was moved from ipfs/go-bitswap@85982228f36eae85c041a14b747cdf521b5a3412 --- bitswap/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d51bd2b87..7e3a57ec1 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -114,7 +114,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) response, err := bs.sender.SendRequest(ctx, p, message) if err != nil { - log.Error("Error sender.SendRequest(%s) = %s", p, err) + log.Errorf("Error sender.SendRequest(%s) = %s", p, err) return } // FIXME ensure accounting is handled correctly when From b128fc9d780700a3996be23bb40a0600a3f3f71e Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 27 Oct 2014 06:53:08 -0700 Subject: [PATCH 0098/1035] style(bitswap) rename variable to 'routing' This commit was moved from ipfs/go-bitswap@014813e8f736f1cdc9d153827ad8a66c8916bff4 --- bitswap/bitswap.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 7e3a57ec1..52e6f30f8 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go 
@@ -26,7 +26,7 @@ var log = u.Logger("bitswap") // provided NetMessage service. // Runs until context is cancelled func NetMessageSession(ctx context.Context, p peer.Peer, - net inet.Network, srv inet.Service, directory bsnet.Routing, + net inet.Network, srv inet.Service, routing bsnet.Routing, d ds.ThreadSafeDatastore, nice bool) exchange.Interface { networkAdapter := bsnet.NetMessageAdapter(srv, net, nil) @@ -44,7 +44,7 @@ func NetMessageSession(ctx context.Context, p peer.Peer, blockstore: blockstore.NewBlockstore(d), notifications: notif, strategy: strategy.New(nice), - routing: directory, + routing: routing, sender: networkAdapter, wantlist: u.NewKeySet(), } From 55bf10cc64094737f4235c0acdf8947bf6396a04 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 27 Oct 2014 07:02:38 -0700 Subject: [PATCH 0099/1035] style(bitswap) rename Adapter -> BitSwapNetwork for clarity This commit was moved from ipfs/go-bitswap@9ac618652a502311290e1c6d340f05b16a94ddb3 --- bitswap/bitswap.go | 2 +- bitswap/network/interface.go | 6 +++--- bitswap/network/net_message_adapter.go | 2 +- bitswap/testnet/network.go | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 52e6f30f8..413f55198 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -57,7 +57,7 @@ func NetMessageSession(ctx context.Context, p peer.Peer, type bitswap struct { // sender delivers messages on behalf of the session - sender bsnet.Adapter + sender bsnet.BitSwapNetwork // blockstore is the local database // NB: ensure threadsafety diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 1d3fc63a5..44557b064 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -8,8 +8,8 @@ import ( u "github.com/jbenet/go-ipfs/util" ) -// Adapter provides network connectivity for BitSwap sessions -type Adapter interface { +// BitSwapNetwork provides network connectivity for BitSwap sessions +type BitSwapNetwork 
interface { // DialPeer ensures there is a connection to peer. DialPeer(context.Context, peer.Peer) error @@ -31,6 +31,7 @@ type Adapter interface { SetDelegate(Receiver) } +// Implement Receiver to receive messages from the BitSwapNetwork type Receiver interface { ReceiveMessage( ctx context.Context, sender peer.Peer, incoming bsmsg.BitSwapMessage) ( @@ -39,7 +40,6 @@ type Receiver interface { ReceiveError(error) } -// TODO rename -> Router? type Routing interface { // FindProvidersAsync returns a channel of providers for the given key FindProvidersAsync(context.Context, u.Key, int) <-chan peer.Peer diff --git a/bitswap/network/net_message_adapter.go b/bitswap/network/net_message_adapter.go index f3fe1b257..3a181532c 100644 --- a/bitswap/network/net_message_adapter.go +++ b/bitswap/network/net_message_adapter.go @@ -15,7 +15,7 @@ import ( var log = util.Logger("net_message_adapter") // NetMessageAdapter wraps a NetMessage network service -func NetMessageAdapter(s inet.Service, n inet.Network, r Receiver) Adapter { +func NetMessageAdapter(s inet.Service, n inet.Network, r Receiver) BitSwapNetwork { adapter := impl{ nms: s, net: n, diff --git a/bitswap/testnet/network.go b/bitswap/testnet/network.go index a7864c2a1..691b7cb42 100644 --- a/bitswap/testnet/network.go +++ b/bitswap/testnet/network.go @@ -13,7 +13,7 @@ import ( ) type Network interface { - Adapter(peer.Peer) bsnet.Adapter + Adapter(peer.Peer) bsnet.BitSwapNetwork HasPeer(peer.Peer) bool @@ -43,7 +43,7 @@ type network struct { clients map[util.Key]bsnet.Receiver } -func (n *network) Adapter(p peer.Peer) bsnet.Adapter { +func (n *network) Adapter(p peer.Peer) bsnet.BitSwapNetwork { client := &networkClient{ local: p, network: n, From 99f5043b21e96131ad9fa4b3dd76c0c92ca3559a Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 27 Oct 2014 07:04:52 -0700 Subject: [PATCH 0100/1035] rename var This commit was moved from ipfs/go-bitswap@6a64affbceae34a70ad332bfdd34197f0aaa0d1f --- bitswap/bitswap.go | 9 
++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 413f55198..b16cc3ea7 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -29,10 +29,7 @@ func NetMessageSession(ctx context.Context, p peer.Peer, net inet.Network, srv inet.Service, routing bsnet.Routing, d ds.ThreadSafeDatastore, nice bool) exchange.Interface { - networkAdapter := bsnet.NetMessageAdapter(srv, net, nil) - notif := notifications.New() - go func() { select { case <-ctx.Done(): @@ -40,15 +37,17 @@ func NetMessageSession(ctx context.Context, p peer.Peer, } }() + network := bsnet.NetMessageAdapter(srv, net, nil) + bs := &bitswap{ blockstore: blockstore.NewBlockstore(d), notifications: notif, strategy: strategy.New(nice), routing: routing, - sender: networkAdapter, + sender: network, wantlist: u.NewKeySet(), } - networkAdapter.SetDelegate(bs) + network.SetDelegate(bs) return bs } From fa1b45a26842420f2c1834b894afef79eda39861 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 27 Oct 2014 07:16:28 -0700 Subject: [PATCH 0101/1035] refactor(bitswap/network) rename -> BitSwapNetwork remove 'adapter' concept instead, describe the component as the bitswap network it's still an adapter, but it's just not necessary to describe it as such This commit was moved from ipfs/go-bitswap@c5333a20539deb3af8e641d199658325aee07c01 --- bitswap/bitswap.go | 2 +- .../{net_message_adapter.go => ipfs_impl.go} | 51 ++++++++++--------- 2 files changed, 27 insertions(+), 26 deletions(-) rename bitswap/network/{net_message_adapter.go => ipfs_impl.go} (57%) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b16cc3ea7..b5b41b7d1 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -37,7 +37,7 @@ func NetMessageSession(ctx context.Context, p peer.Peer, } }() - network := bsnet.NetMessageAdapter(srv, net, nil) + network := bsnet.NewFromIpfsNetwork(srv, net) bs := &bitswap{ blockstore: blockstore.NewBlockstore(d), diff --git 
a/bitswap/network/net_message_adapter.go b/bitswap/network/ipfs_impl.go similarity index 57% rename from bitswap/network/net_message_adapter.go rename to bitswap/network/ipfs_impl.go index 3a181532c..5cccf1a79 100644 --- a/bitswap/network/net_message_adapter.go +++ b/bitswap/network/ipfs_impl.go @@ -4,31 +4,32 @@ import ( "errors" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - "github.com/jbenet/go-ipfs/util" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" inet "github.com/jbenet/go-ipfs/net" netmsg "github.com/jbenet/go-ipfs/net/message" peer "github.com/jbenet/go-ipfs/peer" + util "github.com/jbenet/go-ipfs/util" ) -var log = util.Logger("net_message_adapter") +var log = util.Logger("bitswap_network") -// NetMessageAdapter wraps a NetMessage network service -func NetMessageAdapter(s inet.Service, n inet.Network, r Receiver) BitSwapNetwork { - adapter := impl{ - nms: s, - net: n, - receiver: r, +// NewFromIpfsNetwork returns a BitSwapNetwork supported by underlying IPFS +// Network & Service +func NewFromIpfsNetwork(s inet.Service, n inet.Network) BitSwapNetwork { + bitswapNetwork := impl{ + service: s, + net: n, } - s.SetHandler(&adapter) - return &adapter + s.SetHandler(&bitswapNetwork) + return &bitswapNetwork } -// implements an Adapter that integrates with a NetMessage network service +// impl transforms the ipfs network interface, which sends and receives +// NetMessage objects, into the bitswap network interface. 
type impl struct { - nms inet.Service - net inet.Network + service inet.Service + net inet.Network // inbound messages from the network are forwarded to the receiver receiver Receiver @@ -36,30 +37,30 @@ type impl struct { // HandleMessage marshals and unmarshals net messages, forwarding them to the // BitSwapMessage receiver -func (adapter *impl) HandleMessage( +func (bsnet *impl) HandleMessage( ctx context.Context, incoming netmsg.NetMessage) netmsg.NetMessage { - if adapter.receiver == nil { + if bsnet.receiver == nil { return nil } received, err := bsmsg.FromNet(incoming) if err != nil { - go adapter.receiver.ReceiveError(err) + go bsnet.receiver.ReceiveError(err) return nil } - p, bsmsg := adapter.receiver.ReceiveMessage(ctx, incoming.Peer(), received) + p, bsmsg := bsnet.receiver.ReceiveMessage(ctx, incoming.Peer(), received) // TODO(brian): put this in a helper function if bsmsg == nil || p == nil { - adapter.receiver.ReceiveError(errors.New("ReceiveMessage returned nil peer or message")) + bsnet.receiver.ReceiveError(errors.New("ReceiveMessage returned nil peer or message")) return nil } outgoing, err := bsmsg.ToNet(p) if err != nil { - go adapter.receiver.ReceiveError(err) + go bsnet.receiver.ReceiveError(err) return nil } @@ -71,7 +72,7 @@ func (adapter *impl) DialPeer(ctx context.Context, p peer.Peer) error { return adapter.net.DialPeer(ctx, p) } -func (adapter *impl) SendMessage( +func (bsnet *impl) SendMessage( ctx context.Context, p peer.Peer, outgoing bsmsg.BitSwapMessage) error { @@ -80,10 +81,10 @@ func (adapter *impl) SendMessage( if err != nil { return err } - return adapter.nms.SendMessage(ctx, nmsg) + return bsnet.service.SendMessage(ctx, nmsg) } -func (adapter *impl) SendRequest( +func (bsnet *impl) SendRequest( ctx context.Context, p peer.Peer, outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) { @@ -92,13 +93,13 @@ func (adapter *impl) SendRequest( if err != nil { return nil, err } - incomingMsg, err := adapter.nms.SendRequest(ctx, 
outgoingMsg) + incomingMsg, err := bsnet.service.SendRequest(ctx, outgoingMsg) if err != nil { return nil, err } return bsmsg.FromNet(incomingMsg) } -func (adapter *impl) SetDelegate(r Receiver) { - adapter.receiver = r +func (bsnet *impl) SetDelegate(r Receiver) { + bsnet.receiver = r } From c579c6ed1a165ef08328096232a8c965bd5879fc Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 27 Oct 2014 07:41:29 -0700 Subject: [PATCH 0102/1035] refactor(core, bitswap) split bitswap init into two steps @jbenet This commit was moved from ipfs/go-bitswap@dfb0a9c627e39e116cc9ae4221f58933a22c9001 --- bitswap/bitswap.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b5b41b7d1..529c78689 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -15,18 +15,18 @@ import ( bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" notifications "github.com/jbenet/go-ipfs/exchange/bitswap/notifications" strategy "github.com/jbenet/go-ipfs/exchange/bitswap/strategy" - inet "github.com/jbenet/go-ipfs/net" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" ) var log = u.Logger("bitswap") -// NetMessageSession initializes a BitSwap session that communicates over the -// provided NetMessage service. +// New initializes a BitSwap instance that communicates over the +// provided BitSwapNetwork. This function registers the returned instance as +// the network delegate. 
// Runs until context is cancelled -func NetMessageSession(ctx context.Context, p peer.Peer, - net inet.Network, srv inet.Service, routing bsnet.Routing, +func New(ctx context.Context, p peer.Peer, + network bsnet.BitSwapNetwork, routing bsnet.Routing, d ds.ThreadSafeDatastore, nice bool) exchange.Interface { notif := notifications.New() @@ -37,8 +37,6 @@ func NetMessageSession(ctx context.Context, p peer.Peer, } }() - network := bsnet.NewFromIpfsNetwork(srv, net) - bs := &bitswap{ blockstore: blockstore.NewBlockstore(d), notifications: notif, From 884b6daa68552a4951a762981eedcb406694d145 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 27 Oct 2014 15:25:12 -0700 Subject: [PATCH 0103/1035] refctor(bitswap/network) replace Network interface with Dialer interface This commit was moved from ipfs/go-bitswap@a7170e4e42d436f08a80339edf5eb42b4fa43279 --- bitswap/network/ipfs_impl.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 5cccf1a79..c94a4859f 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -15,11 +15,11 @@ import ( var log = util.Logger("bitswap_network") // NewFromIpfsNetwork returns a BitSwapNetwork supported by underlying IPFS -// Network & Service -func NewFromIpfsNetwork(s inet.Service, n inet.Network) BitSwapNetwork { +// Dialer & Service +func NewFromIpfsNetwork(s inet.Service, dialer inet.Dialer) BitSwapNetwork { bitswapNetwork := impl{ service: s, - net: n, + dialer: dialer, } s.SetHandler(&bitswapNetwork) return &bitswapNetwork @@ -29,7 +29,7 @@ func NewFromIpfsNetwork(s inet.Service, n inet.Network) BitSwapNetwork { // NetMessage objects, into the bitswap network interface. 
type impl struct { service inet.Service - net inet.Network + dialer inet.Dialer // inbound messages from the network are forwarded to the receiver receiver Receiver @@ -68,8 +68,8 @@ func (bsnet *impl) HandleMessage( return outgoing } -func (adapter *impl) DialPeer(ctx context.Context, p peer.Peer) error { - return adapter.net.DialPeer(ctx, p) +func (bsnet *impl) DialPeer(ctx context.Context, p peer.Peer) error { + return bsnet.dialer.DialPeer(ctx, p) } func (bsnet *impl) SendMessage( From 682f1a222923e7fe309005512732f432fb691544 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sat, 15 Nov 2014 00:19:47 -0800 Subject: [PATCH 0104/1035] chore(tests) add Short() -> SkipNow() to slowest tests vanilla: 21.57 real 45.14 user 8.51 sys short: 14.40 real 31.13 user 5.56 sys License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@0b529b1366eef15cf5d9c8c17e3864616c7d2841 --- bitswap/bitswap_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 4a01444e5..a851f0f56 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -90,6 +90,9 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { } func TestSwarm(t *testing.T) { + if testing.Short() { + t.SkipNow() + } net := tn.VirtualNetwork() rs := mock.VirtualRoutingServer() sg := NewSessionGenerator(net, rs) From ea4b6358d02f1bf73df05f33d74b31f7272f1402 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 19 Nov 2014 08:31:32 -0800 Subject: [PATCH 0105/1035] fix(bitswap/notifications) don't force sender to block on receiver License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@eeffc3a5ea358aef9cbfef0954a26b3b4e466900 --- bitswap/notifications/notifications.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 2da2b7fad..34888d510 100644 --- 
a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -34,7 +34,7 @@ func (ps *impl) Publish(block blocks.Block) { func (ps *impl) Subscribe(ctx context.Context, k u.Key) <-chan blocks.Block { topic := string(k) subChan := ps.wrapped.SubOnce(topic) - blockChannel := make(chan blocks.Block) + blockChannel := make(chan blocks.Block, 1) // buffered so the sender doesn't wait on receiver go func() { defer close(blockChannel) select { From f62bb0037090f8a163ba182e13665f505ae0f9d3 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 20 Nov 2014 23:23:48 -0800 Subject: [PATCH 0106/1035] fix(bitswap) shutdown License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@b0e60a694827db87346324cef47feb3c6f3ba9bb --- bitswap/bitswap.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 529c78689..8af8426d3 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -31,10 +31,8 @@ func New(ctx context.Context, p peer.Peer, notif := notifications.New() go func() { - select { - case <-ctx.Done(): - notif.Shutdown() - } + <-ctx.Done() + notif.Shutdown() }() bs := &bitswap{ From 1c1d16f7243ee96df12f0e27d4950a1316e7c9d1 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 18 Nov 2014 21:31:00 -0800 Subject: [PATCH 0107/1035] beginnings of a bitswap refactor This commit was moved from ipfs/go-bitswap@fefe7d37908ee6336f9977384800b0109b0abb98 --- bitswap/bitswap.go | 139 +++++++++++++++++++++++++++++----------- bitswap/bitswap_test.go | 18 +++--- 2 files changed, 111 insertions(+), 46 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 8af8426d3..6daf32555 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -42,8 +42,10 @@ func New(ctx context.Context, p peer.Peer, routing: routing, sender: network, wantlist: u.NewKeySet(), + blockReq: make(chan u.Key, 32), } network.SetDelegate(bs) + go bs.run(ctx) return bs } @@ -63,6 +65,8 @@ 
type bitswap struct { notifications notifications.PubSub + blockReq chan u.Key + // strategy listens to network traffic and makes decisions about how to // interact with partners. // TODO(brian): save the strategy's state to the datastore @@ -75,7 +79,7 @@ type bitswap struct { // deadline enforced by the context // // TODO ensure only one active request per key -func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) { +func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, error) { log.Debugf("Get Block %v", k) now := time.Now() defer func() { @@ -88,42 +92,11 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) bs.wantlist.Add(k) promise := bs.notifications.Subscribe(ctx, k) - const maxProviders = 20 - peersToQuery := bs.routing.FindProvidersAsync(ctx, k, maxProviders) - - go func() { - message := bsmsg.New() - for _, wanted := range bs.wantlist.Keys() { - message.AddWanted(wanted) - } - for peerToQuery := range peersToQuery { - log.Debugf("bitswap got peersToQuery: %s", peerToQuery) - go func(p peer.Peer) { - - log.Debugf("bitswap dialing peer: %s", p) - err := bs.sender.DialPeer(ctx, p) - if err != nil { - log.Errorf("Error sender.DialPeer(%s)", p) - return - } - - response, err := bs.sender.SendRequest(ctx, p, message) - if err != nil { - log.Errorf("Error sender.SendRequest(%s) = %s", p, err) - return - } - // FIXME ensure accounting is handled correctly when - // communication fails. May require slightly different API to - // get better guarantees. May need shared sequence numbers. 
- bs.strategy.MessageSent(p, message) - - if response == nil { - return - } - bs.ReceiveMessage(ctx, p, response) - }(peerToQuery) - } - }() + select { + case bs.blockReq <- k: + case <-parent.Done(): + return nil, parent.Err() + } select { case block := <-promise: @@ -134,6 +107,96 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) } } +func (bs *bitswap) GetBlocks(parent context.Context, ks []u.Key) (*blocks.Block, error) { + // TODO: something smart + return nil, nil +} + +func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) error { + message := bsmsg.New() + for _, wanted := range bs.wantlist.Keys() { + message.AddWanted(wanted) + } + for peerToQuery := range peers { + log.Debugf("bitswap got peersToQuery: %s", peerToQuery) + go func(p peer.Peer) { + + log.Debugf("bitswap dialing peer: %s", p) + err := bs.sender.DialPeer(ctx, p) + if err != nil { + log.Errorf("Error sender.DialPeer(%s)", p) + return + } + + response, err := bs.sender.SendRequest(ctx, p, message) + if err != nil { + log.Errorf("Error sender.SendRequest(%s) = %s", p, err) + return + } + // FIXME ensure accounting is handled correctly when + // communication fails. May require slightly different API to + // get better guarantees. May need shared sequence numbers. 
+ bs.strategy.MessageSent(p, message) + + if response == nil { + return + } + bs.ReceiveMessage(ctx, p, response) + }(peerToQuery) + } + return nil +} + +func (bs *bitswap) run(ctx context.Context) { + var sendlist <-chan peer.Peer + + // Every so often, we should resend out our current want list + rebroadcastTime := time.Second * 5 + + // Time to wait before sending out wantlists to better batch up requests + bufferTime := time.Millisecond * 3 + peersPerSend := 6 + + timeout := time.After(rebroadcastTime) + threshold := 10 + unsent := 0 + for { + select { + case <-timeout: + if sendlist == nil { + // rely on semi randomness of maps + firstKey := bs.wantlist.Keys()[0] + sendlist = bs.routing.FindProvidersAsync(ctx, firstKey, 6) + } + err := bs.sendWantListTo(ctx, sendlist) + if err != nil { + log.Error("error sending wantlist: %s", err) + } + sendlist = nil + timeout = time.After(rebroadcastTime) + case k := <-bs.blockReq: + if unsent == 0 { + sendlist = bs.routing.FindProvidersAsync(ctx, k, peersPerSend) + } + unsent++ + + if unsent >= threshold { + // send wantlist to sendlist + bs.sendWantListTo(ctx, sendlist) + unsent = 0 + timeout = time.After(rebroadcastTime) + sendlist = nil + } else { + // set a timeout to wait for more blocks or send current wantlist + + timeout = time.After(bufferTime) + } + case <-ctx.Done(): + return + } + } +} + // HasBlock announces the existance of a block to this bitswap service. The // service will potentially notify its peers. 
func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { @@ -192,8 +255,8 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm } } } - defer bs.strategy.MessageSent(p, message) + bs.strategy.MessageSent(p, message) log.Debug("Returning message.") return p, message } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index a851f0f56..ee1e7644d 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -31,7 +31,7 @@ func TestGetBlockTimeout(t *testing.T) { ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) block := blocks.NewBlock([]byte("block")) - _, err := self.exchange.Block(ctx, block.Key()) + _, err := self.exchange.GetBlock(ctx, block.Key()) if err != context.DeadlineExceeded { t.Fatal("Expected DeadlineExceeded error") @@ -50,7 +50,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { solo := g.Next() ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) - _, err := solo.exchange.Block(ctx, block.Key()) + _, err := solo.exchange.GetBlock(ctx, block.Key()) if err != context.DeadlineExceeded { t.Fatal("Expected DeadlineExceeded error") @@ -78,7 +78,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { wantsBlock := g.Next() ctx, _ := context.WithTimeout(context.Background(), time.Second) - received, err := wantsBlock.exchange.Block(ctx, block.Key()) + received, err := wantsBlock.exchange.GetBlock(ctx, block.Key()) if err != nil { t.Log(err) t.Fatal("Expected to succeed") @@ -100,7 +100,7 @@ func TestSwarm(t *testing.T) { t.Log("Create a ton of instances, and just a few blocks") - numInstances := 500 + numInstances := 5 numBlocks := 2 instances := sg.Instances(numInstances) @@ -142,7 +142,7 @@ func TestSwarm(t *testing.T) { func getOrFail(bitswap instance, b *blocks.Block, t *testing.T, wg *sync.WaitGroup) { if _, err := bitswap.blockstore.Get(b.Key()); err != nil { - _, err := bitswap.exchange.Block(context.Background(), b.Key()) + _, 
err := bitswap.exchange.GetBlock(context.Background(), b.Key()) if err != nil { t.Fatal(err) } @@ -171,7 +171,7 @@ func TestSendToWantingPeer(t *testing.T) { t.Logf("Peer %v attempts to get %v. NB: not available\n", w.peer, alpha.Key()) ctx, _ := context.WithTimeout(context.Background(), timeout) - _, err := w.exchange.Block(ctx, alpha.Key()) + _, err := w.exchange.GetBlock(ctx, alpha.Key()) if err == nil { t.Fatalf("Expected %v to NOT be available", alpha.Key()) } @@ -186,7 +186,7 @@ func TestSendToWantingPeer(t *testing.T) { t.Logf("%v gets %v from %v and discovers it wants %v\n", me.peer, beta.Key(), w.peer, alpha.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) - if _, err := me.exchange.Block(ctx, beta.Key()); err != nil { + if _, err := me.exchange.GetBlock(ctx, beta.Key()); err != nil { t.Fatal(err) } @@ -199,7 +199,7 @@ func TestSendToWantingPeer(t *testing.T) { t.Logf("%v requests %v\n", me.peer, alpha.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) - if _, err := me.exchange.Block(ctx, alpha.Key()); err != nil { + if _, err := me.exchange.GetBlock(ctx, alpha.Key()); err != nil { t.Fatal(err) } @@ -290,8 +290,10 @@ func session(net tn.Network, rs mock.RoutingServer, id peer.ID) instance { routing: htc, sender: adapter, wantlist: util.NewKeySet(), + blockReq: make(chan util.Key, 32), } adapter.SetDelegate(bs) + go bs.run(context.TODO()) return instance{ peer: p, exchange: bs, From 9e8502efe20b7680f21bbbd3a89bd70120c47b3a Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 18 Nov 2014 21:46:13 -0800 Subject: [PATCH 0108/1035] dont panic on empty wantlist This commit was moved from ipfs/go-bitswap@709075f79cdde11829dec038c663aaf1d381e218 --- bitswap/bitswap.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 6daf32555..4aaacdbfd 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -163,9 +163,13 @@ func (bs *bitswap) run(ctx context.Context) { for { 
select { case <-timeout: + wantlist := bs.wantlist.Keys() + if len(wantlist) == 0 { + continue + } if sendlist == nil { // rely on semi randomness of maps - firstKey := bs.wantlist.Keys()[0] + firstKey := wantlist[0] sendlist = bs.routing.FindProvidersAsync(ctx, firstKey, 6) } err := bs.sendWantListTo(ctx, sendlist) From 061edb46089c06f1d56e5be356f50f5f765adf30 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 18 Nov 2014 21:52:58 -0800 Subject: [PATCH 0109/1035] test(bitswap) @whyrusleeping This appears to be a timing issue. The asynchronous nature of the new structure provides has the bitswap waiting on the context a bit more. This isn't a problem at all, but in this test, it makes the functions return in an inconveniently timely manner. TODO don't let the test depend on time. License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@ca31457dbacfa550f0ea5fa07ed4aaf28352db82 --- bitswap/bitswap_test.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index ee1e7644d..f69cb7629 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -152,6 +152,10 @@ func getOrFail(bitswap instance, b *blocks.Block, t *testing.T, wg *sync.WaitGro // TODO simplify this test. get to the _essence_! func TestSendToWantingPeer(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + net := tn.VirtualNetwork() rs := mock.VirtualRoutingServer() sg := NewSessionGenerator(net, rs) @@ -167,7 +171,7 @@ func TestSendToWantingPeer(t *testing.T) { alpha := bg.Next() - const timeout = 1 * time.Millisecond // FIXME don't depend on time + const timeout = 100 * time.Millisecond // FIXME don't depend on time t.Logf("Peer %v attempts to get %v. 
NB: not available\n", w.peer, alpha.Key()) ctx, _ := context.WithTimeout(context.Background(), timeout) From f9b3f2625eccfbad027feb8f6df593b0fffa1c3f Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 18 Nov 2014 22:05:53 -0800 Subject: [PATCH 0110/1035] events(bitswap) try the new event logger in the bitswap GetBlock method @jbenet @whyrusleeping Let me know if you want to direct the eventlog output to _both_ the file and stderr. Right now it goes to file. Perhaps this is just a minor bip in the larger discussion around log levels. https://github.com/jbenet/go-ipfs/issues/292 License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@40f3a6a6bc738d528e706428320fc937d2c01b01 --- bitswap/bitswap.go | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 4aaacdbfd..a4bb0ec0c 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -17,9 +17,10 @@ import ( strategy "github.com/jbenet/go-ipfs/exchange/bitswap/strategy" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" + "github.com/jbenet/go-ipfs/util/eventlog" ) -var log = u.Logger("bitswap") +var log = eventlog.Logger("bitswap") // New initializes a BitSwap instance that communicates over the // provided BitSwapNetwork. This function registers the returned instance as @@ -80,15 +81,21 @@ type bitswap struct { // // TODO ensure only one active request per key func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, error) { - log.Debugf("Get Block %v", k) - now := time.Now() - defer func() { - log.Debugf("GetBlock took %f secs", time.Now().Sub(now).Seconds()) - }() + + // make sure to derive a new |ctx| and pass it to children. It's correct to + // listen on |parent| here, but incorrect to pass |parent| to new async + // functions. This is difficult to enforce. May this comment keep you safe. 
ctx, cancelFunc := context.WithCancel(parent) defer cancelFunc() + ctx = eventlog.ContextWithMetadata(ctx, eventlog.Uuid("BitswapGetBlockRequest")) + log.Event(ctx, "BitswapGetBlockRequestBegin", &k) + + defer func() { + log.Event(ctx, "BitSwapGetBlockRequestEnd", &k) + }() + bs.wantlist.Add(k) promise := bs.notifications.Subscribe(ctx, k) From 4951bf7be062b722fbc6998e3c03d66c556c7559 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 18 Nov 2014 22:14:10 -0800 Subject: [PATCH 0111/1035] fix(bitswap) handle error @whyrusleeping License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@c0decfc9f8446a1aecd3ea308bd5788a15532807 --- bitswap/bitswap.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index a4bb0ec0c..4a66aaa06 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -193,7 +193,10 @@ func (bs *bitswap) run(ctx context.Context) { if unsent >= threshold { // send wantlist to sendlist - bs.sendWantListTo(ctx, sendlist) + err := bs.sendWantListTo(ctx, sendlist) + if err != nil { + log.Errorf("error sending wantlist: %s", err) + } unsent = 0 timeout = time.After(rebroadcastTime) sendlist = nil From 0d06223c70da5224fe5540ca37e105d50d1b08d1 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 18 Nov 2014 22:14:36 -0800 Subject: [PATCH 0112/1035] fix(bitswap) consistent event names @whyrusleeping @jbenet since the logger is created with package scope, don't need to specify the package name in event messages License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@a4ccbf394b3b0f546741efcb8d21924dc3112272 --- bitswap/bitswap.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 4a66aaa06..bcfcebd94 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -89,11 +89,11 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err ctx, 
cancelFunc := context.WithCancel(parent) defer cancelFunc() - ctx = eventlog.ContextWithMetadata(ctx, eventlog.Uuid("BitswapGetBlockRequest")) - log.Event(ctx, "BitswapGetBlockRequestBegin", &k) + ctx = eventlog.ContextWithMetadata(ctx, eventlog.Uuid("GetBlockRequest")) + log.Event(ctx, "GetBlockRequestBegin", &k) defer func() { - log.Event(ctx, "BitSwapGetBlockRequestEnd", &k) + log.Event(ctx, "GetBlockRequestEnd", &k) }() bs.wantlist.Add(k) From a9fd0e9b43689270dddebdf0a6b57419b2597e75 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 18 Nov 2014 22:17:09 -0800 Subject: [PATCH 0113/1035] fix(log) ->f @whyrusleeping License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@aca2566d6e1c09be75d707fb584046c47d5bfa58 --- bitswap/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index bcfcebd94..9f0b7c7b9 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -181,7 +181,7 @@ func (bs *bitswap) run(ctx context.Context) { } err := bs.sendWantListTo(ctx, sendlist) if err != nil { - log.Error("error sending wantlist: %s", err) + log.Errorf("error sending wantlist: %s", err) } sendlist = nil timeout = time.After(rebroadcastTime) From 2361f14634c1ab182c9152ace0f10fe612e749dd Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 18 Nov 2014 22:18:25 -0800 Subject: [PATCH 0114/1035] use event logger here too? 
License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@56726e027dee8c01c3adbbcfd3c1e263f40e953e --- bitswap/bitswap.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 9f0b7c7b9..7e82168bf 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -125,10 +125,10 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e message.AddWanted(wanted) } for peerToQuery := range peers { - log.Debugf("bitswap got peersToQuery: %s", peerToQuery) + log.Event(ctx, "PeerToQuery", peerToQuery) go func(p peer.Peer) { - log.Debugf("bitswap dialing peer: %s", p) + log.Event(ctx, "DialPeer", p) err := bs.sender.DialPeer(ctx, p) if err != nil { log.Errorf("Error sender.DialPeer(%s)", p) From bfec4f7609dbbe23ca4a15a7e4bddb4a478908fd Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 18 Nov 2014 22:20:25 -0800 Subject: [PATCH 0115/1035] clarify MessageReceived contract License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@a82ac0a9888b20b1ce9ab03209acc662d2fff119 --- bitswap/strategy/strategy.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index b778c7a34..78209c38e 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -72,6 +72,8 @@ func (s *strategist) Seed(int64) { // TODO } +// MessageReceived performs book-keeping. Returns error if passed invalid +// arguments. func (s *strategist) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { s.lock.Lock() defer s.lock.Unlock() @@ -91,7 +93,7 @@ func (s *strategist) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error // FIXME extract blocks.NumBytes(block) or block.NumBytes() method l.ReceivedBytes(len(block.Data)) } - return errors.New("TODO") + return nil } // TODO add contents of m.WantList() to my local wantlist? 
NB: could introduce From ba9c7b21f966de8e135b25a00b21fcbe6f1c0530 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 18 Nov 2014 22:31:42 -0800 Subject: [PATCH 0116/1035] naming License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@9cad93a890d7877d2d102e968b49b95e8c98f10e --- bitswap/bitswap.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 7e82168bf..87116fd42 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -43,7 +43,7 @@ func New(ctx context.Context, p peer.Peer, routing: routing, sender: network, wantlist: u.NewKeySet(), - blockReq: make(chan u.Key, 32), + blockRequests: make(chan u.Key, 32), } network.SetDelegate(bs) go bs.run(ctx) @@ -66,7 +66,7 @@ type bitswap struct { notifications notifications.PubSub - blockReq chan u.Key + blockRequests chan u.Key // strategy listens to network traffic and makes decisions about how to // interact with partners. @@ -100,7 +100,7 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err promise := bs.notifications.Subscribe(ctx, k) select { - case bs.blockReq <- k: + case bs.blockRequests <- k: case <-parent.Done(): return nil, parent.Err() } @@ -185,7 +185,7 @@ func (bs *bitswap) run(ctx context.Context) { } sendlist = nil timeout = time.After(rebroadcastTime) - case k := <-bs.blockReq: + case k := <-bs.blockRequests: if unsent == 0 { sendlist = bs.routing.FindProvidersAsync(ctx, k, peersPerSend) } From 99e9dc9829245d7f30d970f996344e995a591f83 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 18 Nov 2014 22:37:47 -0800 Subject: [PATCH 0117/1035] constify to make it clear what _can_ and _can't_ change over time License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@3f6bbecc73964735521a94af6475963a475f71ec --- bitswap/bitswap.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go 
b/bitswap/bitswap.go index 87116fd42..73c95c230 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -158,14 +158,14 @@ func (bs *bitswap) run(ctx context.Context) { var sendlist <-chan peer.Peer // Every so often, we should resend out our current want list - rebroadcastTime := time.Second * 5 + const rebroadcastTime = time.Second * 5 // Time to wait before sending out wantlists to better batch up requests - bufferTime := time.Millisecond * 3 + const bufferTime = time.Millisecond * 3 peersPerSend := 6 timeout := time.After(rebroadcastTime) - threshold := 10 + const threshold = 10 unsent := 0 for { select { From a67f9b10050a66831f2c778e77b7fcc7c7090c65 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 18 Nov 2014 22:58:06 -0800 Subject: [PATCH 0118/1035] some renaming License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@599e308845ff98d07e3b0accafd6cf47ba96d448 --- bitswap/bitswap.go | 24 +++++++++++------------- bitswap/bitswap_test.go | 2 +- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 73c95c230..b8f8a7d18 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -155,21 +155,19 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e } func (bs *bitswap) run(ctx context.Context) { - var sendlist <-chan peer.Peer - // Every so often, we should resend out our current want list - const rebroadcastTime = time.Second * 5 - - // Time to wait before sending out wantlists to better batch up requests - const bufferTime = time.Millisecond * 3 - peersPerSend := 6 - - timeout := time.After(rebroadcastTime) + const rebroadcastPeriod = time.Second * 5 // Every so often, we should resend out our current want list + const batchDelay = time.Millisecond * 3 // Time to wait before sending out wantlists to better batch up requests + const peersPerSend = 6 const threshold = 10 + + var sendlist <-chan peer.Peer // NB: must be initialized to 
zero value + broadcastSignal := time.After(rebroadcastPeriod) unsent := 0 + for { select { - case <-timeout: + case <-broadcastSignal: wantlist := bs.wantlist.Keys() if len(wantlist) == 0 { continue @@ -184,7 +182,7 @@ func (bs *bitswap) run(ctx context.Context) { log.Errorf("error sending wantlist: %s", err) } sendlist = nil - timeout = time.After(rebroadcastTime) + broadcastSignal = time.After(rebroadcastPeriod) case k := <-bs.blockRequests: if unsent == 0 { sendlist = bs.routing.FindProvidersAsync(ctx, k, peersPerSend) @@ -198,12 +196,12 @@ func (bs *bitswap) run(ctx context.Context) { log.Errorf("error sending wantlist: %s", err) } unsent = 0 - timeout = time.After(rebroadcastTime) + broadcastSignal = time.After(rebroadcastPeriod) sendlist = nil } else { // set a timeout to wait for more blocks or send current wantlist - timeout = time.After(bufferTime) + broadcastSignal = time.After(batchDelay) } case <-ctx.Done(): return diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index f69cb7629..e06eabefa 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -294,7 +294,7 @@ func session(net tn.Network, rs mock.RoutingServer, id peer.ID) instance { routing: htc, sender: adapter, wantlist: util.NewKeySet(), - blockReq: make(chan util.Key, 32), + blockRequests: make(chan util.Key, 32), } adapter.SetDelegate(bs) go bs.run(context.TODO()) From 8241ce60ff4a559266bc130c6dccd39d2ee65892 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 18 Nov 2014 22:58:30 -0800 Subject: [PATCH 0119/1035] simplify License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@0163be780746cd19ff0764ab3ab2cb2f5e333bb7 --- bitswap/bitswap.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b8f8a7d18..1102dda75 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -91,10 +91,7 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err 
ctx = eventlog.ContextWithMetadata(ctx, eventlog.Uuid("GetBlockRequest")) log.Event(ctx, "GetBlockRequestBegin", &k) - - defer func() { - log.Event(ctx, "GetBlockRequestEnd", &k) - }() + defer log.Event(ctx, "GetBlockRequestEnd", &k) bs.wantlist.Add(k) promise := bs.notifications.Subscribe(ctx, k) From 6faa52bd292a4bb9e4d6dbcb9a5c49d446ac73f6 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 19 Nov 2014 09:40:21 -0800 Subject: [PATCH 0120/1035] misc(bitswap) renaming License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@eaa7a9d5a20cfe7ffcf78dc22c8f075cc752614a --- bitswap/bitswap.go | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 1102dda75..e904d28a6 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -153,14 +153,14 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e func (bs *bitswap) run(ctx context.Context) { + const batchDelay = time.Millisecond * 3 // Time to wait before sending out wantlists to better batch up requests + const numKeysPerBatch = 10 + const maxProvidersPerRequest = 6 const rebroadcastPeriod = time.Second * 5 // Every so often, we should resend out our current want list - const batchDelay = time.Millisecond * 3 // Time to wait before sending out wantlists to better batch up requests - const peersPerSend = 6 - const threshold = 10 - var sendlist <-chan peer.Peer // NB: must be initialized to zero value + var providers <-chan peer.Peer // NB: must be initialized to zero value broadcastSignal := time.After(rebroadcastPeriod) - unsent := 0 + unsentKeys := 0 for { select { @@ -169,32 +169,33 @@ func (bs *bitswap) run(ctx context.Context) { if len(wantlist) == 0 { continue } - if sendlist == nil { + if providers == nil { // rely on semi randomness of maps firstKey := wantlist[0] - sendlist = bs.routing.FindProvidersAsync(ctx, firstKey, 6) + providers = 
bs.routing.FindProvidersAsync(ctx, firstKey, 6) } - err := bs.sendWantListTo(ctx, sendlist) + err := bs.sendWantListTo(ctx, providers) if err != nil { log.Errorf("error sending wantlist: %s", err) } - sendlist = nil + providers = nil broadcastSignal = time.After(rebroadcastPeriod) + case k := <-bs.blockRequests: - if unsent == 0 { - sendlist = bs.routing.FindProvidersAsync(ctx, k, peersPerSend) + if unsentKeys == 0 { + providers = bs.routing.FindProvidersAsync(ctx, k, maxProvidersPerRequest) } - unsent++ + unsentKeys++ - if unsent >= threshold { - // send wantlist to sendlist - err := bs.sendWantListTo(ctx, sendlist) + if unsentKeys >= numKeysPerBatch { + // send wantlist to providers + err := bs.sendWantListTo(ctx, providers) if err != nil { log.Errorf("error sending wantlist: %s", err) } - unsent = 0 + unsentKeys = 0 broadcastSignal = time.After(rebroadcastPeriod) - sendlist = nil + providers = nil } else { // set a timeout to wait for more blocks or send current wantlist From 69c03f60e981bbb90ae663ca76073d5dc9b192aa Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 19 Nov 2014 10:13:31 -0800 Subject: [PATCH 0121/1035] added a new test for a dhthell scenario that was failing This commit was moved from ipfs/go-bitswap@e3bf5cd8c57fe7f8b0a4255fa82c93855fc94102 --- bitswap/bitswap.go | 8 +++---- bitswap/bitswap_test.go | 53 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 55 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index e904d28a6..1539b5fc8 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -128,7 +128,7 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e log.Event(ctx, "DialPeer", p) err := bs.sender.DialPeer(ctx, p) if err != nil { - log.Errorf("Error sender.DialPeer(%s)", p) + log.Errorf("Error sender.DialPeer(%s): %s", p, err) return } @@ -153,10 +153,8 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e func (bs *bitswap) run(ctx 
context.Context) { - const batchDelay = time.Millisecond * 3 // Time to wait before sending out wantlists to better batch up requests - const numKeysPerBatch = 10 - const maxProvidersPerRequest = 6 - const rebroadcastPeriod = time.Second * 5 // Every so often, we should resend out our current want list + // Every so often, we should resend out our current want list + rebroadcastTime := time.Second * 5 var providers <-chan peer.Peer // NB: must be initialized to zero value broadcastSignal := time.After(rebroadcastPeriod) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index e06eabefa..e3b4d913a 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -100,7 +100,7 @@ func TestSwarm(t *testing.T) { t.Log("Create a ton of instances, and just a few blocks") - numInstances := 5 + numInstances := 500 numBlocks := 2 instances := sg.Instances(numInstances) @@ -140,6 +140,57 @@ func TestSwarm(t *testing.T) { } } +func TestLargeFile(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + net := tn.VirtualNetwork() + rs := mock.VirtualRoutingServer() + sg := NewSessionGenerator(net, rs) + bg := NewBlockGenerator() + + t.Log("Test a few nodes trying to get one file with a lot of blocks") + + numInstances := 10 + numBlocks := 100 + + instances := sg.Instances(numInstances) + blocks := bg.Blocks(numBlocks) + + t.Log("Give the blocks to the first instance") + + first := instances[0] + for _, b := range blocks { + first.blockstore.Put(b) + first.exchange.HasBlock(context.Background(), *b) + rs.Announce(first.peer, b.Key()) + } + + t.Log("Distribute!") + + var wg sync.WaitGroup + + for _, inst := range instances { + for _, b := range blocks { + wg.Add(1) + // NB: executing getOrFail concurrently puts tremendous pressure on + // the goroutine scheduler + getOrFail(inst, b, t, &wg) + } + } + wg.Wait() + + t.Log("Verify!") + + for _, inst := range instances { + for _, b := range blocks { + if _, err := inst.blockstore.Get(b.Key()); err != nil { + 
t.Fatal(err) + } + } + } +} + func getOrFail(bitswap instance, b *blocks.Block, t *testing.T, wg *sync.WaitGroup) { if _, err := bitswap.blockstore.Get(b.Key()); err != nil { _, err := bitswap.exchange.GetBlock(context.Background(), b.Key()) From e6a29259c4f7989871f3b02bf3a754d095579d37 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 19 Nov 2014 23:32:51 +0000 Subject: [PATCH 0122/1035] move some variables into strategy This commit was moved from ipfs/go-bitswap@c6af3fe40e64538cc80419b93b6580a01b092d1b --- bitswap/bitswap.go | 18 ++++++++++-------- bitswap/strategy/interface.go | 7 +++++++ bitswap/strategy/strategy.go | 13 +++++++++++++ 3 files changed, 30 insertions(+), 8 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 1539b5fc8..7ad9afb6e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -157,9 +157,10 @@ func (bs *bitswap) run(ctx context.Context) { rebroadcastTime := time.Second * 5 var providers <-chan peer.Peer // NB: must be initialized to zero value - broadcastSignal := time.After(rebroadcastPeriod) - unsentKeys := 0 + broadcastSignal := time.After(bs.strategy.GetRebroadcastDelay()) + // Number of unsent keys for the current batch + unsentKeys := 0 for { select { case <-broadcastSignal: @@ -170,14 +171,14 @@ func (bs *bitswap) run(ctx context.Context) { if providers == nil { // rely on semi randomness of maps firstKey := wantlist[0] - providers = bs.routing.FindProvidersAsync(ctx, firstKey, 6) + providers = bs.routing.FindProvidersAsync(ctx, firstKey, maxProvidersPerRequest) } err := bs.sendWantListTo(ctx, providers) if err != nil { log.Errorf("error sending wantlist: %s", err) } providers = nil - broadcastSignal = time.After(rebroadcastPeriod) + broadcastSignal = time.After(bs.strategy.GetRebroadcastDelay()) case k := <-bs.blockRequests: if unsentKeys == 0 { @@ -185,19 +186,19 @@ func (bs *bitswap) run(ctx context.Context) { } unsentKeys++ - if unsentKeys >= numKeysPerBatch { + if unsentKeys >= bs.strategy.GetBatchSize() 
{ // send wantlist to providers err := bs.sendWantListTo(ctx, providers) if err != nil { log.Errorf("error sending wantlist: %s", err) } unsentKeys = 0 - broadcastSignal = time.After(rebroadcastPeriod) + broadcastSignal = time.After(bs.strategy.GetRebroadcastDelay()) providers = nil } else { // set a timeout to wait for more blocks or send current wantlist - broadcastSignal = time.After(batchDelay) + broadcastSignal = time.After(bs.strategy.GetBatchDelay()) } case <-ctx.Done(): return @@ -217,7 +218,7 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { // TODO(brian): handle errors func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsmsg.BitSwapMessage) ( peer.Peer, bsmsg.BitSwapMessage) { - log.Debugf("ReceiveMessage from %v", p.Key()) + log.Debugf("ReceiveMessage from %s", p) log.Debugf("Message wantlist: %v", incoming.Wantlist()) if p == nil { @@ -239,6 +240,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm for _, block := range incoming.Blocks() { // TODO verify blocks? 
if err := bs.blockstore.Put(&block); err != nil { + log.Criticalf("error putting block: %s", err) continue // FIXME(brian): err ignored } bs.notifications.Publish(block) diff --git a/bitswap/strategy/interface.go b/bitswap/strategy/interface.go index ac1f09a1f..9ac601d70 100644 --- a/bitswap/strategy/interface.go +++ b/bitswap/strategy/interface.go @@ -1,6 +1,8 @@ package strategy import ( + "time" + bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" @@ -29,4 +31,9 @@ type Strategy interface { NumBytesSentTo(peer.Peer) uint64 NumBytesReceivedFrom(peer.Peer) uint64 + + // Values determining bitswap behavioural patterns + GetBatchSize() int + GetBatchDelay() time.Duration + GetRebroadcastDelay() time.Duration } diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index 78209c38e..d58894b05 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -3,6 +3,7 @@ package strategy import ( "errors" "sync" + "time" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" peer "github.com/jbenet/go-ipfs/peer" @@ -139,3 +140,15 @@ func (s *strategist) ledger(p peer.Peer) *ledger { } return l } + +func (s *strategist) GetBatchSize() int { + return 10 +} + +func (s *strategist) GetBatchDelay() time.Duration { + return time.Millisecond * 3 +} + +func (s *strategist) GetRebroadcastDelay() time.Duration { + return time.Second * 2 +} From da5c41755cade9f06f2b2adbb319c33a3d39e684 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 19 Nov 2014 23:34:40 +0000 Subject: [PATCH 0123/1035] fix tests halting This commit was moved from ipfs/go-bitswap@44f321389b0eb89c1af1104c964304b4cb0aaa3a --- bitswap/bitswap.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 7ad9afb6e..3115c73bb 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -117,6 +117,9 @@ func (bs *bitswap) GetBlocks(parent context.Context, ks 
[]u.Key) (*blocks.Block, } func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) error { + if peers == nil { + panic("Cant send wantlist to nil peerchan") + } message := bsmsg.New() for _, wanted := range bs.wantlist.Keys() { message.AddWanted(wanted) @@ -164,6 +167,7 @@ func (bs *bitswap) run(ctx context.Context) { for { select { case <-broadcastSignal: + unsentKeys = 0 wantlist := bs.wantlist.Keys() if len(wantlist) == 0 { continue From 33a9a9e61b2c442e8c5c27fca82346eb00bdda11 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 20 Nov 2014 04:58:26 +0000 Subject: [PATCH 0124/1035] remove buffer timing in bitswap in favor of manual batching This commit was moved from ipfs/go-bitswap@26f78574e5f959a0cd10007d34955f0b3cc54521 --- bitswap/bitswap.go | 52 ++++++++++++++--------------------------- bitswap/bitswap_test.go | 2 +- 2 files changed, 19 insertions(+), 35 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 3115c73bb..a497a4594 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -43,7 +43,7 @@ func New(ctx context.Context, p peer.Peer, routing: routing, sender: network, wantlist: u.NewKeySet(), - blockRequests: make(chan u.Key, 32), + batchRequests: make(chan []u.Key, 32), } network.SetDelegate(bs) go bs.run(ctx) @@ -66,7 +66,10 @@ type bitswap struct { notifications notifications.PubSub - blockRequests chan u.Key + // Requests for a set of related blocks + // the assumption is made that the same peer is likely to + // have more than a single block in the set + batchRequests chan []u.Key // strategy listens to network traffic and makes decisions about how to // interact with partners. 
@@ -97,7 +100,7 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err promise := bs.notifications.Subscribe(ctx, k) select { - case bs.blockRequests <- k: + case bs.batchRequests <- []u.Key{k}: case <-parent.Done(): return nil, parent.Err() } @@ -159,50 +162,31 @@ func (bs *bitswap) run(ctx context.Context) { // Every so often, we should resend out our current want list rebroadcastTime := time.Second * 5 - var providers <-chan peer.Peer // NB: must be initialized to zero value - broadcastSignal := time.After(bs.strategy.GetRebroadcastDelay()) + broadcastSignal := time.NewTicker(bs.strategy.GetRebroadcastDelay()) - // Number of unsent keys for the current batch - unsentKeys := 0 for { select { - case <-broadcastSignal: - unsentKeys = 0 + case <-broadcastSignal.C: wantlist := bs.wantlist.Keys() if len(wantlist) == 0 { continue } - if providers == nil { - // rely on semi randomness of maps - firstKey := wantlist[0] - providers = bs.routing.FindProvidersAsync(ctx, firstKey, maxProvidersPerRequest) - } + providers := bs.routing.FindProvidersAsync(ctx, wantlist[0], maxProvidersPerRequest) + err := bs.sendWantListTo(ctx, providers) if err != nil { log.Errorf("error sending wantlist: %s", err) } - providers = nil - broadcastSignal = time.After(bs.strategy.GetRebroadcastDelay()) - - case k := <-bs.blockRequests: - if unsentKeys == 0 { - providers = bs.routing.FindProvidersAsync(ctx, k, maxProvidersPerRequest) + case ks := <-bs.batchRequests: + if len(ks) == 0 { + log.Warning("Received batch request for zero blocks") + continue } - unsentKeys++ - - if unsentKeys >= bs.strategy.GetBatchSize() { - // send wantlist to providers - err := bs.sendWantListTo(ctx, providers) - if err != nil { - log.Errorf("error sending wantlist: %s", err) - } - unsentKeys = 0 - broadcastSignal = time.After(bs.strategy.GetRebroadcastDelay()) - providers = nil - } else { - // set a timeout to wait for more blocks or send current wantlist + providers := 
bs.routing.FindProvidersAsync(ctx, ks[0], maxProvidersPerRequest) - broadcastSignal = time.After(bs.strategy.GetBatchDelay()) + err := bs.sendWantListTo(ctx, providers) + if err != nil { + log.Errorf("error sending wantlist: %s", err) } case <-ctx.Done(): return diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index e3b4d913a..7b4b36fa0 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -345,7 +345,7 @@ func session(net tn.Network, rs mock.RoutingServer, id peer.ID) instance { routing: htc, sender: adapter, wantlist: util.NewKeySet(), - blockRequests: make(chan util.Key, 32), + batchRequests: make(chan []util.Key, 32), } adapter.SetDelegate(bs) go bs.run(context.TODO()) From bd1662216027a7cf71dedf7a060eb0afc3724cff Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 20 Nov 2014 06:16:53 +0000 Subject: [PATCH 0125/1035] randomize rebroadcast target This commit was moved from ipfs/go-bitswap@6ee1fe5ec9958dd28639e7692d7cb4de4d13b190 --- bitswap/bitswap.go | 9 +++++++-- bitswap/strategy/interface.go | 1 - bitswap/strategy/strategy.go | 4 ---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index a497a4594..35346644b 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -3,6 +3,7 @@ package bitswap import ( + "math/rand" "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" @@ -96,7 +97,6 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err log.Event(ctx, "GetBlockRequestBegin", &k) defer log.Event(ctx, "GetBlockRequestEnd", &k) - bs.wantlist.Add(k) promise := bs.notifications.Subscribe(ctx, k) select { @@ -171,17 +171,22 @@ func (bs *bitswap) run(ctx context.Context) { if len(wantlist) == 0 { continue } - providers := bs.routing.FindProvidersAsync(ctx, wantlist[0], maxProvidersPerRequest) + n := rand.Intn(len(wantlist)) + providers := bs.routing.FindProvidersAsync(ctx, wantlist[n], maxProvidersPerRequest) 
err := bs.sendWantListTo(ctx, providers) if err != nil { log.Errorf("error sending wantlist: %s", err) } case ks := <-bs.batchRequests: + // TODO: implement batching on len(ks) > X for some X if len(ks) == 0 { log.Warning("Received batch request for zero blocks") continue } + for _, k := range ks { + bs.wantlist.Add(k) + } providers := bs.routing.FindProvidersAsync(ctx, ks[0], maxProvidersPerRequest) err := bs.sendWantListTo(ctx, providers) diff --git a/bitswap/strategy/interface.go b/bitswap/strategy/interface.go index 9ac601d70..503a50d41 100644 --- a/bitswap/strategy/interface.go +++ b/bitswap/strategy/interface.go @@ -34,6 +34,5 @@ type Strategy interface { // Values determining bitswap behavioural patterns GetBatchSize() int - GetBatchDelay() time.Duration GetRebroadcastDelay() time.Duration } diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index d58894b05..ad69b841a 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -145,10 +145,6 @@ func (s *strategist) GetBatchSize() int { return 10 } -func (s *strategist) GetBatchDelay() time.Duration { - return time.Millisecond * 3 -} - func (s *strategist) GetRebroadcastDelay() time.Duration { return time.Second * 2 } From 64bb0c4e59c8185f82b7892182c43aabdc35b813 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 19 Nov 2014 12:56:23 -0800 Subject: [PATCH 0126/1035] style(bitswap/notifications) make it more obvious License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@140a141c6e8419d462bc72f3e5fcf9215e03838c --- bitswap/notifications/notifications.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 34888d510..bd30bbad6 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -8,6 +8,8 @@ import ( u "github.com/jbenet/go-ipfs/util" ) +const bufferSize = 16 + type PubSub interface { 
Publish(block blocks.Block) Subscribe(ctx context.Context, k u.Key) <-chan blocks.Block @@ -15,7 +17,6 @@ type PubSub interface { } func New() PubSub { - const bufferSize = 16 return &impl{*pubsub.New(bufferSize)} } From 9dec36ff9d9a37c379e7966d303a7f4827761f88 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 19 Nov 2014 14:19:22 -0800 Subject: [PATCH 0127/1035] feat(bitswap/notifications) Subscribe to multiple keys License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@8a87b709e9cf73ac08d9a03cabcafde16a965e02 --- bitswap/notifications/notifications.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index bd30bbad6..a2646c814 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -12,7 +12,7 @@ const bufferSize = 16 type PubSub interface { Publish(block blocks.Block) - Subscribe(ctx context.Context, k u.Key) <-chan blocks.Block + Subscribe(ctx context.Context, keys ...u.Key) <-chan blocks.Block Shutdown() } @@ -31,10 +31,13 @@ func (ps *impl) Publish(block blocks.Block) { // Subscribe returns a one-time use |blockChannel|. |blockChannel| returns nil // if the |ctx| times out or is cancelled. Then channel is closed after the -// block given by |k| is sent. -func (ps *impl) Subscribe(ctx context.Context, k u.Key) <-chan blocks.Block { - topic := string(k) - subChan := ps.wrapped.SubOnce(topic) +// blocks given by |keys| are sent. +func (ps *impl) Subscribe(ctx context.Context, keys ...u.Key) <-chan blocks.Block { + topics := make([]string, 0) + for _, key := range keys { + topics = append(topics, string(key)) + } + subChan := ps.wrapped.SubOnce(topics...) 
blockChannel := make(chan blocks.Block, 1) // buffered so the sender doesn't wait on receiver go func() { defer close(blockChannel) @@ -45,7 +48,7 @@ func (ps *impl) Subscribe(ctx context.Context, k u.Key) <-chan blocks.Block { blockChannel <- block } case <-ctx.Done(): - ps.wrapped.Unsub(subChan, topic) + ps.wrapped.Unsub(subChan, topics...) } }() return blockChannel From f6effaa322c75ea915d71745f3ec189fcf09162e Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 19 Nov 2014 22:51:34 -0800 Subject: [PATCH 0128/1035] tests(bitswap) share constructor between tests @whyrusleeping i hope this makes it a bit easier to work with tests License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@12b83ff818dd20e674750b49b6ec1ba60348d746 --- bitswap/bitswap.go | 8 +++----- bitswap/bitswap_test.go | 23 +++++++---------------- 2 files changed, 10 insertions(+), 21 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 35346644b..a14d68cc0 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -7,7 +7,6 @@ import ( "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" blocks "github.com/jbenet/go-ipfs/blocks" blockstore "github.com/jbenet/go-ipfs/blockstore" @@ -27,9 +26,8 @@ var log = eventlog.Logger("bitswap") // provided BitSwapNetwork. This function registers the returned instance as // the network delegate. 
// Runs until context is cancelled -func New(ctx context.Context, p peer.Peer, - network bsnet.BitSwapNetwork, routing bsnet.Routing, - d ds.ThreadSafeDatastore, nice bool) exchange.Interface { +func New(ctx context.Context, p peer.Peer, network bsnet.BitSwapNetwork, routing bsnet.Routing, + bstore blockstore.Blockstore, nice bool) exchange.Interface { notif := notifications.New() go func() { @@ -38,7 +36,7 @@ func New(ctx context.Context, p peer.Peer, }() bs := &bitswap{ - blockstore: blockstore.NewBlockstore(d), + blockstore: bstore, notifications: notif, strategy: strategy.New(nice), routing: routing, diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 7b4b36fa0..78509e649 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -11,14 +11,12 @@ import ( ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" ds_sync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" blocks "github.com/jbenet/go-ipfs/blocks" + blockstore "github.com/jbenet/go-ipfs/blockstore" bstore "github.com/jbenet/go-ipfs/blockstore" exchange "github.com/jbenet/go-ipfs/exchange" - notifications "github.com/jbenet/go-ipfs/exchange/bitswap/notifications" - strategy "github.com/jbenet/go-ipfs/exchange/bitswap/strategy" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" peer "github.com/jbenet/go-ipfs/peer" mock "github.com/jbenet/go-ipfs/routing/mock" - util "github.com/jbenet/go-ipfs/util" ) func TestGetBlockTimeout(t *testing.T) { @@ -335,23 +333,16 @@ func session(net tn.Network, rs mock.RoutingServer, id peer.ID) instance { adapter := net.Adapter(p) htc := rs.Client(p) + bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore())) - blockstore := bstore.NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore())) const alwaysSendToPeer = true - bs := &bitswap{ - blockstore: blockstore, - notifications: notifications.New(), - strategy: strategy.New(alwaysSendToPeer), - routing: htc, - 
sender: adapter, - wantlist: util.NewKeySet(), - batchRequests: make(chan []util.Key, 32), - } - adapter.SetDelegate(bs) - go bs.run(context.TODO()) + ctx := context.TODO() + + bs := New(ctx, p, adapter, htc, bstore, alwaysSendToPeer) + return instance{ peer: p, exchange: bs, - blockstore: blockstore, + blockstore: bstore, } } From a923136d7203790acc936672bb77f4b0c94c9e3b Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 19 Nov 2014 23:11:58 -0800 Subject: [PATCH 0129/1035] refactor(bitswap) move wantlist to loop receive License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@470b02d432774c5d312c9a671cc79e7321497c11 --- bitswap/bitswap.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index a14d68cc0..608656e53 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -178,6 +178,9 @@ func (bs *bitswap) run(ctx context.Context) { } case ks := <-bs.batchRequests: // TODO: implement batching on len(ks) > X for some X + for _, k := range ks { + bs.wantlist.Add(k) + } if len(ks) == 0 { log.Warning("Received batch request for zero blocks") continue From 81bfd52cc7f27504c7fc0c927debf1fc22fee885 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 19 Nov 2014 23:27:08 -0800 Subject: [PATCH 0130/1035] feat(bitswap) implement GetBlocks @whyrusleeping @jbenet License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@2a4a6d3e8d3beeeb9c49233e2703891c62ae9a6d --- bitswap/bitswap.go | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 608656e53..6ff604134 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -79,9 +79,7 @@ type bitswap struct { } // GetBlock attempts to retrieve a particular block from peers within the -// deadline enforced by the context -// -// TODO ensure only one active request per key +// deadline enforced by the context. 
func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, error) { // make sure to derive a new |ctx| and pass it to children. It's correct to @@ -95,26 +93,36 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err log.Event(ctx, "GetBlockRequestBegin", &k) defer log.Event(ctx, "GetBlockRequestEnd", &k) - promise := bs.notifications.Subscribe(ctx, k) - - select { - case bs.batchRequests <- []u.Key{k}: - case <-parent.Done(): - return nil, parent.Err() + promise, err := bs.GetBlocks(parent, []u.Key{k}) + if err != nil { + return nil, err } select { case block := <-promise: - bs.wantlist.Remove(k) return &block, nil case <-parent.Done(): return nil, parent.Err() } } -func (bs *bitswap) GetBlocks(parent context.Context, ks []u.Key) (*blocks.Block, error) { - // TODO: something smart - return nil, nil +// GetBlocks returns a channel where the caller may receive blocks that +// correspond to the provided |keys|. Returns an error if BitSwap is unable to +// begin this request within the deadline enforced by the context. +// +// NB: Your request remains open until the context expires. To conserve +// resources, provide a context with a reasonably short deadline (ie. not one +// that lasts throughout the lifetime of the server) +func (bs *bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan blocks.Block, error) { + // TODO log the request + + promise := bs.notifications.Subscribe(ctx, keys...) 
+ select { + case bs.batchRequests <- keys: + return promise, nil + case <-ctx.Done(): + return nil, ctx.Err() + } } func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) error { @@ -155,6 +163,7 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e return nil } +// TODO ensure only one active request per key func (bs *bitswap) run(ctx context.Context) { // Every so often, we should resend out our current want list @@ -238,6 +247,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm continue // FIXME(brian): err ignored } bs.notifications.Publish(block) + bs.wantlist.Remove(block.Key()) err := bs.HasBlock(ctx, block) if err != nil { log.Warningf("HasBlock errored: %s", err) From 44640b3900cc861361e5dd88685264e58a087204 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 19 Nov 2014 23:34:14 -0800 Subject: [PATCH 0131/1035] fix(bitswap) stop the ticker when the run loop exits @whyrusleeping License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@fd8e2b3aa2398a7348aee5dd52101c4dffc94f9b --- bitswap/bitswap.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 6ff604134..97fd0576f 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -170,6 +170,7 @@ func (bs *bitswap) run(ctx context.Context) { rebroadcastTime := time.Second * 5 broadcastSignal := time.NewTicker(bs.strategy.GetRebroadcastDelay()) + defer broadcastSignal.Stop() for { select { From fafcccc1b968ffdcbce2dc7deffa447e041d9c5d Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 20 Nov 2014 00:02:20 -0800 Subject: [PATCH 0132/1035] tests(bitswap) share code between the two large tests License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@80244dac280ee21ee43982fb57b8b1a6885cbb62 --- bitswap/bitswap_test.go | 58 +++++++++-------------------------------- 1 file changed, 12 insertions(+), 46 
deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 78509e649..ce881f846 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -87,58 +87,27 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { } } -func TestSwarm(t *testing.T) { +func TestLargeSwarm(t *testing.T) { if testing.Short() { t.SkipNow() } - net := tn.VirtualNetwork() - rs := mock.VirtualRoutingServer() - sg := NewSessionGenerator(net, rs) - bg := NewBlockGenerator() - - t.Log("Create a ton of instances, and just a few blocks") - + t.Parallel() numInstances := 500 numBlocks := 2 + PerformDistributionTest(t, numInstances, numBlocks) +} - instances := sg.Instances(numInstances) - blocks := bg.Blocks(numBlocks) - - t.Log("Give the blocks to the first instance") - - first := instances[0] - for _, b := range blocks { - first.blockstore.Put(b) - first.exchange.HasBlock(context.Background(), *b) - rs.Announce(first.peer, b.Key()) - } - - t.Log("Distribute!") - - var wg sync.WaitGroup - - for _, inst := range instances { - for _, b := range blocks { - wg.Add(1) - // NB: executing getOrFail concurrently puts tremendous pressure on - // the goroutine scheduler - getOrFail(inst, b, t, &wg) - } - } - wg.Wait() - - t.Log("Verify!") - - for _, inst := range instances { - for _, b := range blocks { - if _, err := inst.blockstore.Get(b.Key()); err != nil { - t.Fatal(err) - } - } +func TestLargeFile(t *testing.T) { + if testing.Short() { + t.SkipNow() } + t.Parallel() + numInstances := 10 + numBlocks := 100 + PerformDistributionTest(t, numInstances, numBlocks) } -func TestLargeFile(t *testing.T) { +func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { if testing.Short() { t.SkipNow() } @@ -149,9 +118,6 @@ func TestLargeFile(t *testing.T) { t.Log("Test a few nodes trying to get one file with a lot of blocks") - numInstances := 10 - numBlocks := 100 - instances := sg.Instances(numInstances) blocks := bg.Blocks(numBlocks) From 
ec74af61445cfa2fc645520e21f4db2f392a67c9 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 20 Nov 2014 17:27:48 -0800 Subject: [PATCH 0133/1035] refactor(blockstore) mv under blocks/ @jbenet @whyrusleeping the pyramids were built one brick at a time addresses: https://github.com/jbenet/go-ipfs/issues/370 License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@08d0a11bdf78b8f95806fe884221c7465c3eb222 --- bitswap/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 97fd0576f..d47ea81a4 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -9,7 +9,7 @@ import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" blocks "github.com/jbenet/go-ipfs/blocks" - blockstore "github.com/jbenet/go-ipfs/blockstore" + blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" exchange "github.com/jbenet/go-ipfs/exchange" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" From 817905429f4ab3d49d6e21203e5d1fa2e3a479d3 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 20 Nov 2014 17:41:22 -0800 Subject: [PATCH 0134/1035] rename License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@7d72132cbfcec7f25ff387de876264479d3fdcb6 --- bitswap/bitswap_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index ce881f846..52dad14b5 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -11,8 +11,7 @@ import ( ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" ds_sync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" blocks "github.com/jbenet/go-ipfs/blocks" - blockstore "github.com/jbenet/go-ipfs/blockstore" - bstore "github.com/jbenet/go-ipfs/blockstore" + blockstore 
"github.com/jbenet/go-ipfs/blocks/blockstore" exchange "github.com/jbenet/go-ipfs/exchange" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" peer "github.com/jbenet/go-ipfs/peer" @@ -286,7 +285,7 @@ func (g *SessionGenerator) Instances(n int) []instance { type instance struct { peer peer.Peer exchange exchange.Interface - blockstore bstore.Blockstore + blockstore blockstore.Blockstore } // session creates a test bitswap session. From 35c4c9546872fd4ba9913432ebfb1b01e8ea128c Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 20 Nov 2014 18:14:36 -0800 Subject: [PATCH 0135/1035] fix(bitswap/loop) add to wantlist just once oops set Add is idempotent but it's a waste of resources License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@6af9f4178ad281b2b1c44062c4e308649526378c --- bitswap/bitswap.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d47ea81a4..3ff301448 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -188,9 +188,6 @@ func (bs *bitswap) run(ctx context.Context) { } case ks := <-bs.batchRequests: // TODO: implement batching on len(ks) > X for some X - for _, k := range ks { - bs.wantlist.Add(k) - } if len(ks) == 0 { log.Warning("Received batch request for zero blocks") continue From 78dfc57d26cddd93ea4ae72c7f1304f54eec2dbb Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 20 Nov 2014 18:19:48 -0800 Subject: [PATCH 0136/1035] feat(bitswap) find providers for all keys on wantlist @jbenet @whyrusleeping this addresses a failure case where 1) bitswap wants blocks A and B 2) partner 1 has A and partner 2 has B 3) We choose a key at random, drawing A. 4) Then, we request A, neglecting to find a provider for B. Sending the full wantlist is meant to be used as a helpful additional piece of data, but... unless our hunch is supported by statistical inference at runtime, it's not safe to assume that a peer will have blocks for related keys.
Routing must be the source of truth. License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@36ebde35f39f01d381a69cbd9ea1c559bf676eef --- bitswap/bitswap.go | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 3ff301448..3c0f93119 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -3,7 +3,6 @@ package bitswap import ( - "math/rand" "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" @@ -175,16 +174,12 @@ func (bs *bitswap) run(ctx context.Context) { for { select { case <-broadcastSignal.C: - wantlist := bs.wantlist.Keys() - if len(wantlist) == 0 { - continue - } - n := rand.Intn(len(wantlist)) - providers := bs.routing.FindProvidersAsync(ctx, wantlist[n], maxProvidersPerRequest) - - err := bs.sendWantListTo(ctx, providers) - if err != nil { - log.Errorf("error sending wantlist: %s", err) + for _, k := range bs.wantlist.Keys() { + providers := bs.routing.FindProvidersAsync(ctx, k, maxProvidersPerRequest) + err := bs.sendWantListTo(ctx, providers) + if err != nil { + log.Errorf("error sending wantlist: %s", err) + } } case ks := <-bs.batchRequests: // TODO: implement batching on len(ks) > X for some X From 516e5c8e52c07048479c30c3287769edc46f8490 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 20 Nov 2014 18:25:56 -0800 Subject: [PATCH 0137/1035] feat(bitswap) loop over all provided keys License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@499aa2c3b47b869e1ad55fc26c94643ec84c2ebb --- bitswap/bitswap.go | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 3c0f93119..ed7155b6d 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -182,19 +182,13 @@ func (bs *bitswap) run(ctx context.Context) { } } case ks := <-bs.batchRequests: - // TODO: implement batching on len(ks) > X for some 
X - if len(ks) == 0 { - log.Warning("Received batch request for zero blocks") - continue - } for _, k := range ks { bs.wantlist.Add(k) - } - providers := bs.routing.FindProvidersAsync(ctx, ks[0], maxProvidersPerRequest) - - err := bs.sendWantListTo(ctx, providers) - if err != nil { - log.Errorf("error sending wantlist: %s", err) + providers := bs.routing.FindProvidersAsync(ctx, k, maxProvidersPerRequest) + err := bs.sendWantListTo(ctx, providers) + if err != nil { + log.Errorf("error sending wantlist: %s", err) + } } case <-ctx.Done(): return From e26387ff387095921288335dc5fc707ebc30be8c Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 20 Nov 2014 18:27:05 -0800 Subject: [PATCH 0138/1035] style(bitswap) name -> loop eh? License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@5045d096bcb77c539af276cb2b07dc49f9c9a90f --- bitswap/bitswap.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index ed7155b6d..1c1982edc 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -44,7 +44,7 @@ func New(ctx context.Context, p peer.Peer, network bsnet.BitSwapNetwork, routing batchRequests: make(chan []u.Key, 32), } network.SetDelegate(bs) - go bs.run(ctx) + go bs.loop(ctx) return bs } @@ -163,7 +163,7 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e } // TODO ensure only one active request per key -func (bs *bitswap) run(ctx context.Context) { +func (bs *bitswap) loop(ctx context.Context) { // Every so often, we should resend out our current want list rebroadcastTime := time.Second * 5 From db01c8063c812c09378c3dc53641dbf1fae1d1bd Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 20 Nov 2014 18:28:29 -0800 Subject: [PATCH 0139/1035] fix(bitswap) signal termination to async'ly spawned workers License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@d6e2157ae6797595d4103834e049f760c28352f6 --- 
bitswap/bitswap.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 1c1982edc..6bfcb4800 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -163,7 +163,10 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e } // TODO ensure only one active request per key -func (bs *bitswap) loop(ctx context.Context) { +func (bs *bitswap) loop(parent context.Context) { + + ctx, cancel := context.WithCancel(parent) + defer cancel() // signal termination // Every so often, we should resend out our current want list rebroadcastTime := time.Second * 5 @@ -190,7 +193,7 @@ func (bs *bitswap) loop(ctx context.Context) { log.Errorf("error sending wantlist: %s", err) } } - case <-ctx.Done(): + case <-parent.Done(): return } } From 5ec5b3586a3f92afb36e3f895d06c3c8e4dec2bb Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 20 Nov 2014 18:34:42 -0800 Subject: [PATCH 0140/1035] fix(exchange) allow exchange to be closed License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@26e6f28e379faa4c44a05496068454b61e550a91 --- bitswap/bitswap.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 6bfcb4800..cb5db26f3 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -25,9 +25,11 @@ var log = eventlog.Logger("bitswap") // provided BitSwapNetwork. This function registers the returned instance as // the network delegate. 
// Runs until context is cancelled -func New(ctx context.Context, p peer.Peer, network bsnet.BitSwapNetwork, routing bsnet.Routing, +func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, routing bsnet.Routing, bstore blockstore.Blockstore, nice bool) exchange.Interface { + ctx, cancelFunc := context.WithCancel(parent) + notif := notifications.New() go func() { <-ctx.Done() @@ -36,6 +38,7 @@ func New(ctx context.Context, p peer.Peer, network bsnet.BitSwapNetwork, routing bs := &bitswap{ blockstore: bstore, + cancelFunc: cancelFunc, notifications: notif, strategy: strategy.New(nice), routing: routing, @@ -75,6 +78,9 @@ type bitswap struct { strategy strategy.Strategy wantlist u.KeySet + + // cancelFunc signals cancellation to the bitswap event loop + cancelFunc func() } // GetBlock attempts to retrieve a particular block from peers within the @@ -295,3 +301,8 @@ func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) } } } + +func (bs *bitswap) Close() error { + bs.cancelFunc() + return nil // to conform to Closer interface +} From b1666a1535d81e6c39ba5c1f617a08e777b67b43 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 20 Nov 2014 19:14:16 -0800 Subject: [PATCH 0141/1035] refactor(bitswap) group the deferreds License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@58b9745f9dbddc529d2c876be7f59ae638f2d80a --- bitswap/bitswap.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index cb5db26f3..05ed27eb3 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -92,11 +92,14 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err // functions. This is difficult to enforce. May this comment keep you safe. 
ctx, cancelFunc := context.WithCancel(parent) - defer cancelFunc() ctx = eventlog.ContextWithMetadata(ctx, eventlog.Uuid("GetBlockRequest")) log.Event(ctx, "GetBlockRequestBegin", &k) - defer log.Event(ctx, "GetBlockRequestEnd", &k) + + defer func() { + cancelFunc() + log.Event(ctx, "GetBlockRequestEnd", &k) + }() promise, err := bs.GetBlocks(parent, []u.Key{k}) if err != nil { @@ -109,6 +112,7 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err case <-parent.Done(): return nil, parent.Err() } + } // GetBlocks returns a channel where the caller may receive blocks that @@ -172,13 +176,15 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e func (bs *bitswap) loop(parent context.Context) { ctx, cancel := context.WithCancel(parent) - defer cancel() // signal termination // Every so often, we should resend out our current want list rebroadcastTime := time.Second * 5 broadcastSignal := time.NewTicker(bs.strategy.GetRebroadcastDelay()) - defer broadcastSignal.Stop() + defer func() { + cancel() // signal to derived async functions + broadcastSignal.Stop() + }() for { select { From 13ed94443141391f795344e46f0d7d47e8097374 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 20 Nov 2014 19:12:02 -0800 Subject: [PATCH 0142/1035] test(bitswap) Close (but skip for now) License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@b90e5fb71dc38f802b539054df756dca9d20f898 --- bitswap/bitswap_test.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 52dad14b5..a8483c3bd 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -18,6 +18,21 @@ import ( mock "github.com/jbenet/go-ipfs/routing/mock" ) +func TestClose(t *testing.T) { + // TODO + t.Skip("TODO Bitswap's Close implementation is a WIP") + vnet := tn.VirtualNetwork() + rout := mock.VirtualRoutingServer() + sesgen := NewSessionGenerator(vnet, 
rout) + bgen := NewBlockGenerator() + + block := bgen.Next() + bitswap := sesgen.Next() + + bitswap.exchange.Close() + bitswap.exchange.GetBlock(context.Background(), block.Key()) +} + func TestGetBlockTimeout(t *testing.T) { net := tn.VirtualNetwork() From 44dd46f2461819a34f5a9c1f7a84c1694130638d Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 21 Nov 2014 06:40:34 +0000 Subject: [PATCH 0143/1035] wire GetBlocks into blockservice This commit was moved from ipfs/go-bitswap@b5121d638bad5e379eecfb53042612985f6fe823 --- bitswap/bitswap.go | 12 ++++++------ bitswap/bitswap_test.go | 14 +++++++------- bitswap/message/message.go | 20 ++++++++++---------- bitswap/message/message_test.go | 14 +++++++------- bitswap/notifications/notifications.go | 12 ++++++------ bitswap/notifications/notifications_test.go | 8 ++++---- bitswap/strategy/strategy_test.go | 2 +- bitswap/testnet/network_test.go | 8 ++++---- 8 files changed, 45 insertions(+), 45 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 05ed27eb3..604cfa21a 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -108,7 +108,7 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err select { case block := <-promise: - return &block, nil + return block, nil case <-parent.Done(): return nil, parent.Err() } @@ -122,7 +122,7 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err // NB: Your request remains open until the context expires. To conserve // resources, provide a context with a reasonably short deadline (ie. not one // that lasts throughout the lifetime of the server) -func (bs *bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan blocks.Block, error) { +func (bs *bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks.Block, error) { // TODO log the request promise := bs.notifications.Subscribe(ctx, keys...) 
@@ -213,7 +213,7 @@ func (bs *bitswap) loop(parent context.Context) { // HasBlock announces the existance of a block to this bitswap service. The // service will potentially notify its peers. -func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { +func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { log.Debugf("Has Block %v", blk.Key()) bs.wantlist.Remove(blk.Key()) bs.sendToPeersThatWant(ctx, blk) @@ -244,7 +244,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm for _, block := range incoming.Blocks() { // TODO verify blocks? - if err := bs.blockstore.Put(&block); err != nil { + if err := bs.blockstore.Put(block); err != nil { log.Criticalf("error putting block: %s", err) continue // FIXME(brian): err ignored } @@ -267,7 +267,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm if block, errBlockNotFound := bs.blockstore.Get(key); errBlockNotFound != nil { continue } else { - message.AddBlock(*block) + message.AddBlock(block) } } } @@ -290,7 +290,7 @@ func (bs *bitswap) send(ctx context.Context, p peer.Peer, m bsmsg.BitSwapMessage bs.strategy.MessageSent(p, m) } -func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) { +func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block *blocks.Block) { log.Debugf("Sending %v to peers that want it", block.Key()) for _, p := range bs.strategy.Peers() { diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index a8483c3bd..4f5755ae0 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -83,7 +83,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { if err := hasBlock.blockstore.Put(block); err != nil { t.Fatal(err) } - if err := hasBlock.exchange.HasBlock(context.Background(), *block); err != nil { + if err := hasBlock.exchange.HasBlock(context.Background(), block); err != nil { t.Fatal(err) } @@ -140,7 +140,7 @@ func PerformDistributionTest(t 
*testing.T, numInstances, numBlocks int) { first := instances[0] for _, b := range blocks { first.blockstore.Put(b) - first.exchange.HasBlock(context.Background(), *b) + first.exchange.HasBlock(context.Background(), b) rs.Announce(first.peer, b.Key()) } @@ -212,7 +212,7 @@ func TestSendToWantingPeer(t *testing.T) { beta := bg.Next() t.Logf("Peer %v announes availability of %v\n", w.peer, beta.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) - if err := w.blockstore.Put(&beta); err != nil { + if err := w.blockstore.Put(beta); err != nil { t.Fatal(err) } w.exchange.HasBlock(ctx, beta) @@ -225,7 +225,7 @@ func TestSendToWantingPeer(t *testing.T) { t.Logf("%v announces availability of %v\n", o.peer, alpha.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) - if err := o.blockstore.Put(&alpha); err != nil { + if err := o.blockstore.Put(alpha); err != nil { t.Fatal(err) } o.exchange.HasBlock(ctx, alpha) @@ -254,16 +254,16 @@ type BlockGenerator struct { seq int } -func (bg *BlockGenerator) Next() blocks.Block { +func (bg *BlockGenerator) Next() *blocks.Block { bg.seq++ - return *blocks.NewBlock([]byte(string(bg.seq))) + return blocks.NewBlock([]byte(string(bg.seq))) } func (bg *BlockGenerator) Blocks(n int) []*blocks.Block { blocks := make([]*blocks.Block, 0) for i := 0; i < n; i++ { b := bg.Next() - blocks = append(blocks, &b) + blocks = append(blocks, b) } return blocks } diff --git a/bitswap/message/message.go b/bitswap/message/message.go index e0aea227d..b69450a6f 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -19,7 +19,7 @@ type BitSwapMessage interface { Wantlist() []u.Key // Blocks returns a slice of unique blocks - Blocks() []blocks.Block + Blocks() []*blocks.Block // AddWanted adds the key to the Wantlist. 
// @@ -32,7 +32,7 @@ type BitSwapMessage interface { // implies Priority(A) > Priority(B) AddWanted(u.Key) - AddBlock(blocks.Block) + AddBlock(*blocks.Block) Exportable } @@ -42,14 +42,14 @@ type Exportable interface { } type impl struct { - existsInWantlist map[u.Key]struct{} // map to detect duplicates - wantlist []u.Key // slice to preserve ordering - blocks map[u.Key]blocks.Block // map to detect duplicates + existsInWantlist map[u.Key]struct{} // map to detect duplicates + wantlist []u.Key // slice to preserve ordering + blocks map[u.Key]*blocks.Block // map to detect duplicates } func New() BitSwapMessage { return &impl{ - blocks: make(map[u.Key]blocks.Block), + blocks: make(map[u.Key]*blocks.Block), existsInWantlist: make(map[u.Key]struct{}), wantlist: make([]u.Key, 0), } @@ -62,7 +62,7 @@ func newMessageFromProto(pbm pb.Message) BitSwapMessage { } for _, d := range pbm.GetBlocks() { b := blocks.NewBlock(d) - m.AddBlock(*b) + m.AddBlock(b) } return m } @@ -71,8 +71,8 @@ func (m *impl) Wantlist() []u.Key { return m.wantlist } -func (m *impl) Blocks() []blocks.Block { - bs := make([]blocks.Block, 0) +func (m *impl) Blocks() []*blocks.Block { + bs := make([]*blocks.Block, 0) for _, block := range m.blocks { bs = append(bs, block) } @@ -88,7 +88,7 @@ func (m *impl) AddWanted(k u.Key) { m.wantlist = append(m.wantlist, k) } -func (m *impl) AddBlock(b blocks.Block) { +func (m *impl) AddBlock(b *blocks.Block) { m.blocks[b.Key()] = b } diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 9c69136cd..de64b7925 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -42,7 +42,7 @@ func TestAppendBlock(t *testing.T) { m := New() for _, str := range strs { block := blocks.NewBlock([]byte(str)) - m.AddBlock(*block) + m.AddBlock(block) } // assert strings are in proto message @@ -133,10 +133,10 @@ func TestToNetFromNetPreservesWantList(t *testing.T) { func TestToAndFromNetMessage(t *testing.T) { original := New() 
- original.AddBlock(*blocks.NewBlock([]byte("W"))) - original.AddBlock(*blocks.NewBlock([]byte("E"))) - original.AddBlock(*blocks.NewBlock([]byte("F"))) - original.AddBlock(*blocks.NewBlock([]byte("M"))) + original.AddBlock(blocks.NewBlock([]byte("W"))) + original.AddBlock(blocks.NewBlock([]byte("E"))) + original.AddBlock(blocks.NewBlock([]byte("F"))) + original.AddBlock(blocks.NewBlock([]byte("M"))) p := peer.WithIDString("X") netmsg, err := original.ToNet(p) @@ -180,8 +180,8 @@ func TestDuplicates(t *testing.T) { t.Fatal("Duplicate in BitSwapMessage") } - msg.AddBlock(*b) - msg.AddBlock(*b) + msg.AddBlock(b) + msg.AddBlock(b) if len(msg.Blocks()) != 1 { t.Fatal("Duplicate in BitSwapMessage") } diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index a2646c814..2497f6316 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -11,8 +11,8 @@ import ( const bufferSize = 16 type PubSub interface { - Publish(block blocks.Block) - Subscribe(ctx context.Context, keys ...u.Key) <-chan blocks.Block + Publish(block *blocks.Block) + Subscribe(ctx context.Context, keys ...u.Key) <-chan *blocks.Block Shutdown() } @@ -24,7 +24,7 @@ type impl struct { wrapped pubsub.PubSub } -func (ps *impl) Publish(block blocks.Block) { +func (ps *impl) Publish(block *blocks.Block) { topic := string(block.Key()) ps.wrapped.Pub(block, topic) } @@ -32,18 +32,18 @@ func (ps *impl) Publish(block blocks.Block) { // Subscribe returns a one-time use |blockChannel|. |blockChannel| returns nil // if the |ctx| times out or is cancelled. Then channel is closed after the // blocks given by |keys| are sent. -func (ps *impl) Subscribe(ctx context.Context, keys ...u.Key) <-chan blocks.Block { +func (ps *impl) Subscribe(ctx context.Context, keys ...u.Key) <-chan *blocks.Block { topics := make([]string, 0) for _, key := range keys { topics = append(topics, string(key)) } subChan := ps.wrapped.SubOnce(topics...) 
- blockChannel := make(chan blocks.Block, 1) // buffered so the sender doesn't wait on receiver + blockChannel := make(chan *blocks.Block, 1) // buffered so the sender doesn't wait on receiver go func() { defer close(blockChannel) select { case val := <-subChan: - block, ok := val.(blocks.Block) + block, ok := val.(*blocks.Block) if ok { blockChannel <- block } diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 063634f61..ebbae2a51 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -16,13 +16,13 @@ func TestPublishSubscribe(t *testing.T) { defer n.Shutdown() ch := n.Subscribe(context.Background(), blockSent.Key()) - n.Publish(*blockSent) + n.Publish(blockSent) blockRecvd, ok := <-ch if !ok { t.Fail() } - assertBlocksEqual(t, blockRecvd, *blockSent) + assertBlocksEqual(t, blockRecvd, blockSent) } @@ -39,14 +39,14 @@ func TestCarryOnWhenDeadlineExpires(t *testing.T) { assertBlockChannelNil(t, blockChannel) } -func assertBlockChannelNil(t *testing.T, blockChannel <-chan blocks.Block) { +func assertBlockChannelNil(t *testing.T, blockChannel <-chan *blocks.Block) { _, ok := <-blockChannel if ok { t.Fail() } } -func assertBlocksEqual(t *testing.T, a, b blocks.Block) { +func assertBlocksEqual(t *testing.T, a, b *blocks.Block) { if !bytes.Equal(a.Data, b.Data) { t.Fail() } diff --git a/bitswap/strategy/strategy_test.go b/bitswap/strategy/strategy_test.go index ef93d9827..d07af601b 100644 --- a/bitswap/strategy/strategy_test.go +++ b/bitswap/strategy/strategy_test.go @@ -30,7 +30,7 @@ func TestConsistentAccounting(t *testing.T) { m := message.New() content := []string{"this", "is", "message", "i"} - m.AddBlock(*blocks.NewBlock([]byte(strings.Join(content, " ")))) + m.AddBlock(blocks.NewBlock([]byte(strings.Join(content, " ")))) sender.MessageSent(receiver.Peer, m) receiver.MessageReceived(sender.Peer, m) diff --git a/bitswap/testnet/network_test.go 
b/bitswap/testnet/network_test.go index 3930c2a8c..6f57aad50 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -33,7 +33,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) { // TODO test contents of incoming message m := bsmsg.New() - m.AddBlock(*blocks.NewBlock([]byte(expectedStr))) + m.AddBlock(blocks.NewBlock([]byte(expectedStr))) return from, m })) @@ -41,7 +41,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) { t.Log("Build a message and send a synchronous request to recipient") message := bsmsg.New() - message.AddBlock(*blocks.NewBlock([]byte("data"))) + message.AddBlock(blocks.NewBlock([]byte("data"))) response, err := initiator.SendRequest( context.Background(), peer.WithID(idOfRecipient), message) if err != nil { @@ -77,7 +77,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { peer.Peer, bsmsg.BitSwapMessage) { msgToWaiter := bsmsg.New() - msgToWaiter.AddBlock(*blocks.NewBlock([]byte(expectedStr))) + msgToWaiter.AddBlock(blocks.NewBlock([]byte(expectedStr))) return fromWaiter, msgToWaiter })) @@ -105,7 +105,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { })) messageSentAsync := bsmsg.New() - messageSentAsync.AddBlock(*blocks.NewBlock([]byte("data"))) + messageSentAsync.AddBlock(blocks.NewBlock([]byte("data"))) errSending := waiter.SendMessage( context.Background(), peer.WithID(idOfResponder), messageSentAsync) if errSending != nil { From ebf2c82b6e028661f78d2f67f9803a31e9717698 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 21 Nov 2014 18:14:28 +0000 Subject: [PATCH 0144/1035] tracking down a bug dhthell found, added asserts and better logging. 
This commit was moved from ipfs/go-bitswap@36798def1153020eae7fa7b53fba4b0856f392c6 --- bitswap/bitswap.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 604cfa21a..6a6565d19 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -197,13 +197,19 @@ func (bs *bitswap) loop(parent context.Context) { } } case ks := <-bs.batchRequests: + // TODO: implement batching on len(ks) > X for some X + if len(ks) == 0 { + log.Warning("Received batch request for zero blocks") + continue + } for _, k := range ks { bs.wantlist.Add(k) - providers := bs.routing.FindProvidersAsync(ctx, k, maxProvidersPerRequest) - err := bs.sendWantListTo(ctx, providers) - if err != nil { - log.Errorf("error sending wantlist: %s", err) - } + } + providers := bs.routing.FindProvidersAsync(ctx, ks[0], maxProvidersPerRequest) + + err := bs.sendWantListTo(ctx, providers) + if err != nil { + log.Errorf("error sending wantlist: %s", err) } case <-parent.Done(): return From 39fd3758b671d50b0afa7470b3a7eeaedbbbc048 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 21 Nov 2014 23:03:05 +0000 Subject: [PATCH 0145/1035] a little more correctness on the new bitswap impl This commit was moved from ipfs/go-bitswap@92e2d2473d1ed8b4daf77b2d4e3a22d92eab024e --- bitswap/bitswap.go | 47 ++++++++++++++++++++++++++++-------- bitswap/bitswap_test.go | 2 +- bitswap/strategy/ledger.go | 1 + bitswap/strategy/strategy.go | 2 ++ 4 files changed, 41 insertions(+), 11 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 6a6565d19..001f844b7 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -128,12 +128,35 @@ func (bs *bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks. promise := bs.notifications.Subscribe(ctx, keys...) 
select { case bs.batchRequests <- keys: - return promise, nil + return pipeBlocks(ctx, promise, len(keys)), nil case <-ctx.Done(): return nil, ctx.Err() } } +func pipeBlocks(ctx context.Context, in <-chan *blocks.Block, count int) <-chan *blocks.Block { + out := make(chan *blocks.Block, 1) + go func() { + defer close(out) + for i := 0; i < count; i++ { + select { + case blk, ok := <-in: + if !ok { + return + } + select { + case out <- blk: + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } + }() + return out +} + func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) error { if peers == nil { panic("Cant send wantlist to nil peerchan") @@ -220,7 +243,7 @@ func (bs *bitswap) loop(parent context.Context) { // HasBlock announces the existance of a block to this bitswap service. The // service will potentially notify its peers. func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { - log.Debugf("Has Block %v", blk.Key()) + log.Debugf("Has Block %s", blk.Key()) bs.wantlist.Remove(blk.Key()) bs.sendToPeersThatWant(ctx, blk) return bs.routing.Provide(ctx, blk.Key()) @@ -262,10 +285,6 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm } } - message := bsmsg.New() - for _, wanted := range bs.wantlist.Keys() { - message.AddWanted(wanted) - } for _, key := range incoming.Wantlist() { // TODO: might be better to check if we have the block before checking // if we should send it to someone @@ -273,14 +292,22 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm if block, errBlockNotFound := bs.blockstore.Get(key); errBlockNotFound != nil { continue } else { - message.AddBlock(block) + // Create a separate message to send this block in + blkmsg := bsmsg.New() + + // TODO: only send this the first time + for _, k := range bs.wantlist.Keys() { + blkmsg.AddWanted(k) + } + + blkmsg.AddBlock(block) + bs.strategy.MessageSent(p, blkmsg) + bs.send(ctx, p, 
blkmsg) } } } - bs.strategy.MessageSent(p, message) - log.Debug("Returning message.") - return p, message + return nil, nil } func (bs *bitswap) ReceiveError(err error) { diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 4f5755ae0..426c0a315 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -106,7 +106,7 @@ func TestLargeSwarm(t *testing.T) { t.SkipNow() } t.Parallel() - numInstances := 500 + numInstances := 5 numBlocks := 2 PerformDistributionTest(t, numInstances, numBlocks) } diff --git a/bitswap/strategy/ledger.go b/bitswap/strategy/ledger.go index 9f33b1aba..74feb3407 100644 --- a/bitswap/strategy/ledger.go +++ b/bitswap/strategy/ledger.go @@ -61,6 +61,7 @@ func (l *ledger) ReceivedBytes(n int) { // TODO: this needs to be different. We need timeouts. func (l *ledger) Wants(k u.Key) { + log.Debugf("peer %s wants %s", l.Partner, k) l.wantList[k] = struct{}{} } diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index ad69b841a..d86092da6 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -10,6 +10,8 @@ import ( u "github.com/jbenet/go-ipfs/util" ) +var log = u.Logger("strategy") + // TODO niceness should be on a per-peer basis. Use-case: Certain peers are // "trusted" and/or controlled by a single human user. The user may want for // these peers to exchange data freely From 4a7017f7098945ba50e1ed5f07968ea7c443d3c8 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 21 Nov 2014 15:25:37 -0800 Subject: [PATCH 0146/1035] test(notifications) we expect this to fail. 
will be fixed in upcoming commit License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@f1eb07d93fed64e35190131ed3e070d72bb20b40 --- bitswap/notifications/notifications_test.go | 23 +++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index ebbae2a51..5c51f322e 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -26,6 +26,29 @@ func TestPublishSubscribe(t *testing.T) { } +func TestSubscribeMany(t *testing.T) { + e1 := blocks.NewBlock([]byte("Greetings from The Interval")) + e2 := blocks.NewBlock([]byte("Greetings from The Interval")) + + n := New() + defer n.Shutdown() + ch := n.Subscribe(context.Background(), e1.Key(), e2.Key()) + + n.Publish(e1) + r1, ok := <-ch + if !ok { + t.Fatal("didn't receive first expected block") + } + assertBlocksEqual(t, e1, r1) + + n.Publish(e2) + r2, ok := <-ch + if !ok { + t.Fatal("didn't receive second expected block") + } + assertBlocksEqual(t, e2, r2) +} + func TestCarryOnWhenDeadlineExpires(t *testing.T) { impossibleDeadline := time.Nanosecond From 163b1d03ecddab64e12813d6e84c88a8b4c5beaf Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 21 Nov 2014 23:33:33 +0000 Subject: [PATCH 0147/1035] use @maybebtc's ForwardBlocks function This commit was moved from ipfs/go-bitswap@a2a4327b36fae483c8fa41691f47b25414ce5611 --- bitswap/bitswap.go | 26 ++------------------------ 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 001f844b7..5ad3c8026 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -16,6 +16,7 @@ import ( strategy "github.com/jbenet/go-ipfs/exchange/bitswap/strategy" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" + async "github.com/jbenet/go-ipfs/util/async" "github.com/jbenet/go-ipfs/util/eventlog" ) @@ -128,35 +129,12 @@ func (bs *bitswap) 
GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks. promise := bs.notifications.Subscribe(ctx, keys...) select { case bs.batchRequests <- keys: - return pipeBlocks(ctx, promise, len(keys)), nil + return async.ForwardN(ctx, promise, len(keys)), nil case <-ctx.Done(): return nil, ctx.Err() } } -func pipeBlocks(ctx context.Context, in <-chan *blocks.Block, count int) <-chan *blocks.Block { - out := make(chan *blocks.Block, 1) - go func() { - defer close(out) - for i := 0; i < count; i++ { - select { - case blk, ok := <-in: - if !ok { - return - } - select { - case out <- blk: - case <-ctx.Done(): - return - } - case <-ctx.Done(): - return - } - } - }() - return out -} - func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) error { if peers == nil { panic("Cant send wantlist to nil peerchan") From 593a9b5e0bd97f812ac835f03040aba877c4fc1c Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 21 Nov 2014 15:41:04 -0800 Subject: [PATCH 0148/1035] docs(bitswap/notifications) License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@a6371b4b119bb926d0c0cbc460d9c79c26dbc553 --- bitswap/notifications/notifications.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 2497f6316..1de7bf909 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -29,9 +29,9 @@ func (ps *impl) Publish(block *blocks.Block) { ps.wrapped.Pub(block, topic) } -// Subscribe returns a one-time use |blockChannel|. |blockChannel| returns nil -// if the |ctx| times out or is cancelled. Then channel is closed after the -// blocks given by |keys| are sent. +// Subscribe returns a channel of blocks for the given |keys|. |blockChannel| +// is closed if the |ctx| times out or is cancelled, or after sending len(keys) +// blocks. 
func (ps *impl) Subscribe(ctx context.Context, keys ...u.Key) <-chan *blocks.Block { topics := make([]string, 0) for _, key := range keys { From 351e5892f555f55db8415287a556119da097ff85 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 21 Nov 2014 17:18:40 -0800 Subject: [PATCH 0149/1035] fix(bitswap/notifications) subscribe to many License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@097bc1b4cfad3d2a6ab6f04dc1719c3d88a713b3 --- bitswap/notifications/notifications.go | 51 ++++++++++++++++++++++++-- 1 file changed, 47 insertions(+), 4 deletions(-) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 1de7bf909..74833810a 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -29,10 +29,7 @@ func (ps *impl) Publish(block *blocks.Block) { ps.wrapped.Pub(block, topic) } -// Subscribe returns a channel of blocks for the given |keys|. |blockChannel| -// is closed if the |ctx| times out or is cancelled, or after sending len(keys) -// blocks. -func (ps *impl) Subscribe(ctx context.Context, keys ...u.Key) <-chan *blocks.Block { +func (ps *impl) SubscribeDeprec(ctx context.Context, keys ...u.Key) <-chan *blocks.Block { topics := make([]string, 0) for _, key := range keys { topics = append(topics, string(key)) @@ -57,3 +54,49 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...u.Key) <-chan *blocks.Blo func (ps *impl) Shutdown() { ps.wrapped.Shutdown() } + +// Subscribe returns a channel of blocks for the given |keys|. |blockChannel| +// is closed if the |ctx| times out or is cancelled, or after sending len(keys) +// blocks. +func (ps *impl) Subscribe(ctx context.Context, keys ...u.Key) <-chan *blocks.Block { + topics := toStrings(keys) + blocksCh := make(chan *blocks.Block, len(keys)) + valuesCh := make(chan interface{}, len(keys)) + ps.wrapped.AddSub(valuesCh, topics...) 
+ + go func() { + defer func() { + ps.wrapped.Unsub(valuesCh, topics...) + close(blocksCh) + }() + for _, _ = range keys { + select { + case <-ctx.Done(): + return + case val, ok := <-valuesCh: + if !ok { + return + } + block, ok := val.(*blocks.Block) + if !ok { + return + } + select { + case <-ctx.Done(): + return + case blocksCh <- block: // continue + } + } + } + }() + + return blocksCh +} + +func toStrings(keys []u.Key) []string { + strs := make([]string, 0) + for _, key := range keys { + strs = append(strs, string(key)) + } + return strs +} From 0581e31426b33338920006ad6246b0009442a89f Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 21 Nov 2014 17:21:03 -0800 Subject: [PATCH 0150/1035] tests(bitswap/notifications) test niladic License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@abdf5c870a470da1f30c6d705630789a76a2c914 --- bitswap/notifications/notifications_test.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 5c51f322e..7352320d9 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -49,6 +49,15 @@ func TestSubscribeMany(t *testing.T) { assertBlocksEqual(t, e2, r2) } +func TestSubscribeIsANoopWhenCalledWithNoKeys(t *testing.T) { + n := New() + defer n.Shutdown() + ch := n.Subscribe(context.TODO()) // no keys provided + if _, ok := <-ch; ok { + t.Fatal("should be closed if no keys provided") + } +} + func TestCarryOnWhenDeadlineExpires(t *testing.T) { impossibleDeadline := time.Nanosecond From d47d18261680460c8fc11a997cf0af00d46ddd35 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 21 Nov 2014 17:22:44 -0800 Subject: [PATCH 0151/1035] refactor(bitswap) forwardN no longer needed @whyrusleeping now, the pubsub channel closes after sending N blocks. we got this functionality for free from the fix. So, the forwardN wrap is no longer required! 
woohoo License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@a50a3497310f6f96b0e6ccc77e93a909cb7dfd03 --- bitswap/bitswap.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 5ad3c8026..95cb7ebf6 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -16,7 +16,6 @@ import ( strategy "github.com/jbenet/go-ipfs/exchange/bitswap/strategy" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" - async "github.com/jbenet/go-ipfs/util/async" "github.com/jbenet/go-ipfs/util/eventlog" ) @@ -129,7 +128,7 @@ func (bs *bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks. promise := bs.notifications.Subscribe(ctx, keys...) select { case bs.batchRequests <- keys: - return async.ForwardN(ctx, promise, len(keys)), nil + return promise, nil case <-ctx.Done(): return nil, ctx.Err() } From d08790f7a878516d4e3a2b515d024b214de8de67 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 21 Nov 2014 17:29:07 -0800 Subject: [PATCH 0152/1035] misc(bs/n) rm dead code License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@173ac606841917a4e5f0ed30f59a9ad6f5ce76de --- bitswap/notifications/notifications.go | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 74833810a..ee82f0305 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -29,28 +29,6 @@ func (ps *impl) Publish(block *blocks.Block) { ps.wrapped.Pub(block, topic) } -func (ps *impl) SubscribeDeprec(ctx context.Context, keys ...u.Key) <-chan *blocks.Block { - topics := make([]string, 0) - for _, key := range keys { - topics = append(topics, string(key)) - } - subChan := ps.wrapped.SubOnce(topics...) 
- blockChannel := make(chan *blocks.Block, 1) // buffered so the sender doesn't wait on receiver - go func() { - defer close(blockChannel) - select { - case val := <-subChan: - block, ok := val.(*blocks.Block) - if ok { - blockChannel <- block - } - case <-ctx.Done(): - ps.wrapped.Unsub(subChan, topics...) - } - }() - return blockChannel -} - func (ps *impl) Shutdown() { ps.wrapped.Shutdown() } From a856e9b6833f963de5e80d4aea17ed6386347eb6 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 21 Nov 2014 17:43:57 -0800 Subject: [PATCH 0153/1035] fix(bs/n) remove unnecessary variable to remove ambiguity (before it was possible to loop over either topics or keys by only keeping keys, there's no confusing about what to use for the loop range License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@365044cefbf373adf7bfc83e8fb6ca227d4f7f32 --- bitswap/notifications/notifications.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index ee82f0305..b07f3bf73 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -37,14 +37,14 @@ func (ps *impl) Shutdown() { // is closed if the |ctx| times out or is cancelled, or after sending len(keys) // blocks. func (ps *impl) Subscribe(ctx context.Context, keys ...u.Key) <-chan *blocks.Block { - topics := toStrings(keys) + blocksCh := make(chan *blocks.Block, len(keys)) valuesCh := make(chan interface{}, len(keys)) - ps.wrapped.AddSub(valuesCh, topics...) + ps.wrapped.AddSub(valuesCh, toStrings(keys)...) go func() { defer func() { - ps.wrapped.Unsub(valuesCh, topics...) + ps.wrapped.Unsub(valuesCh, toStrings(keys)...) 
close(blocksCh) }() for _, _ = range keys { From 6aa8d98dd84b2ae36d366d6b0c7afa24a41e557e Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 21 Nov 2014 18:48:15 -0800 Subject: [PATCH 0154/1035] test(bs/n) check for duplicates received License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@6a7c1d4d8dd466f1fe01fb54e3ba726b54a44068 --- bitswap/notifications/notifications_test.go | 29 +++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 7352320d9..2b2f769e6 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -9,6 +9,31 @@ import ( blocks "github.com/jbenet/go-ipfs/blocks" ) +func TestDuplicates(t *testing.T) { + b1 := blocks.NewBlock([]byte("1")) + b2 := blocks.NewBlock([]byte("2")) + + n := New() + defer n.Shutdown() + ch := n.Subscribe(context.Background(), b1.Key(), b2.Key()) + + n.Publish(b1) + blockRecvd, ok := <-ch + if !ok { + t.Fail() + } + assertBlocksEqual(t, b1, blockRecvd) + + n.Publish(b1) // ignored duplicate + + n.Publish(b2) + blockRecvd, ok = <-ch + if !ok { + t.Fail() + } + assertBlocksEqual(t, b2, blockRecvd) +} + func TestPublishSubscribe(t *testing.T) { blockSent := blocks.NewBlock([]byte("Greetings from The Interval")) @@ -80,9 +105,9 @@ func assertBlockChannelNil(t *testing.T, blockChannel <-chan *blocks.Block) { func assertBlocksEqual(t *testing.T, a, b *blocks.Block) { if !bytes.Equal(a.Data, b.Data) { - t.Fail() + t.Fatal("blocks aren't equal") } if a.Key() != b.Key() { - t.Fail() + t.Fatal("block keys aren't equal") } } From c8021f459e9aba0f5b3f01fb26db5847ea6e0c4d Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 21 Nov 2014 19:11:09 -0800 Subject: [PATCH 0155/1035] fix(bs/notifications) prevent duplicates @whyrusleeping now notifications _guarantees_ there won't be any duplicates License: MIT Signed-off-by: 
Brian Tiger Chow This commit was moved from ipfs/go-bitswap@be976cc5c64ece244896f1f9048daa512219f742 --- bitswap/notifications/notifications.go | 19 ++++++++++++++++++- bitswap/notifications/notifications_test.go | 4 ++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index b07f3bf73..20a0f623d 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -47,7 +47,12 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...u.Key) <-chan *blocks.Blo ps.wrapped.Unsub(valuesCh, toStrings(keys)...) close(blocksCh) }() - for _, _ = range keys { + seen := make(map[u.Key]struct{}) + i := 0 // req'd because it only counts unique block sends + for { + if i >= len(keys) { + return + } select { case <-ctx.Done(): return @@ -59,10 +64,22 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...u.Key) <-chan *blocks.Blo if !ok { return } + if _, ok := seen[block.Key()]; ok { + continue + } select { case <-ctx.Done(): return case blocksCh <- block: // continue + // Unsub alone is insufficient for keeping out duplicates. + // It's a race to unsubscribe before pubsub handles the + // next Publish call. Therefore, must also check for + // duplicates manually. Unsub is a performance + // consideration to avoid lots of unnecessary channel + // chatter. 
+ ps.wrapped.Unsub(valuesCh, string(block.Key())) + i++ + seen[block.Key()] = struct{}{} } } } diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 2b2f769e6..6467f3d4f 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -52,8 +52,8 @@ func TestPublishSubscribe(t *testing.T) { } func TestSubscribeMany(t *testing.T) { - e1 := blocks.NewBlock([]byte("Greetings from The Interval")) - e2 := blocks.NewBlock([]byte("Greetings from The Interval")) + e1 := blocks.NewBlock([]byte("1")) + e2 := blocks.NewBlock([]byte("2")) n := New() defer n.Shutdown() From 16e3546007f36b948778d0b916ba19185a64c6c4 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 22 Nov 2014 22:27:19 +0000 Subject: [PATCH 0156/1035] ensure sending of wantlist to friendly peers This commit was moved from ipfs/go-bitswap@fe048093760933ae8c9a3036b44803234656a736 --- bitswap/bitswap.go | 19 ++++++++++++++++--- bitswap/bitswap_test.go | 3 ++- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 95cb7ebf6..b5edfcf27 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -262,6 +262,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm } } + first := true for _, key := range incoming.Wantlist() { // TODO: might be better to check if we have the block before checking // if we should send it to someone @@ -272,9 +273,11 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm // Create a separate message to send this block in blkmsg := bsmsg.New() - // TODO: only send this the first time - for _, k := range bs.wantlist.Keys() { - blkmsg.AddWanted(k) + if first { + for _, k := range bs.wantlist.Keys() { + blkmsg.AddWanted(k) + } + first = false } blkmsg.AddBlock(block) @@ -284,6 +287,16 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm } } + // If they 
send us a block, we should guarantee that we send + // them our updated want list one way or another + if len(incoming.Blocks()) > 0 && first { + message := bsmsg.New() + for _, k := range bs.wantlist.Keys() { + message.AddWanted(k) + } + return p, message + } + return nil, nil } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 426c0a315..0610164a0 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -1,4 +1,4 @@ -package bitswap +package bitswap_test import ( "bytes" @@ -7,6 +7,7 @@ import ( "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + . "github.com/jbenet/go-ipfs/exchange/bitswap" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" ds_sync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" From 32af6cb8f156a23b66d56ffb7d0a4fa3f1ec10b7 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 23 Nov 2014 19:14:06 +0000 Subject: [PATCH 0157/1035] add a test to blockservice to demonstate GetBlocks failure. 
This commit was moved from ipfs/go-bitswap@f56f5506960e954542b88c50deb1bb572bea4453 --- bitswap/bitswap.go | 19 ++------ bitswap/bitswap_test.go | 90 +---------------------------------- bitswap/testutils.go | 101 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 106 insertions(+), 104 deletions(-) create mode 100644 bitswap/testutils.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b5edfcf27..95cb7ebf6 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -262,7 +262,6 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm } } - first := true for _, key := range incoming.Wantlist() { // TODO: might be better to check if we have the block before checking // if we should send it to someone @@ -273,11 +272,9 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm // Create a separate message to send this block in blkmsg := bsmsg.New() - if first { - for _, k := range bs.wantlist.Keys() { - blkmsg.AddWanted(k) - } - first = false + // TODO: only send this the first time + for _, k := range bs.wantlist.Keys() { + blkmsg.AddWanted(k) } blkmsg.AddBlock(block) @@ -287,16 +284,6 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm } } - // If they send us a block, we should guarantee that we send - // them our updated want list one way or another - if len(incoming.Blocks()) > 0 && first { - message := bsmsg.New() - for _, k := range bs.wantlist.Keys() { - message.AddWanted(k) - } - return p, message - } - return nil, nil } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 0610164a0..7cd1c22f9 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -1,4 +1,4 @@ -package bitswap_test +package bitswap import ( "bytes" @@ -7,13 +7,8 @@ import ( "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - . 
"github.com/jbenet/go-ipfs/exchange/bitswap" - ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" - ds_sync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" blocks "github.com/jbenet/go-ipfs/blocks" - blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" - exchange "github.com/jbenet/go-ipfs/exchange" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" peer "github.com/jbenet/go-ipfs/peer" mock "github.com/jbenet/go-ipfs/routing/mock" @@ -170,7 +165,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { } } -func getOrFail(bitswap instance, b *blocks.Block, t *testing.T, wg *sync.WaitGroup) { +func getOrFail(bitswap Instance, b *blocks.Block, t *testing.T, wg *sync.WaitGroup) { if _, err := bitswap.blockstore.Get(b.Key()); err != nil { _, err := bitswap.exchange.GetBlock(context.Background(), b.Key()) if err != nil { @@ -246,84 +241,3 @@ func TestSendToWantingPeer(t *testing.T) { t.Fatal("Expected to receive alpha from me") } } - -func NewBlockGenerator() BlockGenerator { - return BlockGenerator{} -} - -type BlockGenerator struct { - seq int -} - -func (bg *BlockGenerator) Next() *blocks.Block { - bg.seq++ - return blocks.NewBlock([]byte(string(bg.seq))) -} - -func (bg *BlockGenerator) Blocks(n int) []*blocks.Block { - blocks := make([]*blocks.Block, 0) - for i := 0; i < n; i++ { - b := bg.Next() - blocks = append(blocks, b) - } - return blocks -} - -func NewSessionGenerator( - net tn.Network, rs mock.RoutingServer) SessionGenerator { - return SessionGenerator{ - net: net, - rs: rs, - seq: 0, - } -} - -type SessionGenerator struct { - seq int - net tn.Network - rs mock.RoutingServer -} - -func (g *SessionGenerator) Next() instance { - g.seq++ - return session(g.net, g.rs, []byte(string(g.seq))) -} - -func (g *SessionGenerator) Instances(n int) []instance { - instances := make([]instance, 0) - for j := 0; j < n; j++ { - inst := g.Next() - instances = 
append(instances, inst) - } - return instances -} - -type instance struct { - peer peer.Peer - exchange exchange.Interface - blockstore blockstore.Blockstore -} - -// session creates a test bitswap session. -// -// NB: It's easy make mistakes by providing the same peer ID to two different -// sessions. To safeguard, use the SessionGenerator to generate sessions. It's -// just a much better idea. -func session(net tn.Network, rs mock.RoutingServer, id peer.ID) instance { - p := peer.WithID(id) - - adapter := net.Adapter(p) - htc := rs.Client(p) - bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore())) - - const alwaysSendToPeer = true - ctx := context.TODO() - - bs := New(ctx, p, adapter, htc, bstore, alwaysSendToPeer) - - return instance{ - peer: p, - exchange: bs, - blockstore: bstore, - } -} diff --git a/bitswap/testutils.go b/bitswap/testutils.go new file mode 100644 index 000000000..c32cee6f9 --- /dev/null +++ b/bitswap/testutils.go @@ -0,0 +1,101 @@ +package bitswap + +import ( + "code.google.com/p/go.net/context" + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + ds_sync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" + "github.com/jbenet/go-ipfs/blocks" + "github.com/jbenet/go-ipfs/blocks/blockstore" + "github.com/jbenet/go-ipfs/exchange" + tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" + "github.com/jbenet/go-ipfs/peer" + "github.com/jbenet/go-ipfs/routing/mock" +) + +/* +TODO: This whole file needs somewhere better to live. +The issue is that its very difficult to move it somewhere else +without creating circular dependencies. +Additional thought required. 
+*/ + +func NewBlockGenerator() BlockGenerator { + return BlockGenerator{} +} + +type BlockGenerator struct { + seq int +} + +func (bg *BlockGenerator) Next() *blocks.Block { + bg.seq++ + return blocks.NewBlock([]byte(string(bg.seq))) +} + +func (bg *BlockGenerator) Blocks(n int) []*blocks.Block { + blocks := make([]*blocks.Block, 0) + for i := 0; i < n; i++ { + b := bg.Next() + blocks = append(blocks, b) + } + return blocks +} + +func NewSessionGenerator( + net tn.Network, rs mock.RoutingServer) SessionGenerator { + return SessionGenerator{ + net: net, + rs: rs, + seq: 0, + } +} + +type SessionGenerator struct { + seq int + net tn.Network + rs mock.RoutingServer +} + +func (g *SessionGenerator) Next() Instance { + g.seq++ + return session(g.net, g.rs, []byte(string(g.seq))) +} + +func (g *SessionGenerator) Instances(n int) []Instance { + instances := make([]Instance, 0) + for j := 0; j < n; j++ { + inst := g.Next() + instances = append(instances, inst) + } + return instances +} + +type Instance struct { + Peer peer.Peer + Exchange exchange.Interface + Blockstore blockstore.Blockstore +} + +// session creates a test bitswap session. +// +// NB: It's easy make mistakes by providing the same peer ID to two different +// sessions. To safeguard, use the SessionGenerator to generate sessions. It's +// just a much better idea. 
+func session(net tn.Network, rs mock.RoutingServer, id peer.ID) Instance { + p := peer.WithID(id) + + adapter := net.Adapter(p) + htc := rs.Client(p) + bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + const alwaysSendToPeer = true + ctx := context.TODO() + + bs := New(ctx, p, adapter, htc, bstore, alwaysSendToPeer) + + return Instance{ + Peer: p, + Exchange: bs, + Blockstore: bstore, + } +} From 274c71e6b7ac6776e3cca25a95ef33aa938f15e1 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 23 Nov 2014 22:52:43 -0800 Subject: [PATCH 0158/1035] fix(bitswap) build-breaking compilation errors License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@9c59e0f063e57c5530d10568be651df3f2bc53e1 --- bitswap/bitswap_test.go | 60 ++++++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 7cd1c22f9..1da69560e 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -25,8 +25,8 @@ func TestClose(t *testing.T) { block := bgen.Next() bitswap := sesgen.Next() - bitswap.exchange.Close() - bitswap.exchange.GetBlock(context.Background(), block.Key()) + bitswap.Exchange.Close() + bitswap.Exchange.GetBlock(context.Background(), block.Key()) } func TestGetBlockTimeout(t *testing.T) { @@ -39,7 +39,7 @@ func TestGetBlockTimeout(t *testing.T) { ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) block := blocks.NewBlock([]byte("block")) - _, err := self.exchange.GetBlock(ctx, block.Key()) + _, err := self.Exchange.GetBlock(ctx, block.Key()) if err != context.DeadlineExceeded { t.Fatal("Expected DeadlineExceeded error") @@ -58,7 +58,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { solo := g.Next() ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) - _, err := solo.exchange.GetBlock(ctx, block.Key()) + _, err := solo.Exchange.GetBlock(ctx, block.Key()) if err != 
context.DeadlineExceeded { t.Fatal("Expected DeadlineExceeded error") @@ -76,17 +76,17 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { hasBlock := g.Next() - if err := hasBlock.blockstore.Put(block); err != nil { + if err := hasBlock.Blockstore.Put(block); err != nil { t.Fatal(err) } - if err := hasBlock.exchange.HasBlock(context.Background(), block); err != nil { + if err := hasBlock.Exchange.HasBlock(context.Background(), block); err != nil { t.Fatal(err) } wantsBlock := g.Next() ctx, _ := context.WithTimeout(context.Background(), time.Second) - received, err := wantsBlock.exchange.GetBlock(ctx, block.Key()) + received, err := wantsBlock.Exchange.GetBlock(ctx, block.Key()) if err != nil { t.Log(err) t.Fatal("Expected to succeed") @@ -135,9 +135,9 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { first := instances[0] for _, b := range blocks { - first.blockstore.Put(b) - first.exchange.HasBlock(context.Background(), b) - rs.Announce(first.peer, b.Key()) + first.Blockstore.Put(b) + first.Exchange.HasBlock(context.Background(), b) + rs.Announce(first.Peer, b.Key()) } t.Log("Distribute!") @@ -158,7 +158,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { for _, inst := range instances { for _, b := range blocks { - if _, err := inst.blockstore.Get(b.Key()); err != nil { + if _, err := inst.Blockstore.Get(b.Key()); err != nil { t.Fatal(err) } } @@ -166,8 +166,8 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { } func getOrFail(bitswap Instance, b *blocks.Block, t *testing.T, wg *sync.WaitGroup) { - if _, err := bitswap.blockstore.Get(b.Key()); err != nil { - _, err := bitswap.exchange.GetBlock(context.Background(), b.Key()) + if _, err := bitswap.Blockstore.Get(b.Key()); err != nil { + _, err := bitswap.Exchange.GetBlock(context.Background(), b.Key()) if err != nil { t.Fatal(err) } @@ -190,50 +190,50 @@ func TestSendToWantingPeer(t *testing.T) { w := sg.Next() o := 
sg.Next() - t.Logf("Session %v\n", me.peer) - t.Logf("Session %v\n", w.peer) - t.Logf("Session %v\n", o.peer) + t.Logf("Session %v\n", me.Peer) + t.Logf("Session %v\n", w.Peer) + t.Logf("Session %v\n", o.Peer) alpha := bg.Next() const timeout = 100 * time.Millisecond // FIXME don't depend on time - t.Logf("Peer %v attempts to get %v. NB: not available\n", w.peer, alpha.Key()) + t.Logf("Peer %v attempts to get %v. NB: not available\n", w.Peer, alpha.Key()) ctx, _ := context.WithTimeout(context.Background(), timeout) - _, err := w.exchange.GetBlock(ctx, alpha.Key()) + _, err := w.Exchange.GetBlock(ctx, alpha.Key()) if err == nil { t.Fatalf("Expected %v to NOT be available", alpha.Key()) } beta := bg.Next() - t.Logf("Peer %v announes availability of %v\n", w.peer, beta.Key()) + t.Logf("Peer %v announes availability of %v\n", w.Peer, beta.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) - if err := w.blockstore.Put(beta); err != nil { + if err := w.Blockstore.Put(beta); err != nil { t.Fatal(err) } - w.exchange.HasBlock(ctx, beta) + w.Exchange.HasBlock(ctx, beta) - t.Logf("%v gets %v from %v and discovers it wants %v\n", me.peer, beta.Key(), w.peer, alpha.Key()) + t.Logf("%v gets %v from %v and discovers it wants %v\n", me.Peer, beta.Key(), w.Peer, alpha.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) - if _, err := me.exchange.GetBlock(ctx, beta.Key()); err != nil { + if _, err := me.Exchange.GetBlock(ctx, beta.Key()); err != nil { t.Fatal(err) } - t.Logf("%v announces availability of %v\n", o.peer, alpha.Key()) + t.Logf("%v announces availability of %v\n", o.Peer, alpha.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) - if err := o.blockstore.Put(alpha); err != nil { + if err := o.Blockstore.Put(alpha); err != nil { t.Fatal(err) } - o.exchange.HasBlock(ctx, alpha) + o.Exchange.HasBlock(ctx, alpha) - t.Logf("%v requests %v\n", me.peer, alpha.Key()) + t.Logf("%v requests %v\n", me.Peer, alpha.Key()) ctx, _ = 
context.WithTimeout(context.Background(), timeout) - if _, err := me.exchange.GetBlock(ctx, alpha.Key()); err != nil { + if _, err := me.Exchange.GetBlock(ctx, alpha.Key()); err != nil { t.Fatal(err) } - t.Logf("%v should now have %v\n", w.peer, alpha.Key()) - block, err := w.blockstore.Get(alpha.Key()) + t.Logf("%v should now have %v\n", w.Peer, alpha.Key()) + block, err := w.Blockstore.Get(alpha.Key()) if err != nil { t.Fatal("Should not have received an error") } From f8dfc629b8cead25562bc66793432406ff94a549 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 23 Nov 2014 22:46:11 -0800 Subject: [PATCH 0159/1035] fix(bs/notifications) use SubOnceEach to provide uniqueness guarantee License: MIT Signed-off-by: Brian Tiger Chow vendor forked pubsub to get SubOnceEach License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@6fe3af111ae183b53dbdf7ea2802cb5215878abf --- bitswap/notifications/notifications.go | 28 ++++++-------------------- 1 file changed, 6 insertions(+), 22 deletions(-) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 20a0f623d..e9aac629c 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -2,7 +2,7 @@ package notifications import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - pubsub "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/tuxychandru/pubsub" + pubsub "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/maybebtc/pubsub" blocks "github.com/jbenet/go-ipfs/blocks" u "github.com/jbenet/go-ipfs/util" @@ -39,20 +39,16 @@ func (ps *impl) Shutdown() { func (ps *impl) Subscribe(ctx context.Context, keys ...u.Key) <-chan *blocks.Block { blocksCh := make(chan *blocks.Block, len(keys)) - valuesCh := make(chan interface{}, len(keys)) - ps.wrapped.AddSub(valuesCh, toStrings(keys)...) 
- + if len(keys) == 0 { + close(blocksCh) + return blocksCh + } + valuesCh := ps.wrapped.SubOnceEach(toStrings(keys)...) go func() { defer func() { - ps.wrapped.Unsub(valuesCh, toStrings(keys)...) close(blocksCh) }() - seen := make(map[u.Key]struct{}) - i := 0 // req'd because it only counts unique block sends for { - if i >= len(keys) { - return - } select { case <-ctx.Done(): return @@ -64,22 +60,10 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...u.Key) <-chan *blocks.Blo if !ok { return } - if _, ok := seen[block.Key()]; ok { - continue - } select { case <-ctx.Done(): return case blocksCh <- block: // continue - // Unsub alone is insufficient for keeping out duplicates. - // It's a race to unsubscribe before pubsub handles the - // next Publish call. Therefore, must also check for - // duplicates manually. Unsub is a performance - // consideration to avoid lots of unnecessary channel - // chatter. - ps.wrapped.Unsub(valuesCh, string(block.Key())) - i++ - seen[block.Key()] = struct{}{} } } } From 5ebc0060239c850326bf820c99c3b9566ef072f9 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 23 Nov 2014 22:47:06 -0800 Subject: [PATCH 0160/1035] fix(bitswap/testutils) vendor License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@8adecf8eac7758ad64e9e76bdd7640975fccf0d5 --- bitswap/testutils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index c32cee6f9..d0064173f 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -1,7 +1,7 @@ package bitswap import ( - "code.google.com/p/go.net/context" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" ds_sync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" "github.com/jbenet/go-ipfs/blocks" From 9d669c17fb7f49c671c92101c9c4e271be04a7aa Mon Sep 17 
00:00:00 2001 From: Jeromy Date: Mon, 24 Nov 2014 08:28:48 +0000 Subject: [PATCH 0161/1035] fix issues in merkledag This commit was moved from ipfs/go-bitswap@42c3c413558b80bdcd0403f0ec0d76cc2c414bef --- bitswap/network/ipfs_impl.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index c94a4859f..1a3c11b44 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -1,8 +1,6 @@ package network import ( - "errors" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" @@ -54,7 +52,6 @@ func (bsnet *impl) HandleMessage( // TODO(brian): put this in a helper function if bsmsg == nil || p == nil { - bsnet.receiver.ReceiveError(errors.New("ReceiveMessage returned nil peer or message")) return nil } From 03cbba45410c904e141f9ee8e326abae1211ee05 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 26 Nov 2014 12:08:40 -0800 Subject: [PATCH 0162/1035] refactor(util) move block generator @whyrusleeping @jbenet Putting the block generator in a util dir until blocks. Can't put it in util/testutil because the util/testutil/dag-generator imports blockservice and blockservice uses the generator. Tough problem. This'll do for now. 
License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@cba713cd1aa6662edb91d8636b8ae21417028679 --- bitswap/bitswap_test.go | 8 ++++---- bitswap/testutils.go | 30 ------------------------------ 2 files changed, 4 insertions(+), 34 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 1da69560e..ede87c474 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -7,8 +7,8 @@ import ( "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - blocks "github.com/jbenet/go-ipfs/blocks" + blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" peer "github.com/jbenet/go-ipfs/peer" mock "github.com/jbenet/go-ipfs/routing/mock" @@ -20,7 +20,7 @@ func TestClose(t *testing.T) { vnet := tn.VirtualNetwork() rout := mock.VirtualRoutingServer() sesgen := NewSessionGenerator(vnet, rout) - bgen := NewBlockGenerator() + bgen := blocksutil.NewBlockGenerator() block := bgen.Next() bitswap := sesgen.Next() @@ -124,7 +124,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { net := tn.VirtualNetwork() rs := mock.VirtualRoutingServer() sg := NewSessionGenerator(net, rs) - bg := NewBlockGenerator() + bg := blocksutil.NewBlockGenerator() t.Log("Test a few nodes trying to get one file with a lot of blocks") @@ -184,7 +184,7 @@ func TestSendToWantingPeer(t *testing.T) { net := tn.VirtualNetwork() rs := mock.VirtualRoutingServer() sg := NewSessionGenerator(net, rs) - bg := NewBlockGenerator() + bg := blocksutil.NewBlockGenerator() me := sg.Next() w := sg.Next() diff --git a/bitswap/testutils.go b/bitswap/testutils.go index d0064173f..402a5b1d2 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -4,7 +4,6 @@ import ( "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" 
ds_sync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" - "github.com/jbenet/go-ipfs/blocks" "github.com/jbenet/go-ipfs/blocks/blockstore" "github.com/jbenet/go-ipfs/exchange" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" @@ -12,35 +11,6 @@ import ( "github.com/jbenet/go-ipfs/routing/mock" ) -/* -TODO: This whole file needs somewhere better to live. -The issue is that its very difficult to move it somewhere else -without creating circular dependencies. -Additional thought required. -*/ - -func NewBlockGenerator() BlockGenerator { - return BlockGenerator{} -} - -type BlockGenerator struct { - seq int -} - -func (bg *BlockGenerator) Next() *blocks.Block { - bg.seq++ - return blocks.NewBlock([]byte(string(bg.seq))) -} - -func (bg *BlockGenerator) Blocks(n int) []*blocks.Block { - blocks := make([]*blocks.Block, 0) - for i := 0; i < n; i++ { - b := bg.Next() - blocks = append(blocks, b) - } - return blocks -} - func NewSessionGenerator( net tn.Network, rs mock.RoutingServer) SessionGenerator { return SessionGenerator{ From bb0471853ece64b1ed168ab7e1dec883ffa4e516 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 26 Nov 2014 12:06:39 -0800 Subject: [PATCH 0163/1035] fix(notifications) prevent deadlock when context cancelled early + test(notifications) cc @whyrusleeping @jbenet License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@58ad863d646793ad9c3f8f83c6f55b596aeafb20 --- bitswap/notifications/notifications.go | 8 +++--- bitswap/notifications/notifications_test.go | 30 +++++++++++++++++++++ 2 files changed, 34 insertions(+), 4 deletions(-) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index e9aac629c..4616ac735 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -39,15 +39,15 @@ func (ps *impl) Shutdown() { func (ps *impl) Subscribe(ctx context.Context, keys ...u.Key) <-chan 
*blocks.Block { blocksCh := make(chan *blocks.Block, len(keys)) + valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking if len(keys) == 0 { close(blocksCh) return blocksCh } - valuesCh := ps.wrapped.SubOnceEach(toStrings(keys)...) + ps.wrapped.AddSubOnceEach(valuesCh, toStrings(keys)...) go func() { - defer func() { - close(blocksCh) - }() + defer close(blocksCh) + defer ps.wrapped.Unsub(valuesCh) // with a len(keys) buffer, this is an optimization for { select { case <-ctx.Done(): diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 6467f3d4f..3a6ada1ea 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -7,6 +7,8 @@ import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" blocks "github.com/jbenet/go-ipfs/blocks" + blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil" + "github.com/jbenet/go-ipfs/util" ) func TestDuplicates(t *testing.T) { @@ -96,6 +98,34 @@ func TestCarryOnWhenDeadlineExpires(t *testing.T) { assertBlockChannelNil(t, blockChannel) } +func TestDoesNotDeadLockIfContextCancelledBeforePublish(t *testing.T) { + + g := blocksutil.NewBlockGenerator() + ctx, cancel := context.WithCancel(context.Background()) + n := New() + defer n.Shutdown() + + t.Log("generate a large number of blocks. exceed default buffer") + bs := g.Blocks(1000) + ks := func() []util.Key { + var keys []util.Key + for _, b := range bs { + keys = append(keys, b.Key()) + } + return keys + }() + + _ = n.Subscribe(ctx, ks...) 
// ignore received channel + + t.Log("cancel context before any blocks published") + cancel() + for _, b := range bs { + n.Publish(b) + } + + t.Log("publishing the large number of blocks to the ignored channel must not deadlock") +} + func assertBlockChannelNil(t *testing.T, blockChannel <-chan *blocks.Block) { _, ok := <-blockChannel if ok { From 41e3af1db17a8820932efe373b3a5f87408255cd Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 26 Nov 2014 12:58:35 -0800 Subject: [PATCH 0164/1035] fix(bitswap) pass derived context to called functions @whyrusleeping @jbenet License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@2be5de8f6c357de216a61aebde8a72e087f2a408 --- bitswap/bitswap.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 95cb7ebf6..125561889 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -87,9 +87,13 @@ type bitswap struct { // deadline enforced by the context. func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, error) { - // make sure to derive a new |ctx| and pass it to children. It's correct to - // listen on |parent| here, but incorrect to pass |parent| to new async - // functions. This is difficult to enforce. May this comment keep you safe. + // Any async work initiated by this function must end when this function + // returns. To ensure this, derive a new context. Note that it is okay to + // listen on parent in this scope, but NOT okay to pass |parent| to + // functions called by this one. Otherwise those functions won't return + // when this context Otherwise those functions won't return when this + // context's cancel func is executed. This is difficult to enforce. May + // this comment keep you safe. 
ctx, cancelFunc := context.WithCancel(parent) @@ -101,7 +105,7 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err log.Event(ctx, "GetBlockRequestEnd", &k) }() - promise, err := bs.GetBlocks(parent, []u.Key{k}) + promise, err := bs.GetBlocks(ctx, []u.Key{k}) if err != nil { return nil, err } From f32a106ed81ffe04dd94168d68adab2f57893d4a Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 26 Nov 2014 14:22:10 -0800 Subject: [PATCH 0165/1035] refactor(bitswap) perform Publish in HasBlock License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@2ee030b643b9c9e53c632e3de7e6a77b8c3d3f65 --- bitswap/bitswap.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 125561889..490ae0d47 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -224,8 +224,10 @@ func (bs *bitswap) loop(parent context.Context) { // HasBlock announces the existance of a block to this bitswap service. The // service will potentially notify its peers. 
func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { + // TODO check all errors log.Debugf("Has Block %s", blk.Key()) bs.wantlist.Remove(blk.Key()) + bs.notifications.Publish(blk) bs.sendToPeersThatWant(ctx, blk) return bs.routing.Provide(ctx, blk.Key()) } @@ -258,8 +260,6 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm log.Criticalf("error putting block: %s", err) continue // FIXME(brian): err ignored } - bs.notifications.Publish(block) - bs.wantlist.Remove(block.Key()) err := bs.HasBlock(ctx, block) if err != nil { log.Warningf("HasBlock errored: %s", err) From ed245dfe985dadb14f963d7cdc4bd5f06ecac879 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 26 Nov 2014 22:50:41 +0000 Subject: [PATCH 0166/1035] some bitswap cleanup This commit was moved from ipfs/go-bitswap@15a7d870a6b4d207eb3ab97e0430931590aa637a --- bitswap/bitswap.go | 63 ++++++++++++++++++++++++------------ bitswap/strategy/strategy.go | 2 +- 2 files changed, 43 insertions(+), 22 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 490ae0d47..9cfe5875d 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -16,11 +16,14 @@ import ( strategy "github.com/jbenet/go-ipfs/exchange/bitswap/strategy" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" - "github.com/jbenet/go-ipfs/util/eventlog" + eventlog "github.com/jbenet/go-ipfs/util/eventlog" ) var log = eventlog.Logger("bitswap") +// Number of providers to request for sending a wantlist to +const maxProvidersPerRequest = 6 + // New initializes a BitSwap instance that communicates over the // provided BitSwapNetwork. This function registers the returned instance as // the network delegate. 
@@ -97,7 +100,7 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err ctx, cancelFunc := context.WithCancel(parent) - ctx = eventlog.ContextWithMetadata(ctx, eventlog.Uuid("GetBlockRequest")) + ctx = eventlog.ContextWithLoggable(ctx, eventlog.Uuid("GetBlockRequest")) log.Event(ctx, "GetBlockRequestBegin", &k) defer func() { @@ -176,14 +179,29 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e return nil } +func (bs *bitswap) sendWantlistToProviders(ctx context.Context, ks []u.Key) { + done := make(chan struct{}) + for _, k := range ks { + go func(k u.Key) { + providers := bs.routing.FindProvidersAsync(ctx, k, maxProvidersPerRequest) + + err := bs.sendWantListTo(ctx, providers) + if err != nil { + log.Errorf("error sending wantlist: %s", err) + } + done <- struct{}{} + }(k) + } + for _ = range ks { + <-done + } +} + // TODO ensure only one active request per key func (bs *bitswap) loop(parent context.Context) { ctx, cancel := context.WithCancel(parent) - // Every so often, we should resend out our current want list - rebroadcastTime := time.Second * 5 - broadcastSignal := time.NewTicker(bs.strategy.GetRebroadcastDelay()) defer func() { cancel() // signal to derived async functions @@ -193,15 +211,12 @@ func (bs *bitswap) loop(parent context.Context) { for { select { case <-broadcastSignal.C: - for _, k := range bs.wantlist.Keys() { - providers := bs.routing.FindProvidersAsync(ctx, k, maxProvidersPerRequest) - err := bs.sendWantListTo(ctx, providers) - if err != nil { - log.Errorf("error sending wantlist: %s", err) - } - } + bs.sendWantlistToProviders(ctx, bs.wantlist.Keys()) case ks := <-bs.batchRequests: // TODO: implement batching on len(ks) > X for some X + // i.e. 
if given 20 keys, fetch first five, then next + // five, and so on, so we are more likely to be able to + // effectively stream the data if len(ks) == 0 { log.Warning("Received batch request for zero blocks") continue @@ -232,6 +247,18 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { return bs.routing.Provide(ctx, blk.Key()) } +func (bs *bitswap) receiveBlock(ctx context.Context, block *blocks.Block) { + // TODO verify blocks? + if err := bs.blockstore.Put(block); err != nil { + log.Criticalf("error putting block: %s", err) + return + } + err := bs.HasBlock(ctx, block) + if err != nil { + log.Warningf("HasBlock errored: %s", err) + } +} + // TODO(brian): handle errors func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsmsg.BitSwapMessage) ( peer.Peer, bsmsg.BitSwapMessage) { @@ -255,15 +282,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm bs.strategy.MessageReceived(p, incoming) // FIRST for _, block := range incoming.Blocks() { - // TODO verify blocks? 
- if err := bs.blockstore.Put(block); err != nil { - log.Criticalf("error putting block: %s", err) - continue // FIXME(brian): err ignored - } - err := bs.HasBlock(ctx, block) - if err != nil { - log.Warningf("HasBlock errored: %s", err) - } + go bs.receiveBlock(ctx, block) } for _, key := range incoming.Wantlist() { @@ -277,6 +296,8 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm blkmsg := bsmsg.New() // TODO: only send this the first time + // no sense in sending our wantlist to the + // same peer multiple times for _, k := range bs.wantlist.Keys() { blkmsg.AddWanted(k) } diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index d86092da6..fb353d84a 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -148,5 +148,5 @@ func (s *strategist) GetBatchSize() int { } func (s *strategist) GetRebroadcastDelay() time.Duration { - return time.Second * 2 + return time.Second * 5 } From b65f137f5c0016a38eaedfc32ac8f5dab5c8d912 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 26 Nov 2014 23:48:43 +0000 Subject: [PATCH 0167/1035] document bitswap more This commit was moved from ipfs/go-bitswap@d12c96564c080c004bb3219d0976d363adda25af --- bitswap/bitswap.go | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 9cfe5875d..94c4cde88 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -22,7 +22,8 @@ import ( var log = eventlog.Logger("bitswap") // Number of providers to request for sending a wantlist to -const maxProvidersPerRequest = 6 +// TODO: if a 'non-nice' strategy is implemented, consider increasing this value +const maxProvidersPerRequest = 3 // New initializes a BitSwap instance that communicates over the // provided BitSwapNetwork. 
This function registers the returned instance as @@ -211,6 +212,7 @@ func (bs *bitswap) loop(parent context.Context) { for { select { case <-broadcastSignal.C: + // Resend unfulfilled wantlist keys bs.sendWantlistToProviders(ctx, bs.wantlist.Keys()) case ks := <-bs.batchRequests: // TODO: implement batching on len(ks) > X for some X @@ -224,6 +226,13 @@ func (bs *bitswap) loop(parent context.Context) { for _, k := range ks { bs.wantlist.Add(k) } + // NB: send want list to providers for the first peer in this list. + // the assumption is made that the providers of the first key in + // the set are likely to have others as well. + // This currently holds true in most every situation, since when + // pinning a file, you store and provide all blocks associated with + // it. Later, this assumption may not hold as true if we implement + // newer bitswap strategies. providers := bs.routing.FindProvidersAsync(ctx, ks[0], maxProvidersPerRequest) err := bs.sendWantListTo(ctx, providers) @@ -263,7 +272,6 @@ func (bs *bitswap) receiveBlock(ctx context.Context, block *blocks.Block) { func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsmsg.BitSwapMessage) ( peer.Peer, bsmsg.BitSwapMessage) { log.Debugf("ReceiveMessage from %s", p) - log.Debugf("Message wantlist: %v", incoming.Wantlist()) if p == nil { log.Error("Received message from nil peer!") @@ -279,15 +287,17 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm // Record message bytes in ledger // TODO: this is bad, and could be easily abused. // Should only track *useful* messages in ledger - bs.strategy.MessageReceived(p, incoming) // FIRST + // This call records changes to wantlists, blocks received, + // and number of bytes transfered. 
+ bs.strategy.MessageReceived(p, incoming) - for _, block := range incoming.Blocks() { - go bs.receiveBlock(ctx, block) - } + go func() { + for _, block := range incoming.Blocks() { + bs.receiveBlock(ctx, block) + } + }() for _, key := range incoming.Wantlist() { - // TODO: might be better to check if we have the block before checking - // if we should send it to someone if bs.strategy.ShouldSendBlockToPeer(key, p) { if block, errBlockNotFound := bs.blockstore.Get(key); errBlockNotFound != nil { continue @@ -303,12 +313,12 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm } blkmsg.AddBlock(block) - bs.strategy.MessageSent(p, blkmsg) bs.send(ctx, p, blkmsg) } } } + // TODO: consider changing this function to not return anything return nil, nil } @@ -326,7 +336,7 @@ func (bs *bitswap) send(ctx context.Context, p peer.Peer, m bsmsg.BitSwapMessage } func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block *blocks.Block) { - log.Debugf("Sending %v to peers that want it", block.Key()) + log.Debugf("Sending %s to peers that want it", block) for _, p := range bs.strategy.Peers() { if bs.strategy.BlockIsWantedByPeer(block.Key(), p) { From 923c079931265373cd1434e80418c1e7b7ff8f7b Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 27 Nov 2014 16:01:25 -0800 Subject: [PATCH 0168/1035] doc(bitswap) fix duplicaduplication @whyrusleeping https://github.com/jbenet/go-ipfs/commit/ada571425bc688b459cd34810fd398e5547b48a0#commitcomment-8753622 License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@badec8dc84d7f6578feb09bf5fd1054e1eb6312d --- bitswap/bitswap.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 94c4cde88..00b08a323 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -95,9 +95,8 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err // returns. To ensure this, derive a new context. 
Note that it is okay to // listen on parent in this scope, but NOT okay to pass |parent| to // functions called by this one. Otherwise those functions won't return - // when this context Otherwise those functions won't return when this - // context's cancel func is executed. This is difficult to enforce. May - // this comment keep you safe. + // when this context's cancel func is executed. This is difficult to + // enforce. May this comment keep you safe. ctx, cancelFunc := context.WithCancel(parent) From 612617671357437242aa5dd00da77fef42a397fc Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 1 Dec 2014 02:15:04 +0000 Subject: [PATCH 0169/1035] cleanup, use a workgroup over channels This commit was moved from ipfs/go-bitswap@27193bdec9545ce5e963b351dc0ee65ecb7428b1 --- bitswap/bitswap.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 00b08a323..debfd5f69 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -3,6 +3,7 @@ package bitswap import ( + "sync" "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" @@ -180,8 +181,9 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e } func (bs *bitswap) sendWantlistToProviders(ctx context.Context, ks []u.Key) { - done := make(chan struct{}) + wg := sync.WaitGroup{} for _, k := range ks { + wg.Add(1) go func(k u.Key) { providers := bs.routing.FindProvidersAsync(ctx, k, maxProvidersPerRequest) @@ -189,12 +191,10 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, ks []u.Key) { if err != nil { log.Errorf("error sending wantlist: %s", err) } - done <- struct{}{} + wg.Done() }(k) } - for _ = range ks { - <-done - } + wg.Wait() } // TODO ensure only one active request per key @@ -255,6 +255,7 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { return bs.routing.Provide(ctx, blk.Key()) } +// receiveBlock handles storing the block in 
the blockstore and calling HasBlock func (bs *bitswap) receiveBlock(ctx context.Context, block *blocks.Block) { // TODO verify blocks? if err := bs.blockstore.Put(block); err != nil { From d6bcf1510c9dd3ad6fe26f5d9c9d4ab5a3333b7e Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 1 Dec 2014 21:38:16 +0000 Subject: [PATCH 0170/1035] switch over to using sendMessage vs sendRequest This commit was moved from ipfs/go-bitswap@96e4204fcb5a24923a28ee0dfd588c5d1f0d2050 --- bitswap/bitswap.go | 10 +++------- bitswap/network/ipfs_impl.go | 17 ++--------------- 2 files changed, 5 insertions(+), 22 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index debfd5f69..44d51cde2 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -151,6 +151,7 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e message.AddWanted(wanted) } for peerToQuery := range peers { + log.Debug("sending query to: %s", peerToQuery) log.Event(ctx, "PeerToQuery", peerToQuery) go func(p peer.Peer) { @@ -161,20 +162,15 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e return } - response, err := bs.sender.SendRequest(ctx, p, message) + err = bs.sender.SendMessage(ctx, p, message) if err != nil { - log.Errorf("Error sender.SendRequest(%s) = %s", p, err) + log.Errorf("Error sender.SendMessage(%s) = %s", p, err) return } // FIXME ensure accounting is handled correctly when // communication fails. May require slightly different API to // get better guarantees. May need shared sequence numbers. 
bs.strategy.MessageSent(p, message) - - if response == nil { - return - } - bs.ReceiveMessage(ctx, p, response) }(peerToQuery) } return nil diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 1a3c11b44..f356285ef 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -48,21 +48,8 @@ func (bsnet *impl) HandleMessage( return nil } - p, bsmsg := bsnet.receiver.ReceiveMessage(ctx, incoming.Peer(), received) - - // TODO(brian): put this in a helper function - if bsmsg == nil || p == nil { - return nil - } - - outgoing, err := bsmsg.ToNet(p) - if err != nil { - go bsnet.receiver.ReceiveError(err) - return nil - } - - log.Debugf("Message size: %d", len(outgoing.Data())) - return outgoing + bsnet.receiver.ReceiveMessage(ctx, incoming.Peer(), received) + return nil } func (bsnet *impl) DialPeer(ctx context.Context, p peer.Peer) error { From 48481d4dade1b94e45085842a7dcc0a9b16474d8 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 2 Dec 2014 07:34:39 +0000 Subject: [PATCH 0171/1035] make bitswap sub-RPC's timeout (slowly for now) This commit was moved from ipfs/go-bitswap@90f5ec0c51d5d2fb892cdbca2b0a6fa7730c36b1 --- bitswap/bitswap.go | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 44d51cde2..ac9224228 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -26,6 +26,9 @@ var log = eventlog.Logger("bitswap") // TODO: if a 'non-nice' strategy is implemented, consider increasing this value const maxProvidersPerRequest = 3 +const providerRequestTimeout = time.Second * 10 +const hasBlockTimeout = time.Second * 15 + // New initializes a BitSwap instance that communicates over the // provided BitSwapNetwork. This function registers the returned instance as // the network delegate. 
@@ -181,7 +184,8 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, ks []u.Key) { for _, k := range ks { wg.Add(1) go func(k u.Key) { - providers := bs.routing.FindProvidersAsync(ctx, k, maxProvidersPerRequest) + child, _ := context.WithTimeout(ctx, providerRequestTimeout) + providers := bs.routing.FindProvidersAsync(child, k, maxProvidersPerRequest) err := bs.sendWantListTo(ctx, providers) if err != nil { @@ -228,7 +232,8 @@ func (bs *bitswap) loop(parent context.Context) { // pinning a file, you store and provide all blocks associated with // it. Later, this assumption may not hold as true if we implement // newer bitswap strategies. - providers := bs.routing.FindProvidersAsync(ctx, ks[0], maxProvidersPerRequest) + child, _ := context.WithTimeout(ctx, providerRequestTimeout) + providers := bs.routing.FindProvidersAsync(child, ks[0], maxProvidersPerRequest) err := bs.sendWantListTo(ctx, providers) if err != nil { @@ -247,8 +252,21 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { log.Debugf("Has Block %s", blk.Key()) bs.wantlist.Remove(blk.Key()) bs.notifications.Publish(blk) - bs.sendToPeersThatWant(ctx, blk) - return bs.routing.Provide(ctx, blk.Key()) + + var err error + wg := &sync.WaitGroup{} + wg.Add(2) + child, _ := context.WithTimeout(ctx, hasBlockTimeout) + go func() { + bs.sendToPeersThatWant(child, blk) + wg.Done() + }() + go func() { + err = bs.routing.Provide(child, blk.Key()) + wg.Done() + }() + wg.Wait() + return err } // receiveBlock handles storing the block in the blockstore and calling HasBlock From 0a2fab67f49e158e89c178989cd27a378215b5af Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 2 Dec 2014 08:03:00 +0000 Subject: [PATCH 0172/1035] remove unnecessary concurrency in last commit This commit was moved from ipfs/go-bitswap@1b7c0b14488320923ef6a17f0d656bf720b33549 --- bitswap/bitswap.go | 16 +++------------- bitswap/bitswap_test.go | 2 +- 2 files changed, 4 insertions(+), 14 deletions(-) diff --git 
a/bitswap/bitswap.go b/bitswap/bitswap.go index ac9224228..e00b23f91 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -253,20 +253,10 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { bs.wantlist.Remove(blk.Key()) bs.notifications.Publish(blk) - var err error - wg := &sync.WaitGroup{} - wg.Add(2) child, _ := context.WithTimeout(ctx, hasBlockTimeout) - go func() { - bs.sendToPeersThatWant(child, blk) - wg.Done() - }() - go func() { - err = bs.routing.Provide(child, blk.Key()) - wg.Done() - }() - wg.Wait() - return err + bs.sendToPeersThatWant(child, blk) + child, _ = context.WithTimeout(ctx, hasBlockTimeout) + return bs.routing.Provide(child, blk.Key()) } // receiveBlock handles storing the block in the blockstore and calling HasBlock diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index ede87c474..d26a8ffc9 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -235,7 +235,7 @@ func TestSendToWantingPeer(t *testing.T) { t.Logf("%v should now have %v\n", w.Peer, alpha.Key()) block, err := w.Blockstore.Get(alpha.Key()) if err != nil { - t.Fatal("Should not have received an error") + t.Fatalf("Should not have received an error: %s", err) } if block.Key() != alpha.Key() { t.Fatal("Expected to receive alpha from me") From f5900af9a51d32d25a4946b1b8301a3d96f6f585 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 3 Dec 2014 19:46:01 +0000 Subject: [PATCH 0173/1035] add readme for bitswap This commit was moved from ipfs/go-bitswap@44ac859e3568d08cac8e60ad9b84c188c1562bc4 --- bitswap/README.md | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 bitswap/README.md diff --git a/bitswap/README.md b/bitswap/README.md new file mode 100644 index 000000000..86b29e090 --- /dev/null +++ b/bitswap/README.md @@ -0,0 +1,24 @@ +#Welcome to Bitswap + +Bitswap is the module that is responsible for requesting blocks over the +network from other ipfs peers. 
+ +##Main Operations +Bitswap has three main operations: + +###GetBlocks +`GetBlocks` is a bitswap method used to request multiple blocks that are likely to all be provided by the same peer (part of a single file, for example). + +###GetBlock +`GetBlock` is a special case of `GetBlocks` that just requests a single block. + +###HasBlock +`HasBlock` registers a local block with bitswap. Bitswap will then send that block to any connected peers who want it (strategy allowing), and announce to the DHT that the block is being provided. + +##Internal Details +All `GetBlock` requests are relayed into a single for-select loop via channels. Calls to `GetBlocks` will have `FindProviders` called for only the first key in the set initially, This is an optimization attempting to cut down on the number of RPCs required. After a timeout (specified by the strategies `GetRebroadcastDelay`) Bitswap will iterate through all keys still in the local wantlist, perform a find providers call for each, and sent the wantlist out to those providers. This is the fallback behaviour for cases where our initial assumption about one peer potentially having multiple blocks in a set does not hold true. + +When receiving messages, Bitswaps `ReceiveMessage` method is called. A bitswap message may contain the wantlist of the peer who sent the message, and an array of blocks that were on our local wantlist. Any blocks we receive in a bitswap message will be passed to `HasBlock`, and the other peers wantlist gets updated in the strategy by `bs.strategy.MessageReceived`. 
+ +##Outstanding TODOs: +- Ensure only one request active per key From 05cd1bc7c7075271f38766b1ad51086b09844385 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 3 Dec 2014 23:48:38 +0000 Subject: [PATCH 0174/1035] update bitswap readme This commit was moved from ipfs/go-bitswap@0c3dc47e6464ab8e3639531ca767d7d95b2e98b8 --- bitswap/README.md | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/bitswap/README.md b/bitswap/README.md index 86b29e090..5f55c6ee3 100644 --- a/bitswap/README.md +++ b/bitswap/README.md @@ -7,18 +7,36 @@ network from other ipfs peers. Bitswap has three main operations: ###GetBlocks -`GetBlocks` is a bitswap method used to request multiple blocks that are likely to all be provided by the same peer (part of a single file, for example). +`GetBlocks` is a bitswap method used to request multiple blocks that are likely +to all be provided by the same peer (part of a single file, for example). ###GetBlock `GetBlock` is a special case of `GetBlocks` that just requests a single block. ###HasBlock -`HasBlock` registers a local block with bitswap. Bitswap will then send that block to any connected peers who want it (strategy allowing), and announce to the DHT that the block is being provided. +`HasBlock` registers a local block with bitswap. Bitswap will then send that +block to any connected peers who want it (strategy allowing), and announce to +the DHT that the block is being provided. ##Internal Details -All `GetBlock` requests are relayed into a single for-select loop via channels. Calls to `GetBlocks` will have `FindProviders` called for only the first key in the set initially, This is an optimization attempting to cut down on the number of RPCs required. After a timeout (specified by the strategies `GetRebroadcastDelay`) Bitswap will iterate through all keys still in the local wantlist, perform a find providers call for each, and sent the wantlist out to those providers. 
This is the fallback behaviour for cases where our initial assumption about one peer potentially having multiple blocks in a set does not hold true. - -When receiving messages, Bitswaps `ReceiveMessage` method is called. A bitswap message may contain the wantlist of the peer who sent the message, and an array of blocks that were on our local wantlist. Any blocks we receive in a bitswap message will be passed to `HasBlock`, and the other peers wantlist gets updated in the strategy by `bs.strategy.MessageReceived`. +All `GetBlock` requests are relayed into a single for-select loop via channels. +Calls to `GetBlocks` will have `FindProviders` called for only the first key in +the set initially, This is an optimization attempting to cut down on the number +of RPCs required. After a timeout (specified by the strategies +`GetRebroadcastDelay`) Bitswap will iterate through all keys still in the local +wantlist, perform a find providers call for each, and sent the wantlist out to +those providers. This is the fallback behaviour for cases where our initial +assumption about one peer potentially having multiple blocks in a set does not +hold true. + +When receiving messages, Bitswaps `ReceiveMessage` method is called. A bitswap +message may contain the wantlist of the peer who sent the message, and an array +of blocks that were on our local wantlist. Any blocks we receive in a bitswap +message will be passed to `HasBlock`, and the other peers wantlist gets updated +in the strategy by `bs.strategy.MessageReceived`. +If another peers wantlist is received, Bitswap will call its strategies +`ShouldSendBlockToPeer` method to determine whether or not the other peer will +be sent the block they are requesting (if we even have it). 
##Outstanding TODOs: - Ensure only one request active per key From a7df61242af4b5cfc69cd2391039e089d78c932f Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 4 Dec 2014 21:38:40 +0000 Subject: [PATCH 0175/1035] update bitswap readme This commit was moved from ipfs/go-bitswap@5512207a76c2e022b2643a1e520de7e2a221447f --- bitswap/README.md | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/bitswap/README.md b/bitswap/README.md index 5f55c6ee3..991d17213 100644 --- a/bitswap/README.md +++ b/bitswap/README.md @@ -1,22 +1,24 @@ -#Welcome to Bitswap +#Welcome to Bitswap (The data trading engine) -Bitswap is the module that is responsible for requesting blocks over the -network from other ipfs peers. +Bitswap is the module that is responsible for requesting and providing data +blocks over the network to and from other ipfs peers. The role of bitswap is +to be a merchant in the large global marketplace of data. ##Main Operations -Bitswap has three main operations: +Bitswap has three high level operations: ###GetBlocks `GetBlocks` is a bitswap method used to request multiple blocks that are likely -to all be provided by the same peer (part of a single file, for example). +to all be provided by the same set of peers (part of a single file, for example). ###GetBlock `GetBlock` is a special case of `GetBlocks` that just requests a single block. ###HasBlock `HasBlock` registers a local block with bitswap. Bitswap will then send that -block to any connected peers who want it (strategy allowing), and announce to -the DHT that the block is being provided. +block to any connected peers who want it (with the strategies approval), record +that transaction in the ledger and announce to the DHT that the block is being +provided. ##Internal Details All `GetBlock` requests are relayed into a single for-select loop via channels. 
@@ -39,4 +41,6 @@ If another peers wantlist is received, Bitswap will call its strategies be sent the block they are requesting (if we even have it). ##Outstanding TODOs: -- Ensure only one request active per key +[] Ensure only one request active per key +[] More involved strategies +[] Ensure only wanted blocks are counted in ledgers From 237bff4686f632719d7bb2f8cf0beb37d79c4278 Mon Sep 17 00:00:00 2001 From: Jeromy Johnson Date: Thu, 4 Dec 2014 21:48:11 +0000 Subject: [PATCH 0176/1035] Update README.md This commit was moved from ipfs/go-bitswap@0040487308cd4ed3bb1e0b1c3d1778a17bb762c4 --- bitswap/README.md | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/bitswap/README.md b/bitswap/README.md index 991d17213..bfa0aaa86 100644 --- a/bitswap/README.md +++ b/bitswap/README.md @@ -1,4 +1,5 @@ -#Welcome to Bitswap (The data trading engine) +#Welcome to Bitswap +###(The data trading engine) Bitswap is the module that is responsible for requesting and providing data blocks over the network to and from other ipfs peers. The role of bitswap is @@ -7,15 +8,15 @@ to be a merchant in the large global marketplace of data. ##Main Operations Bitswap has three high level operations: -###GetBlocks -`GetBlocks` is a bitswap method used to request multiple blocks that are likely +- **GetBlocks** + - `GetBlocks` is a bitswap method used to request multiple blocks that are likely to all be provided by the same set of peers (part of a single file, for example). -###GetBlock -`GetBlock` is a special case of `GetBlocks` that just requests a single block. +- **GetBlock** + - `GetBlock` is a special case of `GetBlocks` that just requests a single block. -###HasBlock -`HasBlock` registers a local block with bitswap. Bitswap will then send that +- **HasBlock** + - `HasBlock` registers a local block with bitswap. 
Bitswap will then send that block to any connected peers who want it (with the strategies approval), record that transaction in the ledger and announce to the DHT that the block is being provided. @@ -41,6 +42,6 @@ If another peers wantlist is received, Bitswap will call its strategies be sent the block they are requesting (if we even have it). ##Outstanding TODOs: -[] Ensure only one request active per key -[] More involved strategies -[] Ensure only wanted blocks are counted in ledgers +- [ ] Ensure only one request active per key +- [ ] More involved strategies +- [ ] Ensure only wanted blocks are counted in ledgers From 27ddb0009c91a2a5ef5386c1032809aea6875a03 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 26 Nov 2014 17:51:21 -0800 Subject: [PATCH 0177/1035] refactor(bitswap) consolidate HasBlock License: MIT Signed-off-by: Brian Tiger Chow Conflicts: exchange/bitswap/bitswap.go This commit was moved from ipfs/go-bitswap@61599656758773863ef6d2f80601d05779ce472e --- bitswap/bitswap.go | 45 +++++++++++++++++++-------------------------- 1 file changed, 19 insertions(+), 26 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index e00b23f91..504a3dad9 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -248,30 +248,19 @@ func (bs *bitswap) loop(parent context.Context) { // HasBlock announces the existance of a block to this bitswap service. The // service will potentially notify its peers. 
func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { - // TODO check all errors - log.Debugf("Has Block %s", blk.Key()) + if err := bs.blockstore.Put(blk); err != nil { + return err + } bs.wantlist.Remove(blk.Key()) bs.notifications.Publish(blk) - child, _ := context.WithTimeout(ctx, hasBlockTimeout) - bs.sendToPeersThatWant(child, blk) + if err := bs.sendToPeersThatWant(child, blk); err != nil { + return err + } child, _ = context.WithTimeout(ctx, hasBlockTimeout) return bs.routing.Provide(child, blk.Key()) } -// receiveBlock handles storing the block in the blockstore and calling HasBlock -func (bs *bitswap) receiveBlock(ctx context.Context, block *blocks.Block) { - // TODO verify blocks? - if err := bs.blockstore.Put(block); err != nil { - log.Criticalf("error putting block: %s", err) - return - } - err := bs.HasBlock(ctx, block) - if err != nil { - log.Warningf("HasBlock errored: %s", err) - } -} - // TODO(brian): handle errors func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsmsg.BitSwapMessage) ( peer.Peer, bsmsg.BitSwapMessage) { @@ -297,7 +286,9 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm go func() { for _, block := range incoming.Blocks() { - bs.receiveBlock(ctx, block) + if err := bs.HasBlock(ctx, block); err != nil { + log.Error(err) + } } }() @@ -334,27 +325,29 @@ func (bs *bitswap) ReceiveError(err error) { // send strives to ensure that accounting is always performed when a message is // sent -func (bs *bitswap) send(ctx context.Context, p peer.Peer, m bsmsg.BitSwapMessage) { - bs.sender.SendMessage(ctx, p, m) - bs.strategy.MessageSent(p, m) +func (bs *bitswap) send(ctx context.Context, p peer.Peer, m bsmsg.BitSwapMessage) error { + if err := bs.sender.SendMessage(ctx, p, m); err != nil { + return err + } + return bs.strategy.MessageSent(p, m) } -func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block *blocks.Block) { - log.Debugf("Sending %s to peers 
that want it", block) - +func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block *blocks.Block) error { for _, p := range bs.strategy.Peers() { if bs.strategy.BlockIsWantedByPeer(block.Key(), p) { - log.Debugf("%v wants %v", p, block.Key()) if bs.strategy.ShouldSendBlockToPeer(block.Key(), p) { message := bsmsg.New() message.AddBlock(block) for _, wanted := range bs.wantlist.Keys() { message.AddWanted(wanted) } - bs.send(ctx, p, message) + if err := bs.send(ctx, p, message); err != nil { + return err + } } } } + return nil } func (bs *bitswap) Close() error { From 8310a795697d8bb1ef0440ef05db62f492b3adc2 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 2 Dec 2014 19:37:50 -0800 Subject: [PATCH 0178/1035] move public method to top of file License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@c0f399b96376f594eb6a371a22bc2eb9f7f25fdb --- bitswap/bitswap.go | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 504a3dad9..4c8b1c160 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -145,6 +145,22 @@ func (bs *bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks. } } +// HasBlock announces the existance of a block to this bitswap service. The +// service will potentially notify its peers. 
+func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { + if err := bs.blockstore.Put(blk); err != nil { + return err + } + bs.wantlist.Remove(blk.Key()) + bs.notifications.Publish(blk) + child, _ := context.WithTimeout(ctx, hasBlockTimeout) + if err := bs.sendToPeersThatWant(child, blk); err != nil { + return err + } + child, _ = context.WithTimeout(ctx, hasBlockTimeout) + return bs.routing.Provide(child, blk.Key()) +} + func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) error { if peers == nil { panic("Cant send wantlist to nil peerchan") @@ -245,22 +261,6 @@ func (bs *bitswap) loop(parent context.Context) { } } -// HasBlock announces the existance of a block to this bitswap service. The -// service will potentially notify its peers. -func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { - if err := bs.blockstore.Put(blk); err != nil { - return err - } - bs.wantlist.Remove(blk.Key()) - bs.notifications.Publish(blk) - child, _ := context.WithTimeout(ctx, hasBlockTimeout) - if err := bs.sendToPeersThatWant(child, blk); err != nil { - return err - } - child, _ = context.WithTimeout(ctx, hasBlockTimeout) - return bs.routing.Provide(child, blk.Key()) -} - // TODO(brian): handle errors func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsmsg.BitSwapMessage) ( peer.Peer, bsmsg.BitSwapMessage) { From 90b47feb00078a6804e463fd473b8961da0015b8 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 2 Dec 2014 21:44:16 -0800 Subject: [PATCH 0179/1035] rm unnecessary concurrency License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@ccf6d93b0248fc8ad54f9fc47498477812daa81e --- bitswap/bitswap.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 4c8b1c160..8dbf05314 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -284,13 +284,11 @@ func (bs *bitswap) 
ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm // and number of bytes transfered. bs.strategy.MessageReceived(p, incoming) - go func() { - for _, block := range incoming.Blocks() { - if err := bs.HasBlock(ctx, block); err != nil { - log.Error(err) - } + for _, block := range incoming.Blocks() { + if err := bs.HasBlock(ctx, block); err != nil { + log.Error(err) } - }() + } for _, key := range incoming.Wantlist() { if bs.strategy.ShouldSendBlockToPeer(key, p) { From 7430e01594fb5bba0db921b3338646ea3ec71d89 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 7 Dec 2014 07:54:44 +0000 Subject: [PATCH 0180/1035] prevent sending of same block to a peer twice This commit was moved from ipfs/go-bitswap@13ab516c1c4ad7390dafe454b645338d0bd4fe30 --- bitswap/bitswap.go | 1 + bitswap/strategy/interface.go | 2 ++ bitswap/strategy/ledger.go | 11 ++++++++--- bitswap/strategy/strategy.go | 14 ++++++++++++++ 4 files changed, 25 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 8dbf05314..64f293528 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -307,6 +307,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm blkmsg.AddBlock(block) bs.send(ctx, p, blkmsg) + bs.strategy.BlockSentToPeer(block.Key(), p) } } } diff --git a/bitswap/strategy/interface.go b/bitswap/strategy/interface.go index 503a50d41..58385f5b7 100644 --- a/bitswap/strategy/interface.go +++ b/bitswap/strategy/interface.go @@ -32,6 +32,8 @@ type Strategy interface { NumBytesReceivedFrom(peer.Peer) uint64 + BlockSentToPeer(u.Key, peer.Peer) + // Values determining bitswap behavioural patterns GetBatchSize() int GetRebroadcastDelay() time.Duration diff --git a/bitswap/strategy/ledger.go b/bitswap/strategy/ledger.go index 74feb3407..525b6af56 100644 --- a/bitswap/strategy/ledger.go +++ b/bitswap/strategy/ledger.go @@ -13,9 +13,10 @@ type keySet map[u.Key]struct{} func newLedger(p peer.Peer, strategy strategyFunc) *ledger { return 
&ledger{ - wantList: keySet{}, - Strategy: strategy, - Partner: p, + wantList: keySet{}, + Strategy: strategy, + Partner: p, + sentToPeer: make(map[u.Key]struct{}), } } @@ -40,6 +41,10 @@ type ledger struct { // wantList is a (bounded, small) set of keys that Partner desires. wantList keySet + // sentToPeer is a set of keys to ensure we dont send duplicate blocks + // to a given peer + sentToPeer map[u.Key]struct{} + Strategy strategyFunc } diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index fb353d84a..af1c35848 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -65,9 +65,23 @@ func (s *strategist) ShouldSendBlockToPeer(k u.Key, p peer.Peer) bool { defer s.lock.RUnlock() ledger := s.ledger(p) + + // Dont resend blocks + if _, ok := ledger.sentToPeer[k]; ok { + return false + } + return ledger.ShouldSend() } +func (s *strategist) BlockSentToPeer(k u.Key, p peer.Peer) { + s.lock.Lock() + defer s.lock.Unlock() + + ledger := s.ledger(p) + ledger.sentToPeer[k] = struct{}{} +} + func (s *strategist) Seed(int64) { s.lock.Lock() defer s.lock.Unlock() From 04983e0e28e5e0cd055c92323194b6c8c73b09a3 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 7 Dec 2014 20:54:31 +0000 Subject: [PATCH 0181/1035] same block cant be sent twice to a peer within a certain time period This commit was moved from ipfs/go-bitswap@29aa7547bb6277fa1b3ec62c0c1cbff609727b36 --- bitswap/strategy/ledger.go | 4 ++-- bitswap/strategy/strategy.go | 9 ++++++--- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/bitswap/strategy/ledger.go b/bitswap/strategy/ledger.go index 525b6af56..84e92d035 100644 --- a/bitswap/strategy/ledger.go +++ b/bitswap/strategy/ledger.go @@ -16,7 +16,7 @@ func newLedger(p peer.Peer, strategy strategyFunc) *ledger { wantList: keySet{}, Strategy: strategy, Partner: p, - sentToPeer: make(map[u.Key]struct{}), + sentToPeer: make(map[u.Key]time.Time), } } @@ -43,7 +43,7 @@ type ledger struct { // sentToPeer is a set of 
keys to ensure we dont send duplicate blocks // to a given peer - sentToPeer map[u.Key]struct{} + sentToPeer map[u.Key]time.Time Strategy strategyFunc } diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index af1c35848..fe7414caa 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -10,6 +10,8 @@ import ( u "github.com/jbenet/go-ipfs/util" ) +const resendTimeoutPeriod = time.Minute + var log = u.Logger("strategy") // TODO niceness should be on a per-peer basis. Use-case: Certain peers are @@ -66,8 +68,9 @@ func (s *strategist) ShouldSendBlockToPeer(k u.Key, p peer.Peer) bool { ledger := s.ledger(p) - // Dont resend blocks - if _, ok := ledger.sentToPeer[k]; ok { + // Dont resend blocks within a certain time period + t, ok := ledger.sentToPeer[k] + if ok && t.Add(resendTimeoutPeriod).After(time.Now()) { return false } @@ -79,7 +82,7 @@ func (s *strategist) BlockSentToPeer(k u.Key, p peer.Peer) { defer s.lock.Unlock() ledger := s.ledger(p) - ledger.sentToPeer[k] = struct{}{} + ledger.sentToPeer[k] = time.Now() } func (s *strategist) Seed(int64) { From bd5891f51a241ba4740b9c01530a7ab25ec16c74 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 7 Dec 2014 21:03:54 +0000 Subject: [PATCH 0182/1035] log when dupe block is prevented This commit was moved from ipfs/go-bitswap@a48e70f92929ce844730c62c1485a108a24fbcbe --- bitswap/strategy/strategy.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index fe7414caa..3993eba05 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -71,6 +71,7 @@ func (s *strategist) ShouldSendBlockToPeer(k u.Key, p peer.Peer) bool { // Dont resend blocks within a certain time period t, ok := ledger.sentToPeer[k] if ok && t.Add(resendTimeoutPeriod).After(time.Now()) { + log.Error("Prevented block resend!") return false } From d1f96c232b35681a4de3fbb5930fba08af58daa2 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow 
Date: Mon, 8 Dec 2014 01:40:07 -0800 Subject: [PATCH 0183/1035] refactor(peer): create peer through peerstore for safety! use mockpeer.WithID methods to create peers in tests License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@8bff08bddb7beaa0f1a59c2c74e40c8022b1d94b --- bitswap/bitswap_test.go | 4 ++-- bitswap/message/message_test.go | 8 ++++---- bitswap/strategy/strategy_test.go | 3 ++- bitswap/testnet/network_test.go | 13 +++++++------ bitswap/testutils.go | 8 +++++--- 5 files changed, 20 insertions(+), 16 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index d26a8ffc9..b1fb52f44 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -10,7 +10,7 @@ import ( blocks "github.com/jbenet/go-ipfs/blocks" blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" - peer "github.com/jbenet/go-ipfs/peer" + "github.com/jbenet/go-ipfs/peer/mock" mock "github.com/jbenet/go-ipfs/routing/mock" ) @@ -53,7 +53,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { g := NewSessionGenerator(net, rs) block := blocks.NewBlock([]byte("block")) - rs.Announce(peer.WithIDString("testing"), block.Key()) // but not on network + rs.Announce(mockpeer.WithIDString("testing"), block.Key()) // but not on network solo := g.Next() diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index de64b7925..daea58f90 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -6,7 +6,7 @@ import ( blocks "github.com/jbenet/go-ipfs/blocks" pb "github.com/jbenet/go-ipfs/exchange/bitswap/message/internal/pb" - peer "github.com/jbenet/go-ipfs/peer" + "github.com/jbenet/go-ipfs/peer/mock" u "github.com/jbenet/go-ipfs/util" ) @@ -89,7 +89,7 @@ func TestCopyProtoByValue(t *testing.T) { func TestToNetMethodSetsPeer(t *testing.T) { m := New() - p := peer.WithIDString("X") + p := mockpeer.WithIDString("X") netmsg, err := 
m.ToNet(p) if err != nil { t.Fatal(err) @@ -107,7 +107,7 @@ func TestToNetFromNetPreservesWantList(t *testing.T) { original.AddWanted(u.Key("T")) original.AddWanted(u.Key("F")) - p := peer.WithIDString("X") + p := mockpeer.WithIDString("X") netmsg, err := original.ToNet(p) if err != nil { t.Fatal(err) @@ -138,7 +138,7 @@ func TestToAndFromNetMessage(t *testing.T) { original.AddBlock(blocks.NewBlock([]byte("F"))) original.AddBlock(blocks.NewBlock([]byte("M"))) - p := peer.WithIDString("X") + p := mockpeer.WithIDString("X") netmsg, err := original.ToNet(p) if err != nil { t.Fatal(err) diff --git a/bitswap/strategy/strategy_test.go b/bitswap/strategy/strategy_test.go index d07af601b..4fdbc4ab5 100644 --- a/bitswap/strategy/strategy_test.go +++ b/bitswap/strategy/strategy_test.go @@ -7,6 +7,7 @@ import ( blocks "github.com/jbenet/go-ipfs/blocks" message "github.com/jbenet/go-ipfs/exchange/bitswap/message" peer "github.com/jbenet/go-ipfs/peer" + "github.com/jbenet/go-ipfs/peer/mock" ) type peerAndStrategist struct { @@ -16,7 +17,7 @@ type peerAndStrategist struct { func newPeerAndStrategist(idStr string) peerAndStrategist { return peerAndStrategist{ - Peer: peer.WithIDString(idStr), + Peer: mockpeer.WithIDString(idStr), Strategy: New(true), } } diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 6f57aad50..eb3c83112 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -9,6 +9,7 @@ import ( bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/peer" + "github.com/jbenet/go-ipfs/peer/mock" ) func TestSendRequestToCooperativePeer(t *testing.T) { @@ -18,8 +19,8 @@ func TestSendRequestToCooperativePeer(t *testing.T) { t.Log("Get two network adapters") - initiator := net.Adapter(peer.WithIDString("initiator")) - recipient := net.Adapter(peer.WithID(idOfRecipient)) + initiator := 
net.Adapter(mockpeer.WithIDString("initiator")) + recipient := net.Adapter(mockpeer.WithID(idOfRecipient)) expectedStr := "response from recipient" recipient.SetDelegate(lambda(func( @@ -43,7 +44,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) { message := bsmsg.New() message.AddBlock(blocks.NewBlock([]byte("data"))) response, err := initiator.SendRequest( - context.Background(), peer.WithID(idOfRecipient), message) + context.Background(), mockpeer.WithID(idOfRecipient), message) if err != nil { t.Fatal(err) } @@ -61,8 +62,8 @@ func TestSendRequestToCooperativePeer(t *testing.T) { func TestSendMessageAsyncButWaitForResponse(t *testing.T) { net := VirtualNetwork() idOfResponder := []byte("responder") - waiter := net.Adapter(peer.WithIDString("waiter")) - responder := net.Adapter(peer.WithID(idOfResponder)) + waiter := net.Adapter(mockpeer.WithIDString("waiter")) + responder := net.Adapter(mockpeer.WithID(idOfResponder)) var wg sync.WaitGroup @@ -107,7 +108,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { messageSentAsync := bsmsg.New() messageSentAsync.AddBlock(blocks.NewBlock([]byte("data"))) errSending := waiter.SendMessage( - context.Background(), peer.WithID(idOfResponder), messageSentAsync) + context.Background(), mockpeer.WithID(idOfResponder), messageSentAsync) if errSending != nil { t.Fatal(errSending) } diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 402a5b1d2..7f8ef8546 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -16,6 +16,7 @@ func NewSessionGenerator( return SessionGenerator{ net: net, rs: rs, + ps: peer.NewPeerstore(), seq: 0, } } @@ -24,11 +25,12 @@ type SessionGenerator struct { seq int net tn.Network rs mock.RoutingServer + ps peer.Peerstore } func (g *SessionGenerator) Next() Instance { g.seq++ - return session(g.net, g.rs, []byte(string(g.seq))) + return session(g.net, g.rs, g.ps, []byte(string(g.seq))) } func (g *SessionGenerator) Instances(n int) []Instance { @@ -51,8 +53,8 @@ type 
Instance struct { // NB: It's easy make mistakes by providing the same peer ID to two different // sessions. To safeguard, use the SessionGenerator to generate sessions. It's // just a much better idea. -func session(net tn.Network, rs mock.RoutingServer, id peer.ID) Instance { - p := peer.WithID(id) +func session(net tn.Network, rs mock.RoutingServer, ps peer.Peerstore, id peer.ID) Instance { + p := ps.WithID(id) adapter := net.Adapter(p) htc := rs.Client(p) From c9f43307d11a3736cdf34506eff37c7831e4b589 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 8 Dec 2014 14:32:52 -0800 Subject: [PATCH 0184/1035] fix(core, peer) helpers to testutil, err handling License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@011cb28d16901c612511787e61ebfd00bebd5c38 --- bitswap/bitswap_test.go | 4 ++-- bitswap/message/message_test.go | 8 ++++---- bitswap/strategy/strategy_test.go | 4 ++-- bitswap/testnet/network_test.go | 14 +++++++------- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index b1fb52f44..4d0b5e59d 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -10,8 +10,8 @@ import ( blocks "github.com/jbenet/go-ipfs/blocks" blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" - "github.com/jbenet/go-ipfs/peer/mock" mock "github.com/jbenet/go-ipfs/routing/mock" + testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestClose(t *testing.T) { @@ -53,7 +53,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { g := NewSessionGenerator(net, rs) block := blocks.NewBlock([]byte("block")) - rs.Announce(mockpeer.WithIDString("testing"), block.Key()) // but not on network + rs.Announce(testutil.NewPeerWithIDString("testing"), block.Key()) // but not on network solo := g.Next() diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index daea58f90..5fe98634c 100644 
--- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -6,8 +6,8 @@ import ( blocks "github.com/jbenet/go-ipfs/blocks" pb "github.com/jbenet/go-ipfs/exchange/bitswap/message/internal/pb" - "github.com/jbenet/go-ipfs/peer/mock" u "github.com/jbenet/go-ipfs/util" + testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestAppendWanted(t *testing.T) { @@ -89,7 +89,7 @@ func TestCopyProtoByValue(t *testing.T) { func TestToNetMethodSetsPeer(t *testing.T) { m := New() - p := mockpeer.WithIDString("X") + p := testutil.NewPeerWithIDString("X") netmsg, err := m.ToNet(p) if err != nil { t.Fatal(err) @@ -107,7 +107,7 @@ func TestToNetFromNetPreservesWantList(t *testing.T) { original.AddWanted(u.Key("T")) original.AddWanted(u.Key("F")) - p := mockpeer.WithIDString("X") + p := testutil.NewPeerWithIDString("X") netmsg, err := original.ToNet(p) if err != nil { t.Fatal(err) @@ -138,7 +138,7 @@ func TestToAndFromNetMessage(t *testing.T) { original.AddBlock(blocks.NewBlock([]byte("F"))) original.AddBlock(blocks.NewBlock([]byte("M"))) - p := mockpeer.WithIDString("X") + p := testutil.NewPeerWithIDString("X") netmsg, err := original.ToNet(p) if err != nil { t.Fatal(err) diff --git a/bitswap/strategy/strategy_test.go b/bitswap/strategy/strategy_test.go index 4fdbc4ab5..e063dff68 100644 --- a/bitswap/strategy/strategy_test.go +++ b/bitswap/strategy/strategy_test.go @@ -7,7 +7,7 @@ import ( blocks "github.com/jbenet/go-ipfs/blocks" message "github.com/jbenet/go-ipfs/exchange/bitswap/message" peer "github.com/jbenet/go-ipfs/peer" - "github.com/jbenet/go-ipfs/peer/mock" + testutil "github.com/jbenet/go-ipfs/util/testutil" ) type peerAndStrategist struct { @@ -17,7 +17,7 @@ type peerAndStrategist struct { func newPeerAndStrategist(idStr string) peerAndStrategist { return peerAndStrategist{ - Peer: mockpeer.WithIDString(idStr), + Peer: testutil.NewPeerWithIDString(idStr), Strategy: New(true), } } diff --git a/bitswap/testnet/network_test.go 
b/bitswap/testnet/network_test.go index eb3c83112..0bfb0cb1e 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -9,7 +9,7 @@ import ( bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/peer" - "github.com/jbenet/go-ipfs/peer/mock" + testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestSendRequestToCooperativePeer(t *testing.T) { @@ -19,8 +19,8 @@ func TestSendRequestToCooperativePeer(t *testing.T) { t.Log("Get two network adapters") - initiator := net.Adapter(mockpeer.WithIDString("initiator")) - recipient := net.Adapter(mockpeer.WithID(idOfRecipient)) + initiator := net.Adapter(testutil.NewPeerWithIDString("initiator")) + recipient := net.Adapter(testutil.NewPeerWithID(idOfRecipient)) expectedStr := "response from recipient" recipient.SetDelegate(lambda(func( @@ -44,7 +44,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) { message := bsmsg.New() message.AddBlock(blocks.NewBlock([]byte("data"))) response, err := initiator.SendRequest( - context.Background(), mockpeer.WithID(idOfRecipient), message) + context.Background(), testutil.NewPeerWithID(idOfRecipient), message) if err != nil { t.Fatal(err) } @@ -62,8 +62,8 @@ func TestSendRequestToCooperativePeer(t *testing.T) { func TestSendMessageAsyncButWaitForResponse(t *testing.T) { net := VirtualNetwork() idOfResponder := []byte("responder") - waiter := net.Adapter(mockpeer.WithIDString("waiter")) - responder := net.Adapter(mockpeer.WithID(idOfResponder)) + waiter := net.Adapter(testutil.NewPeerWithIDString("waiter")) + responder := net.Adapter(testutil.NewPeerWithID(idOfResponder)) var wg sync.WaitGroup @@ -108,7 +108,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { messageSentAsync := bsmsg.New() messageSentAsync.AddBlock(blocks.NewBlock([]byte("data"))) errSending := waiter.SendMessage( - context.Background(), mockpeer.WithID(idOfResponder), 
messageSentAsync) + context.Background(), testutil.NewPeerWithID(idOfResponder), messageSentAsync) if errSending != nil { t.Fatal(errSending) } From f329f1e432771e82f2360460398e225df96d6264 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 10 Dec 2014 01:59:08 -0800 Subject: [PATCH 0185/1035] fix(bs/testnet) rm named error Real version doesn't expose this License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@aa10757f73ef1644a26b8426ce826fd85f82fed8 --- bitswap/testnet/network.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/bitswap/testnet/network.go b/bitswap/testnet/network.go index 691b7cb42..7f82bcdce 100644 --- a/bitswap/testnet/network.go +++ b/bitswap/testnet/network.go @@ -102,8 +102,6 @@ func (n *network) deliver( return nil } -var NoResponse = errors.New("No response received from the receiver") - // TODO func (n *network) SendRequest( ctx context.Context, From 9bb746dc8710c73d71dfb8e56452e094cff35a00 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 12 Dec 2014 20:05:54 -0800 Subject: [PATCH 0186/1035] refactor(mdag, bserv, bs) mocks, etc. 
License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@c211b0611416276245ebe164e66c9f65a9e93be2 --- bitswap/bitswap_test.go | 14 +++++++------- bitswap/testutils.go | 37 +++++++++++++++++++++++++++---------- 2 files changed, 34 insertions(+), 17 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 4d0b5e59d..d57132fba 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -76,7 +76,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { hasBlock := g.Next() - if err := hasBlock.Blockstore.Put(block); err != nil { + if err := hasBlock.Blockstore().Put(block); err != nil { t.Fatal(err) } if err := hasBlock.Exchange.HasBlock(context.Background(), block); err != nil { @@ -135,7 +135,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { first := instances[0] for _, b := range blocks { - first.Blockstore.Put(b) + first.Blockstore().Put(b) first.Exchange.HasBlock(context.Background(), b) rs.Announce(first.Peer, b.Key()) } @@ -158,7 +158,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { for _, inst := range instances { for _, b := range blocks { - if _, err := inst.Blockstore.Get(b.Key()); err != nil { + if _, err := inst.Blockstore().Get(b.Key()); err != nil { t.Fatal(err) } } @@ -166,7 +166,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { } func getOrFail(bitswap Instance, b *blocks.Block, t *testing.T, wg *sync.WaitGroup) { - if _, err := bitswap.Blockstore.Get(b.Key()); err != nil { + if _, err := bitswap.Blockstore().Get(b.Key()); err != nil { _, err := bitswap.Exchange.GetBlock(context.Background(), b.Key()) if err != nil { t.Fatal(err) @@ -208,7 +208,7 @@ func TestSendToWantingPeer(t *testing.T) { beta := bg.Next() t.Logf("Peer %v announes availability of %v\n", w.Peer, beta.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) - if err := w.Blockstore.Put(beta); err != nil { + if err 
:= w.Blockstore().Put(beta); err != nil { t.Fatal(err) } w.Exchange.HasBlock(ctx, beta) @@ -221,7 +221,7 @@ func TestSendToWantingPeer(t *testing.T) { t.Logf("%v announces availability of %v\n", o.Peer, alpha.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) - if err := o.Blockstore.Put(alpha); err != nil { + if err := o.Blockstore().Put(alpha); err != nil { t.Fatal(err) } o.Exchange.HasBlock(ctx, alpha) @@ -233,7 +233,7 @@ func TestSendToWantingPeer(t *testing.T) { } t.Logf("%v should now have %v\n", w.Peer, alpha.Key()) - block, err := w.Blockstore.Get(alpha.Key()) + block, err := w.Blockstore().Get(alpha.Key()) if err != nil { t.Fatalf("Should not have received an error: %s", err) } diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 7f8ef8546..10a02606b 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -1,14 +1,18 @@ package bitswap import ( - "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + "time" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" ds_sync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" - "github.com/jbenet/go-ipfs/blocks/blockstore" - "github.com/jbenet/go-ipfs/exchange" + blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" + exchange "github.com/jbenet/go-ipfs/exchange" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" - "github.com/jbenet/go-ipfs/peer" - "github.com/jbenet/go-ipfs/routing/mock" + peer "github.com/jbenet/go-ipfs/peer" + mock "github.com/jbenet/go-ipfs/routing/mock" + datastore2 "github.com/jbenet/go-ipfs/util/datastore2" + delay "github.com/jbenet/go-ipfs/util/delay" ) func NewSessionGenerator( @@ -45,7 +49,17 @@ func (g *SessionGenerator) Instances(n int) []Instance { type Instance struct { Peer peer.Peer Exchange exchange.Interface - Blockstore blockstore.Blockstore + 
blockstore blockstore.Blockstore + + blockstoreDelay delay.D +} + +func (i *Instance) Blockstore() blockstore.Blockstore { + return i.blockstore +} + +func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { + return i.blockstoreDelay.Set(t) } // session creates a test bitswap session. @@ -58,7 +72,9 @@ func session(net tn.Network, rs mock.RoutingServer, ps peer.Peerstore, id peer.I adapter := net.Adapter(p) htc := rs.Client(p) - bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore())) + + bsdelay := delay.Fixed(0) + bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay))) const alwaysSendToPeer = true ctx := context.TODO() @@ -66,8 +82,9 @@ func session(net tn.Network, rs mock.RoutingServer, ps peer.Peerstore, id peer.I bs := New(ctx, p, adapter, htc, bstore, alwaysSendToPeer) return Instance{ - Peer: p, - Exchange: bs, - Blockstore: bstore, + Peer: p, + Exchange: bs, + blockstore: bstore, + blockstoreDelay: bsdelay, } } From e098136caa2ed8f597485c4a47186aa4116a5f27 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 12 Dec 2014 22:28:24 -0800 Subject: [PATCH 0187/1035] feat(bs/testnet) use delay in virtual network License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@7d7fd57a44119bbe6436040cb54997f226ed08d1 --- bitswap/bitswap_test.go | 13 +++++++------ bitswap/testnet/network.go | 9 +++++++-- bitswap/testnet/network_test.go | 5 +++-- 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index d57132fba..21b259a7e 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -11,13 +11,14 @@ import ( blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" mock "github.com/jbenet/go-ipfs/routing/mock" + delay "github.com/jbenet/go-ipfs/util/delay" testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestClose(t 
*testing.T) { // TODO t.Skip("TODO Bitswap's Close implementation is a WIP") - vnet := tn.VirtualNetwork() + vnet := tn.VirtualNetwork(delay.Fixed(0)) rout := mock.VirtualRoutingServer() sesgen := NewSessionGenerator(vnet, rout) bgen := blocksutil.NewBlockGenerator() @@ -31,7 +32,7 @@ func TestClose(t *testing.T) { func TestGetBlockTimeout(t *testing.T) { - net := tn.VirtualNetwork() + net := tn.VirtualNetwork(delay.Fixed(0)) rs := mock.VirtualRoutingServer() g := NewSessionGenerator(net, rs) @@ -48,7 +49,7 @@ func TestGetBlockTimeout(t *testing.T) { func TestProviderForKeyButNetworkCannotFind(t *testing.T) { - net := tn.VirtualNetwork() + net := tn.VirtualNetwork(delay.Fixed(0)) rs := mock.VirtualRoutingServer() g := NewSessionGenerator(net, rs) @@ -69,7 +70,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { - net := tn.VirtualNetwork() + net := tn.VirtualNetwork(delay.Fixed(0)) rs := mock.VirtualRoutingServer() block := blocks.NewBlock([]byte("block")) g := NewSessionGenerator(net, rs) @@ -121,7 +122,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { if testing.Short() { t.SkipNow() } - net := tn.VirtualNetwork() + net := tn.VirtualNetwork(delay.Fixed(0)) rs := mock.VirtualRoutingServer() sg := NewSessionGenerator(net, rs) bg := blocksutil.NewBlockGenerator() @@ -181,7 +182,7 @@ func TestSendToWantingPeer(t *testing.T) { t.SkipNow() } - net := tn.VirtualNetwork() + net := tn.VirtualNetwork(delay.Fixed(0)) rs := mock.VirtualRoutingServer() sg := NewSessionGenerator(net, rs) bg := blocksutil.NewBlockGenerator() diff --git a/bitswap/testnet/network.go b/bitswap/testnet/network.go index 7f82bcdce..b8f61b413 100644 --- a/bitswap/testnet/network.go +++ b/bitswap/testnet/network.go @@ -10,6 +10,7 @@ import ( bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/peer" "github.com/jbenet/go-ipfs/util" + delay 
"github.com/jbenet/go-ipfs/util/delay" ) type Network interface { @@ -33,14 +34,16 @@ type Network interface { // network impl -func VirtualNetwork() Network { +func VirtualNetwork(d delay.D) Network { return &network{ clients: make(map[util.Key]bsnet.Receiver), + delay: d, } } type network struct { clients map[util.Key]bsnet.Receiver + delay delay.D } func (n *network) Adapter(p peer.Peer) bsnet.BitSwapNetwork { @@ -84,13 +87,15 @@ func (n *network) deliver( return errors.New("Invalid input") } + n.delay.Wait() + nextPeer, nextMsg := r.ReceiveMessage(context.TODO(), from, message) if (nextPeer == nil && nextMsg != nil) || (nextMsg == nil && nextPeer != nil) { return errors.New("Malformed client request") } - if nextPeer == nil && nextMsg == nil { + if nextPeer == nil && nextMsg == nil { // no response to send return nil } diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 0bfb0cb1e..7a9f48e2d 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -9,11 +9,12 @@ import ( bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/peer" + delay "github.com/jbenet/go-ipfs/util/delay" testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestSendRequestToCooperativePeer(t *testing.T) { - net := VirtualNetwork() + net := VirtualNetwork(delay.Fixed(0)) idOfRecipient := []byte("recipient") @@ -60,7 +61,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) { } func TestSendMessageAsyncButWaitForResponse(t *testing.T) { - net := VirtualNetwork() + net := VirtualNetwork(delay.Fixed(0)) idOfResponder := []byte("responder") waiter := net.Adapter(testutil.NewPeerWithIDString("waiter")) responder := net.Adapter(testutil.NewPeerWithID(idOfResponder)) From 752ac9a0048ab79294bb4318234084c999c6d0db Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 12 Dec 2014 22:37:24 -0800 Subject: [PATCH 0188/1035] 
refac(bs/test) provide a shared net delay constant License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@1c9c90aebea185f99dd7b93e41924f502e472c1d --- bitswap/bitswap_test.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 21b259a7e..09018b870 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -15,10 +15,12 @@ import ( testutil "github.com/jbenet/go-ipfs/util/testutil" ) +const kNetworkDelay = 0 * time.Millisecond + func TestClose(t *testing.T) { // TODO t.Skip("TODO Bitswap's Close implementation is a WIP") - vnet := tn.VirtualNetwork(delay.Fixed(0)) + vnet := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rout := mock.VirtualRoutingServer() sesgen := NewSessionGenerator(vnet, rout) bgen := blocksutil.NewBlockGenerator() @@ -32,7 +34,7 @@ func TestClose(t *testing.T) { func TestGetBlockTimeout(t *testing.T) { - net := tn.VirtualNetwork(delay.Fixed(0)) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rs := mock.VirtualRoutingServer() g := NewSessionGenerator(net, rs) @@ -49,7 +51,7 @@ func TestGetBlockTimeout(t *testing.T) { func TestProviderForKeyButNetworkCannotFind(t *testing.T) { - net := tn.VirtualNetwork(delay.Fixed(0)) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rs := mock.VirtualRoutingServer() g := NewSessionGenerator(net, rs) @@ -70,7 +72,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { - net := tn.VirtualNetwork(delay.Fixed(0)) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rs := mock.VirtualRoutingServer() block := blocks.NewBlock([]byte("block")) g := NewSessionGenerator(net, rs) @@ -122,7 +124,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { if testing.Short() { t.SkipNow() } - net := tn.VirtualNetwork(delay.Fixed(0)) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rs := 
mock.VirtualRoutingServer() sg := NewSessionGenerator(net, rs) bg := blocksutil.NewBlockGenerator() @@ -182,7 +184,7 @@ func TestSendToWantingPeer(t *testing.T) { t.SkipNow() } - net := tn.VirtualNetwork(delay.Fixed(0)) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rs := mock.VirtualRoutingServer() sg := NewSessionGenerator(net, rs) bg := blocksutil.NewBlockGenerator() From f5676ba65554198ddfa9835c1055003afb06d0d9 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 12 Dec 2014 22:56:36 -0800 Subject: [PATCH 0189/1035] refactor(mockrouting) misc License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@1c24fdaa6daee490336637a83966a5414d6081b6 --- bitswap/bitswap_test.go | 20 +++++++++++--------- bitswap/testutils.go | 8 ++++---- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 09018b870..d58ff596a 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -10,18 +10,20 @@ import ( blocks "github.com/jbenet/go-ipfs/blocks" blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" - mock "github.com/jbenet/go-ipfs/routing/mock" + mockrouting "github.com/jbenet/go-ipfs/routing/mock" delay "github.com/jbenet/go-ipfs/util/delay" testutil "github.com/jbenet/go-ipfs/util/testutil" ) +// FIXME the tests are really sensitive to the network delay. 
fix them to work +// well under varying conditions const kNetworkDelay = 0 * time.Millisecond func TestClose(t *testing.T) { // TODO t.Skip("TODO Bitswap's Close implementation is a WIP") vnet := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rout := mock.VirtualRoutingServer() + rout := mockrouting.NewServer() sesgen := NewSessionGenerator(vnet, rout) bgen := blocksutil.NewBlockGenerator() @@ -35,7 +37,7 @@ func TestClose(t *testing.T) { func TestGetBlockTimeout(t *testing.T) { net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rs := mock.VirtualRoutingServer() + rs := mockrouting.NewServer() g := NewSessionGenerator(net, rs) self := g.Next() @@ -52,11 +54,11 @@ func TestGetBlockTimeout(t *testing.T) { func TestProviderForKeyButNetworkCannotFind(t *testing.T) { net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rs := mock.VirtualRoutingServer() + rs := mockrouting.NewServer() g := NewSessionGenerator(net, rs) block := blocks.NewBlock([]byte("block")) - rs.Announce(testutil.NewPeerWithIDString("testing"), block.Key()) // but not on network + rs.Client(testutil.NewPeerWithIDString("testing")).Provide(context.Background(), block.Key()) // but not on network solo := g.Next() @@ -73,7 +75,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rs := mock.VirtualRoutingServer() + rs := mockrouting.NewServer() block := blocks.NewBlock([]byte("block")) g := NewSessionGenerator(net, rs) @@ -125,7 +127,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { t.SkipNow() } net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rs := mock.VirtualRoutingServer() + rs := mockrouting.NewServer() sg := NewSessionGenerator(net, rs) bg := blocksutil.NewBlockGenerator() @@ -140,7 +142,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { for _, b := range blocks { first.Blockstore().Put(b) 
first.Exchange.HasBlock(context.Background(), b) - rs.Announce(first.Peer, b.Key()) + rs.Client(first.Peer).Provide(context.Background(), b.Key()) } t.Log("Distribute!") @@ -185,7 +187,7 @@ func TestSendToWantingPeer(t *testing.T) { } net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rs := mock.VirtualRoutingServer() + rs := mockrouting.NewServer() sg := NewSessionGenerator(net, rs) bg := blocksutil.NewBlockGenerator() diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 10a02606b..8ea4e7af8 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,13 +10,13 @@ import ( exchange "github.com/jbenet/go-ipfs/exchange" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" peer "github.com/jbenet/go-ipfs/peer" - mock "github.com/jbenet/go-ipfs/routing/mock" + mockrouting "github.com/jbenet/go-ipfs/routing/mock" datastore2 "github.com/jbenet/go-ipfs/util/datastore2" delay "github.com/jbenet/go-ipfs/util/delay" ) func NewSessionGenerator( - net tn.Network, rs mock.RoutingServer) SessionGenerator { + net tn.Network, rs mockrouting.Server) SessionGenerator { return SessionGenerator{ net: net, rs: rs, @@ -28,7 +28,7 @@ func NewSessionGenerator( type SessionGenerator struct { seq int net tn.Network - rs mock.RoutingServer + rs mockrouting.Server ps peer.Peerstore } @@ -67,7 +67,7 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // NB: It's easy make mistakes by providing the same peer ID to two different // sessions. To safeguard, use the SessionGenerator to generate sessions. It's // just a much better idea. 
-func session(net tn.Network, rs mock.RoutingServer, ps peer.Peerstore, id peer.ID) Instance { +func session(net tn.Network, rs mockrouting.Server, ps peer.Peerstore, id peer.ID) Instance { p := ps.WithID(id) adapter := net.Adapter(p) From 32a67e7ac3663766437abe6e29573ba3b9915add Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sat, 13 Dec 2014 05:34:11 -0800 Subject: [PATCH 0190/1035] feat(bs/testutil) use write cache License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@aa4ba09b823a7b1b79a57c85383b86e9754eba7a --- bitswap/testutils.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 8ea4e7af8..9e9b80230 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -74,7 +74,12 @@ func session(net tn.Network, rs mockrouting.Server, ps peer.Peerstore, id peer.I htc := rs.Client(p) bsdelay := delay.Fixed(0) - bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay))) + const kWriteCacheElems = 100 + bstore, err := blockstore.WriteCached(blockstore.NewBlockstore(ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay))), kWriteCacheElems) + if err != nil { + // FIXME perhaps change signature and return error. 
+ panic(err.Error()) + } const alwaysSendToPeer = true ctx := context.TODO() From be60c11b6fcefdfd1fbc777565d5a4d15127a415 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sat, 13 Dec 2014 03:50:35 -0800 Subject: [PATCH 0191/1035] misc(bitswap/strat) rm noisy message License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@33ae9d43e5ccf2850f261760a771d19aee93be52 --- bitswap/strategy/strategy.go | 1 - 1 file changed, 1 deletion(-) diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index 3993eba05..fe7414caa 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -71,7 +71,6 @@ func (s *strategist) ShouldSendBlockToPeer(k u.Key, p peer.Peer) bool { // Dont resend blocks within a certain time period t, ok := ledger.sentToPeer[k] if ok && t.Add(resendTimeoutPeriod).After(time.Now()) { - log.Error("Prevented block resend!") return false } From c7f5fff60b836129fbeb1b20c2bd1910d9874806 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 10 Dec 2014 02:02:49 +0000 Subject: [PATCH 0192/1035] give sessiongenerator a master context for easy cancelling This commit was moved from ipfs/go-bitswap@1e5f280a28564f8561f0d5f80f990f043607f574 --- bitswap/testutils.go | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 9e9b80230..bd86ba308 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -17,24 +17,33 @@ import ( func NewSessionGenerator( net tn.Network, rs mockrouting.Server) SessionGenerator { + ctx, cancel := context.WithCancel(context.TODO()) return SessionGenerator{ - net: net, - rs: rs, - ps: peer.NewPeerstore(), - seq: 0, + ps: peer.NewPeerstore(), + net: net, + rs: rs, + seq: 0, + ctx: ctx, + cancel: cancel, } } type SessionGenerator struct { - seq int - net tn.Network - rs mockrouting.Server - ps peer.Peerstore + seq int + net tn.Network + rs mockrouting.Server + ps peer.Peerstore + ctx 
context.Context + cancel context.CancelFunc +} + +func (g *SessionGenerator) Stop() { + g.cancel() } func (g *SessionGenerator) Next() Instance { g.seq++ - return session(g.net, g.rs, g.ps, []byte(string(g.seq))) + return session(g.ctx, g.net, g.rs, g.ps, []byte(string(g.seq))) } func (g *SessionGenerator) Instances(n int) []Instance { @@ -67,7 +76,7 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // NB: It's easy make mistakes by providing the same peer ID to two different // sessions. To safeguard, use the SessionGenerator to generate sessions. It's // just a much better idea. -func session(net tn.Network, rs mockrouting.Server, ps peer.Peerstore, id peer.ID) Instance { +func session(ctx context.Context, net tn.Network, rs mockrouting.Server, ps peer.Peerstore, id peer.ID) Instance { p := ps.WithID(id) adapter := net.Adapter(p) @@ -82,7 +91,6 @@ func session(net tn.Network, rs mockrouting.Server, ps peer.Peerstore, id peer.I } const alwaysSendToPeer = true - ctx := context.TODO() bs := New(ctx, p, adapter, htc, bstore, alwaysSendToPeer) From 975301f050f4e3f41b4457caeb7c95cb6ae87df3 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 14 Dec 2014 16:35:09 -0800 Subject: [PATCH 0193/1035] style: Stop -> Close() error for Closer interface License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@00aeb1077c5d2dc40c31fcbe4f3d9b8d0a52bd56 --- bitswap/testutils.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index bd86ba308..b8763952c 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -37,8 +37,9 @@ type SessionGenerator struct { cancel context.CancelFunc } -func (g *SessionGenerator) Stop() { +func (g *SessionGenerator) Close() error { g.cancel() + return nil // for Closer interface } func (g *SessionGenerator) Next() Instance { From 3e05fec256535980a46b8cbe62a418fb1aaceef8 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: 
Sun, 14 Dec 2014 16:37:42 -0800 Subject: [PATCH 0194/1035] doc TODO License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@68ae22c6d5198b3b196084591ed140a338c0af46 --- bitswap/testutils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index b8763952c..48cb11a45 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -23,7 +23,7 @@ func NewSessionGenerator( net: net, rs: rs, seq: 0, - ctx: ctx, + ctx: ctx, // TODO take ctx as param to Next, Instances cancel: cancel, } } From 4d8ffef5a1e0904d2fdb776a041a07ee94684ef3 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Tue, 16 Dec 2014 08:55:46 -0800 Subject: [PATCH 0195/1035] Integrated new network into ipfs This commit was moved from ipfs/go-bitswap@6cebf01b41f91ddbc41a93b6f027e305b0f2b012 --- bitswap/message/message.go | 27 ++++++++++------ bitswap/message/message_test.go | 27 ++++------------ bitswap/network/ipfs_impl.go | 57 ++++++++++++++++++--------------- 3 files changed, 55 insertions(+), 56 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index b69450a6f..d71833b93 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -1,13 +1,14 @@ package message import ( - proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + "io" + blocks "github.com/jbenet/go-ipfs/blocks" pb "github.com/jbenet/go-ipfs/exchange/bitswap/message/internal/pb" - netmsg "github.com/jbenet/go-ipfs/net/message" - nm "github.com/jbenet/go-ipfs/net/message" - peer "github.com/jbenet/go-ipfs/peer" + inet "github.com/jbenet/go-ipfs/net" u "github.com/jbenet/go-ipfs/util" + + ggio "code.google.com/p/gogoprotobuf/io" ) // TODO move message.go into the bitswap package @@ -38,7 +39,7 @@ type BitSwapMessage interface { type Exportable interface { ToProto() *pb.Message - ToNet(p peer.Peer) (nm.NetMessage, error) + ToNet(w io.Writer) error } type impl struct 
{ @@ -92,11 +93,14 @@ func (m *impl) AddBlock(b *blocks.Block) { m.blocks[b.Key()] = b } -func FromNet(nmsg netmsg.NetMessage) (BitSwapMessage, error) { +func FromNet(r io.Reader) (BitSwapMessage, error) { + pbr := ggio.NewDelimitedReader(r, inet.MessageSizeMax) + pb := new(pb.Message) - if err := proto.Unmarshal(nmsg.Data(), pb); err != nil { + if err := pbr.ReadMsg(pb); err != nil { return nil, err } + m := newMessageFromProto(*pb) return m, nil } @@ -112,6 +116,11 @@ func (m *impl) ToProto() *pb.Message { return pb } -func (m *impl) ToNet(p peer.Peer) (nm.NetMessage, error) { - return nm.FromObject(p, m.ToProto()) +func (m *impl) ToNet(w io.Writer) error { + pbw := ggio.NewDelimitedWriter(w) + + if err := pbw.WriteMsg(m.ToProto()); err != nil { + return err + } + return nil } diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 5fe98634c..681b60a6f 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -7,7 +7,6 @@ import ( blocks "github.com/jbenet/go-ipfs/blocks" pb "github.com/jbenet/go-ipfs/exchange/bitswap/message/internal/pb" u "github.com/jbenet/go-ipfs/util" - testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestAppendWanted(t *testing.T) { @@ -87,18 +86,6 @@ func TestCopyProtoByValue(t *testing.T) { } } -func TestToNetMethodSetsPeer(t *testing.T) { - m := New() - p := testutil.NewPeerWithIDString("X") - netmsg, err := m.ToNet(p) - if err != nil { - t.Fatal(err) - } - if !(netmsg.Peer().Key() == p.Key()) { - t.Fatal("Peer key is different") - } -} - func TestToNetFromNetPreservesWantList(t *testing.T) { original := New() original.AddWanted(u.Key("M")) @@ -107,13 +94,12 @@ func TestToNetFromNetPreservesWantList(t *testing.T) { original.AddWanted(u.Key("T")) original.AddWanted(u.Key("F")) - p := testutil.NewPeerWithIDString("X") - netmsg, err := original.ToNet(p) - if err != nil { + var buf bytes.Buffer + if err := original.ToNet(&buf); err != nil { t.Fatal(err) } - copied, err := 
FromNet(netmsg) + copied, err := FromNet(&buf) if err != nil { t.Fatal(err) } @@ -138,13 +124,12 @@ func TestToAndFromNetMessage(t *testing.T) { original.AddBlock(blocks.NewBlock([]byte("F"))) original.AddBlock(blocks.NewBlock([]byte("M"))) - p := testutil.NewPeerWithIDString("X") - netmsg, err := original.ToNet(p) - if err != nil { + var buf bytes.Buffer + if err := original.ToNet(&buf); err != nil { t.Fatal(err) } - m2, err := FromNet(netmsg) + m2, err := FromNet(&buf) if err != nil { t.Fatal(err) } diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index f356285ef..3e6e54787 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -5,7 +5,6 @@ import ( bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" inet "github.com/jbenet/go-ipfs/net" - netmsg "github.com/jbenet/go-ipfs/net/message" peer "github.com/jbenet/go-ipfs/peer" util "github.com/jbenet/go-ipfs/util" ) @@ -14,46 +13,48 @@ var log = util.Logger("bitswap_network") // NewFromIpfsNetwork returns a BitSwapNetwork supported by underlying IPFS // Dialer & Service -func NewFromIpfsNetwork(s inet.Service, dialer inet.Dialer) BitSwapNetwork { +func NewFromIpfsNetwork(n inet.Network) BitSwapNetwork { bitswapNetwork := impl{ - service: s, - dialer: dialer, + network: n, } - s.SetHandler(&bitswapNetwork) + n.SetHandler(inet.ProtocolBitswap, bitswapNetwork.handleNewStream) return &bitswapNetwork } // impl transforms the ipfs network interface, which sends and receives // NetMessage objects, into the bitswap network interface. type impl struct { - service inet.Service - dialer inet.Dialer + network inet.Network // inbound messages from the network are forwarded to the receiver receiver Receiver } -// HandleMessage marshals and unmarshals net messages, forwarding them to the -// BitSwapMessage receiver -func (bsnet *impl) HandleMessage( - ctx context.Context, incoming netmsg.NetMessage) netmsg.NetMessage { +// handleNewStream receives a new stream from the network. 
+func (bsnet *impl) handleNewStream(s inet.Stream) { if bsnet.receiver == nil { - return nil + return } - received, err := bsmsg.FromNet(incoming) - if err != nil { - go bsnet.receiver.ReceiveError(err) - return nil - } + go func() { + defer s.Close() + + received, err := bsmsg.FromNet(s) + if err != nil { + go bsnet.receiver.ReceiveError(err) + return + } + + p := s.Conn().RemotePeer() + ctx := context.Background() + bsnet.receiver.ReceiveMessage(ctx, p, received) + }() - bsnet.receiver.ReceiveMessage(ctx, incoming.Peer(), received) - return nil } func (bsnet *impl) DialPeer(ctx context.Context, p peer.Peer) error { - return bsnet.dialer.DialPeer(ctx, p) + return bsnet.network.DialPeer(ctx, p) } func (bsnet *impl) SendMessage( @@ -61,11 +62,13 @@ func (bsnet *impl) SendMessage( p peer.Peer, outgoing bsmsg.BitSwapMessage) error { - nmsg, err := outgoing.ToNet(p) + s, err := bsnet.network.NewStream(inet.ProtocolBitswap, p) if err != nil { return err } - return bsnet.service.SendMessage(ctx, nmsg) + defer s.Close() + + return outgoing.ToNet(s) } func (bsnet *impl) SendRequest( @@ -73,15 +76,17 @@ func (bsnet *impl) SendRequest( p peer.Peer, outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) { - outgoingMsg, err := outgoing.ToNet(p) + s, err := bsnet.network.NewStream(inet.ProtocolBitswap, p) if err != nil { return nil, err } - incomingMsg, err := bsnet.service.SendRequest(ctx, outgoingMsg) - if err != nil { + defer s.Close() + + if err := outgoing.ToNet(s); err != nil { return nil, err } - return bsmsg.FromNet(incomingMsg) + + return bsmsg.FromNet(s) } func (bsnet *impl) SetDelegate(r Receiver) { From 65f5949279f39e739db06867da1d7123efbaf132 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Tue, 16 Dec 2014 14:53:02 -0800 Subject: [PATCH 0196/1035] make vendor This commit was moved from ipfs/go-bitswap@b3f309a92754c2f646546e07a18e8a67bc9aaec8 --- bitswap/message/message.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/bitswap/message/message.go b/bitswap/message/message.go index d71833b93..62a39be91 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -8,7 +8,7 @@ import ( inet "github.com/jbenet/go-ipfs/net" u "github.com/jbenet/go-ipfs/util" - ggio "code.google.com/p/gogoprotobuf/io" + ggio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io" ) // TODO move message.go into the bitswap package From b59bdae59ea940ad47c9abadc87c58f3a0ec7ef5 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 10 Dec 2014 02:00:37 +0000 Subject: [PATCH 0197/1035] create wantlist object This commit was moved from ipfs/go-bitswap@e92ee20e0f6a74fe8c58d3b2a3597c2dd59ae0dd --- bitswap/wantlist/wantlist.go | 56 ++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 bitswap/wantlist/wantlist.go diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go new file mode 100644 index 000000000..041064901 --- /dev/null +++ b/bitswap/wantlist/wantlist.go @@ -0,0 +1,56 @@ +package wantlist + +import ( + u "github.com/jbenet/go-ipfs/util" + "sort" +) + +type Wantlist struct { + set map[u.Key]*Entry +} + +func NewWantlist() *Wantlist { + return &Wantlist{ + set: make(map[u.Key]*Entry), + } +} + +type Entry struct { + Value u.Key + Priority int +} + +func (w *Wantlist) Add(k u.Key, priority int) { + if _, ok := w.set[k]; ok { + return + } + w.set[k] = &Entry{ + Value: k, + Priority: priority, + } +} + +func (w *Wantlist) Remove(k u.Key) { + delete(w.set, k) +} + +func (w *Wantlist) Contains(k u.Key) bool { + _, ok := w.set[k] + return ok +} + +type entrySlice []*Entry + +func (es entrySlice) Len() int { return len(es) } +func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] } +func (es entrySlice) Less(i, j int) bool { return es[i].Priority < es[j].Priority } + +func (w *Wantlist) Entries() []*Entry { + var es entrySlice + + for _, e := range w.set { + es = append(es, e) + } + sort.Sort(es) + return es +} 
From 54b8d028434d117aeea6b30f8518241e098913ed Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 10 Dec 2014 02:03:20 +0000 Subject: [PATCH 0198/1035] implement bitswap roundWorker make vendor This commit was moved from ipfs/go-bitswap@ade18f0ce36c848c1110a8595641d2998bb3893e --- bitswap/bitswap.go | 104 ++++++++++++++-------- bitswap/bitswap_test.go | 65 +++++++++++--- bitswap/message/internal/pb/message.pb.go | 64 ++++++++++++- bitswap/message/internal/pb/message.proto | 17 +++- bitswap/message/message.go | 92 ++++++++++++------- bitswap/message/message_test.go | 59 +++++++----- bitswap/strategy/interface.go | 3 + bitswap/strategy/ledger.go | 16 ++-- bitswap/strategy/strategy.go | 70 ++++++++++++++- bitswap/strategy/strategy_test.go | 2 +- 10 files changed, 378 insertions(+), 114 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 64f293528..1e0e86b61 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -15,6 +15,7 @@ import ( bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" notifications "github.com/jbenet/go-ipfs/exchange/bitswap/notifications" strategy "github.com/jbenet/go-ipfs/exchange/bitswap/strategy" + wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" eventlog "github.com/jbenet/go-ipfs/util/eventlog" @@ -29,6 +30,8 @@ const maxProvidersPerRequest = 3 const providerRequestTimeout = time.Second * 10 const hasBlockTimeout = time.Second * 15 +const roundTime = time.Second / 2 + // New initializes a BitSwap instance that communicates over the // provided BitSwapNetwork. This function registers the returned instance as // the network delegate. 
@@ -41,6 +44,7 @@ func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, rout notif := notifications.New() go func() { <-ctx.Done() + cancelFunc() notif.Shutdown() }() @@ -51,11 +55,12 @@ func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, rout strategy: strategy.New(nice), routing: routing, sender: network, - wantlist: u.NewKeySet(), + wantlist: wl.NewWantlist(), batchRequests: make(chan []u.Key, 32), } network.SetDelegate(bs) go bs.loop(ctx) + go bs.roundWorker(ctx) return bs } @@ -85,7 +90,7 @@ type bitswap struct { // TODO(brian): save the strategy's state to the datastore strategy strategy.Strategy - wantlist u.KeySet + wantlist *wl.Wantlist // cancelFunc signals cancellation to the bitswap event loop cancelFunc func() @@ -166,8 +171,8 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e panic("Cant send wantlist to nil peerchan") } message := bsmsg.New() - for _, wanted := range bs.wantlist.Keys() { - message.AddWanted(wanted) + for _, wanted := range bs.wantlist.Entries() { + message.AddEntry(wanted.Value, wanted.Priority, false) } for peerToQuery := range peers { log.Debug("sending query to: %s", peerToQuery) @@ -195,9 +200,9 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e return nil } -func (bs *bitswap) sendWantlistToProviders(ctx context.Context, ks []u.Key) { +func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wl.Wantlist) { wg := sync.WaitGroup{} - for _, k := range ks { + for _, e := range wantlist.Entries() { wg.Add(1) go func(k u.Key) { child, _ := context.WithTimeout(ctx, providerRequestTimeout) @@ -208,11 +213,44 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, ks []u.Key) { log.Errorf("error sending wantlist: %s", err) } wg.Done() - }(k) + }(e.Value) } wg.Wait() } +func (bs *bitswap) roundWorker(ctx context.Context) { + roundTicker := time.NewTicker(roundTime) + bandwidthPerRound := 500000 + for 
{ + select { + case <-ctx.Done(): + return + case <-roundTicker.C: + alloc, err := bs.strategy.GetAllocation(bandwidthPerRound, bs.blockstore) + if err != nil { + log.Critical("%s", err) + } + //log.Errorf("Allocation: %v", alloc) + bs.processStrategyAllocation(ctx, alloc) + } + } +} + +func (bs *bitswap) processStrategyAllocation(ctx context.Context, alloc []*strategy.Task) { + for _, t := range alloc { + for _, block := range t.Blocks { + message := bsmsg.New() + message.AddBlock(block) + for _, wanted := range bs.wantlist.Entries() { + message.AddEntry(wanted.Value, wanted.Priority, false) + } + if err := bs.send(ctx, t.Peer, message); err != nil { + log.Errorf("Message Send Failed: %s", err) + } + } + } +} + // TODO ensure only one active request per key func (bs *bitswap) loop(parent context.Context) { @@ -228,7 +266,7 @@ func (bs *bitswap) loop(parent context.Context) { select { case <-broadcastSignal.C: // Resend unfulfilled wantlist keys - bs.sendWantlistToProviders(ctx, bs.wantlist.Keys()) + bs.sendWantlistToProviders(ctx, bs.wantlist) case ks := <-bs.batchRequests: // TODO: implement batching on len(ks) > X for some X // i.e. if given 20 keys, fetch first five, then next @@ -239,7 +277,7 @@ func (bs *bitswap) loop(parent context.Context) { continue } for _, k := range ks { - bs.wantlist.Add(k) + bs.wantlist.Add(k, 1) } // NB: send want list to providers for the first peer in this list. // the assumption is made that the providers of the first key in @@ -277,45 +315,41 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm return nil, nil } - // Record message bytes in ledger - // TODO: this is bad, and could be easily abused. - // Should only track *useful* messages in ledger // This call records changes to wantlists, blocks received, // and number of bytes transfered. bs.strategy.MessageReceived(p, incoming) + // TODO: this is bad, and could be easily abused. 
+ // Should only track *useful* messages in ledger + var blkeys []u.Key for _, block := range incoming.Blocks() { + blkeys = append(blkeys, block.Key()) if err := bs.HasBlock(ctx, block); err != nil { log.Error(err) } } - - for _, key := range incoming.Wantlist() { - if bs.strategy.ShouldSendBlockToPeer(key, p) { - if block, errBlockNotFound := bs.blockstore.Get(key); errBlockNotFound != nil { - continue - } else { - // Create a separate message to send this block in - blkmsg := bsmsg.New() - - // TODO: only send this the first time - // no sense in sending our wantlist to the - // same peer multiple times - for _, k := range bs.wantlist.Keys() { - blkmsg.AddWanted(k) - } - - blkmsg.AddBlock(block) - bs.send(ctx, p, blkmsg) - bs.strategy.BlockSentToPeer(block.Key(), p) - } - } + if len(blkeys) > 0 { + bs.cancelBlocks(ctx, blkeys) } // TODO: consider changing this function to not return anything return nil, nil } +func (bs *bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { + message := bsmsg.New() + message.SetFull(false) + for _, k := range bkeys { + message.AddEntry(k, 0, true) + } + for _, p := range bs.strategy.Peers() { + err := bs.send(ctx, p, message) + if err != nil { + log.Errorf("Error sending message: %s", err) + } + } +} + func (bs *bitswap) ReceiveError(err error) { log.Errorf("Bitswap ReceiveError: %s", err) // TODO log the network error @@ -337,8 +371,8 @@ func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block *blocks.Block) if bs.strategy.ShouldSendBlockToPeer(block.Key(), p) { message := bsmsg.New() message.AddBlock(block) - for _, wanted := range bs.wantlist.Keys() { - message.AddWanted(wanted) + for _, wanted := range bs.wantlist.Entries() { + message.AddEntry(wanted.Value, wanted.Priority, false) } if err := bs.send(ctx, p, message); err != nil { return err diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index d58ff596a..0e72883cc 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -11,6 +11,7 
@@ import ( blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/jbenet/go-ipfs/routing/mock" + u "github.com/jbenet/go-ipfs/util" delay "github.com/jbenet/go-ipfs/util/delay" testutil "github.com/jbenet/go-ipfs/util/testutil" ) @@ -25,6 +26,7 @@ func TestClose(t *testing.T) { vnet := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rout := mockrouting.NewServer() sesgen := NewSessionGenerator(vnet, rout) + defer sesgen.Stop() bgen := blocksutil.NewBlockGenerator() block := bgen.Next() @@ -39,6 +41,7 @@ func TestGetBlockTimeout(t *testing.T) { net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rs := mockrouting.NewServer() g := NewSessionGenerator(net, rs) + defer g.Stop() self := g.Next() @@ -56,11 +59,13 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rs := mockrouting.NewServer() g := NewSessionGenerator(net, rs) + defer g.Stop() block := blocks.NewBlock([]byte("block")) rs.Client(testutil.NewPeerWithIDString("testing")).Provide(context.Background(), block.Key()) // but not on network solo := g.Next() + defer solo.Exchange.Close() ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) _, err := solo.Exchange.GetBlock(ctx, block.Key()) @@ -78,8 +83,10 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { rs := mockrouting.NewServer() block := blocks.NewBlock([]byte("block")) g := NewSessionGenerator(net, rs) + defer g.Stop() hasBlock := g.Next() + defer hasBlock.Exchange.Close() if err := hasBlock.Blockstore().Put(block); err != nil { t.Fatal(err) @@ -89,6 +96,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { } wantsBlock := g.Next() + defer wantsBlock.Exchange.Close() ctx, _ := context.WithTimeout(context.Background(), time.Second) received, err := wantsBlock.Exchange.GetBlock(ctx, block.Key()) @@ -107,7 +115,7 @@ func TestLargeSwarm(t *testing.T) { t.SkipNow() } t.Parallel() 
- numInstances := 5 + numInstances := 500 numBlocks := 2 PerformDistributionTest(t, numInstances, numBlocks) } @@ -129,6 +137,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rs := mockrouting.NewServer() sg := NewSessionGenerator(net, rs) + defer sg.Stop() bg := blocksutil.NewBlockGenerator() t.Log("Test a few nodes trying to get one file with a lot of blocks") @@ -138,24 +147,29 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { t.Log("Give the blocks to the first instance") + var blkeys []u.Key first := instances[0] for _, b := range blocks { first.Blockstore().Put(b) + blkeys = append(blkeys, b.Key()) first.Exchange.HasBlock(context.Background(), b) rs.Client(first.Peer).Provide(context.Background(), b.Key()) } t.Log("Distribute!") - var wg sync.WaitGroup - + wg := sync.WaitGroup{} for _, inst := range instances { - for _, b := range blocks { - wg.Add(1) - // NB: executing getOrFail concurrently puts tremendous pressure on - // the goroutine scheduler - getOrFail(inst, b, t, &wg) - } + wg.Add(1) + go func(inst Instance) { + defer wg.Done() + outch, err := inst.Exchange.GetBlocks(context.TODO(), blkeys) + if err != nil { + t.Fatal(err) + } + for _ = range outch { + } + }(inst) } wg.Wait() @@ -189,6 +203,7 @@ func TestSendToWantingPeer(t *testing.T) { net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rs := mockrouting.NewServer() sg := NewSessionGenerator(net, rs) + defer sg.Stop() bg := blocksutil.NewBlockGenerator() me := sg.Next() @@ -201,7 +216,7 @@ func TestSendToWantingPeer(t *testing.T) { alpha := bg.Next() - const timeout = 100 * time.Millisecond // FIXME don't depend on time + const timeout = 1000 * time.Millisecond // FIXME don't depend on time t.Logf("Peer %v attempts to get %v. 
NB: not available\n", w.Peer, alpha.Key()) ctx, _ := context.WithTimeout(context.Background(), timeout) @@ -246,3 +261,33 @@ func TestSendToWantingPeer(t *testing.T) { t.Fatal("Expected to receive alpha from me") } } + +func TestBasicBitswap(t *testing.T) { + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) + rs := mockrouting.NewServer() + sg := NewSessionGenerator(net, rs) + bg := blocksutil.NewBlockGenerator() + + t.Log("Test a few nodes trying to get one file with a lot of blocks") + + instances := sg.Instances(2) + blocks := bg.Blocks(1) + err := instances[0].Exchange.HasBlock(context.TODO(), blocks[0]) + if err != nil { + t.Fatal(err) + } + + ctx, _ := context.WithTimeout(context.TODO(), time.Second*5) + blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Key()) + if err != nil { + t.Fatal(err) + } + + t.Log(blk) + for _, inst := range instances { + err := inst.Exchange.Close() + if err != nil { + t.Fatal(err) + } + } +} diff --git a/bitswap/message/internal/pb/message.pb.go b/bitswap/message/internal/pb/message.pb.go index f6f8a9bbc..4ddfc56f7 100644 --- a/bitswap/message/internal/pb/message.pb.go +++ b/bitswap/message/internal/pb/message.pb.go @@ -21,16 +21,16 @@ var _ = proto.Marshal var _ = math.Inf type Message struct { - Wantlist []string `protobuf:"bytes,1,rep,name=wantlist" json:"wantlist,omitempty"` - Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks" json:"blocks,omitempty"` - XXX_unrecognized []byte `json:"-"` + Wantlist *Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist" json:"wantlist,omitempty"` + Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks" json:"blocks,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} -func (m *Message) GetWantlist() []string { +func (m *Message) GetWantlist() *Message_Wantlist { if m != nil { return m.Wantlist } @@ -44,5 +44,61 @@ func (m *Message) 
GetBlocks() [][]byte { return nil } +type Message_Wantlist struct { + Entries []*Message_Wantlist_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` + Full *bool `protobuf:"varint,2,opt,name=full" json:"full,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Message_Wantlist) Reset() { *m = Message_Wantlist{} } +func (m *Message_Wantlist) String() string { return proto.CompactTextString(m) } +func (*Message_Wantlist) ProtoMessage() {} + +func (m *Message_Wantlist) GetEntries() []*Message_Wantlist_Entry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *Message_Wantlist) GetFull() bool { + if m != nil && m.Full != nil { + return *m.Full + } + return false +} + +type Message_Wantlist_Entry struct { + Block *string `protobuf:"bytes,1,opt,name=block" json:"block,omitempty"` + Priority *int32 `protobuf:"varint,2,opt,name=priority" json:"priority,omitempty"` + Cancel *bool `protobuf:"varint,3,opt,name=cancel" json:"cancel,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Message_Wantlist_Entry) Reset() { *m = Message_Wantlist_Entry{} } +func (m *Message_Wantlist_Entry) String() string { return proto.CompactTextString(m) } +func (*Message_Wantlist_Entry) ProtoMessage() {} + +func (m *Message_Wantlist_Entry) GetBlock() string { + if m != nil && m.Block != nil { + return *m.Block + } + return "" +} + +func (m *Message_Wantlist_Entry) GetPriority() int32 { + if m != nil && m.Priority != nil { + return *m.Priority + } + return 0 +} + +func (m *Message_Wantlist_Entry) GetCancel() bool { + if m != nil && m.Cancel != nil { + return *m.Cancel + } + return false +} + func init() { } diff --git a/bitswap/message/internal/pb/message.proto b/bitswap/message/internal/pb/message.proto index a8c6c7252..7c44f3a6b 100644 --- a/bitswap/message/internal/pb/message.proto +++ b/bitswap/message/internal/pb/message.proto @@ -1,6 +1,19 @@ package bitswap.message.pb; message Message { - repeated string wantlist = 1; - repeated bytes 
blocks = 2; + + message Wantlist { + + message Entry { + optional string block = 1; // the block key + optional int32 priority = 2; // the priority (normalized). default to 1 + optional bool cancel = 3; // whether this revokes an entry + } + + repeated Entry entries = 1; // a list of wantlist entries + optional bool full = 2; // whether this is the full wantlist. default to false + } + + optional Wantlist wantlist = 1; + repeated bytes blocks = 2; } diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 62a39be91..288fc9da7 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -9,6 +9,7 @@ import ( u "github.com/jbenet/go-ipfs/util" ggio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io" + proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" ) // TODO move message.go into the bitswap package @@ -17,21 +18,21 @@ import ( type BitSwapMessage interface { // Wantlist returns a slice of unique keys that represent data wanted by // the sender. - Wantlist() []u.Key + Wantlist() []*Entry // Blocks returns a slice of unique blocks Blocks() []*blocks.Block - // AddWanted adds the key to the Wantlist. - // - // Insertion order determines priority. That is, earlier insertions are - // deemed higher priority than keys inserted later. - // - // t = 0, msg.AddWanted(A) - // t = 1, msg.AddWanted(B) - // - // implies Priority(A) > Priority(B) - AddWanted(u.Key) + // AddEntry adds an entry to the Wantlist. 
+ AddEntry(u.Key, int, bool) + + // Sets whether or not the contained wantlist represents the entire wantlist + // true = full wantlist + // false = wantlist 'patch' + // default: true + SetFull(bool) + + Full() bool AddBlock(*blocks.Block) Exportable @@ -43,23 +44,30 @@ type Exportable interface { } type impl struct { - existsInWantlist map[u.Key]struct{} // map to detect duplicates - wantlist []u.Key // slice to preserve ordering - blocks map[u.Key]*blocks.Block // map to detect duplicates + full bool + wantlist map[u.Key]*Entry + blocks map[u.Key]*blocks.Block // map to detect duplicates } func New() BitSwapMessage { return &impl{ - blocks: make(map[u.Key]*blocks.Block), - existsInWantlist: make(map[u.Key]struct{}), - wantlist: make([]u.Key, 0), + blocks: make(map[u.Key]*blocks.Block), + wantlist: make(map[u.Key]*Entry), + full: true, } } +type Entry struct { + Key u.Key + Priority int + Cancel bool +} + func newMessageFromProto(pbm pb.Message) BitSwapMessage { m := New() - for _, s := range pbm.GetWantlist() { - m.AddWanted(u.Key(s)) + m.SetFull(pbm.GetWantlist().GetFull()) + for _, e := range pbm.GetWantlist().GetEntries() { + m.AddEntry(u.Key(e.GetBlock()), int(e.GetPriority()), e.GetCancel()) } for _, d := range pbm.GetBlocks() { b := blocks.NewBlock(d) @@ -68,8 +76,20 @@ func newMessageFromProto(pbm pb.Message) BitSwapMessage { return m } -func (m *impl) Wantlist() []u.Key { - return m.wantlist +func (m *impl) SetFull(full bool) { + m.full = full +} + +func (m *impl) Full() bool { + return m.full +} + +func (m *impl) Wantlist() []*Entry { + var out []*Entry + for _, e := range m.wantlist { + out = append(out, e) + } + return out } func (m *impl) Blocks() []*blocks.Block { @@ -80,13 +100,18 @@ func (m *impl) Blocks() []*blocks.Block { return bs } -func (m *impl) AddWanted(k u.Key) { - _, exists := m.existsInWantlist[k] +func (m *impl) AddEntry(k u.Key, priority int, cancel bool) { + e, exists := m.wantlist[k] if exists { - return + e.Priority = priority + 
e.Cancel = cancel + } else { + m.wantlist[k] = &Entry{ + Key: k, + Priority: priority, + Cancel: cancel, + } } - m.existsInWantlist[k] = struct{}{} - m.wantlist = append(m.wantlist, k) } func (m *impl) AddBlock(b *blocks.Block) { @@ -106,14 +131,19 @@ func FromNet(r io.Reader) (BitSwapMessage, error) { } func (m *impl) ToProto() *pb.Message { - pb := new(pb.Message) - for _, k := range m.Wantlist() { - pb.Wantlist = append(pb.Wantlist, string(k)) + pbm := new(pb.Message) + pbm.Wantlist = new(pb.Message_Wantlist) + for _, e := range m.wantlist { + pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, &pb.Message_Wantlist_Entry{ + Block: proto.String(string(e.Key)), + Priority: proto.Int32(int32(e.Priority)), + Cancel: &e.Cancel, + }) } for _, b := range m.Blocks() { - pb.Blocks = append(pb.Blocks, b.Data) + pbm.Blocks = append(pbm.Blocks, b.Data) } - return pb + return pbm } func (m *impl) ToNet(w io.Writer) error { diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 681b60a6f..29eb6eb4e 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -4,6 +4,8 @@ import ( "bytes" "testing" + proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + blocks "github.com/jbenet/go-ipfs/blocks" pb "github.com/jbenet/go-ipfs/exchange/bitswap/message/internal/pb" u "github.com/jbenet/go-ipfs/util" @@ -12,22 +14,26 @@ import ( func TestAppendWanted(t *testing.T) { const str = "foo" m := New() - m.AddWanted(u.Key(str)) + m.AddEntry(u.Key(str), 1, false) - if !contains(m.ToProto().GetWantlist(), str) { + if !wantlistContains(m.ToProto().GetWantlist(), str) { t.Fail() } + m.ToProto().GetWantlist().GetEntries() } func TestNewMessageFromProto(t *testing.T) { const str = "a_key" protoMessage := new(pb.Message) - protoMessage.Wantlist = []string{string(str)} - if !contains(protoMessage.Wantlist, str) { + protoMessage.Wantlist = new(pb.Message_Wantlist) + protoMessage.Wantlist.Entries = 
[]*pb.Message_Wantlist_Entry{ + &pb.Message_Wantlist_Entry{Block: proto.String(str)}, + } + if !wantlistContains(protoMessage.Wantlist, str) { t.Fail() } m := newMessageFromProto(*protoMessage) - if !contains(m.ToProto().GetWantlist(), str) { + if !wantlistContains(m.ToProto().GetWantlist(), str) { t.Fail() } } @@ -57,7 +63,7 @@ func TestWantlist(t *testing.T) { keystrs := []string{"foo", "bar", "baz", "bat"} m := New() for _, s := range keystrs { - m.AddWanted(u.Key(s)) + m.AddEntry(u.Key(s), 1, false) } exported := m.Wantlist() @@ -65,12 +71,12 @@ func TestWantlist(t *testing.T) { present := false for _, s := range keystrs { - if s == string(k) { + if s == string(k.Key) { present = true } } if !present { - t.Logf("%v isn't in original list", string(k)) + t.Logf("%v isn't in original list", k.Key) t.Fail() } } @@ -80,19 +86,19 @@ func TestCopyProtoByValue(t *testing.T) { const str = "foo" m := New() protoBeforeAppend := m.ToProto() - m.AddWanted(u.Key(str)) - if contains(protoBeforeAppend.GetWantlist(), str) { + m.AddEntry(u.Key(str), 1, false) + if wantlistContains(protoBeforeAppend.GetWantlist(), str) { t.Fail() } } func TestToNetFromNetPreservesWantList(t *testing.T) { original := New() - original.AddWanted(u.Key("M")) - original.AddWanted(u.Key("B")) - original.AddWanted(u.Key("D")) - original.AddWanted(u.Key("T")) - original.AddWanted(u.Key("F")) + original.AddEntry(u.Key("M"), 1, false) + original.AddEntry(u.Key("B"), 1, false) + original.AddEntry(u.Key("D"), 1, false) + original.AddEntry(u.Key("T"), 1, false) + original.AddEntry(u.Key("F"), 1, false) var buf bytes.Buffer if err := original.ToNet(&buf); err != nil { @@ -106,11 +112,11 @@ func TestToNetFromNetPreservesWantList(t *testing.T) { keys := make(map[u.Key]bool) for _, k := range copied.Wantlist() { - keys[k] = true + keys[k.Key] = true } for _, k := range original.Wantlist() { - if _, ok := keys[k]; !ok { + if _, ok := keys[k.Key]; !ok { t.Fatalf("Key Missing: \"%v\"", k) } } @@ -146,9 +152,18 @@ 
func TestToAndFromNetMessage(t *testing.T) { } } -func contains(s []string, x string) bool { - for _, a := range s { - if a == x { +func wantlistContains(wantlist *pb.Message_Wantlist, x string) bool { + for _, e := range wantlist.GetEntries() { + if e.GetBlock() == x { + return true + } + } + return false +} + +func contains(strs []string, x string) bool { + for _, s := range strs { + if s == x { return true } } @@ -159,8 +174,8 @@ func TestDuplicates(t *testing.T) { b := blocks.NewBlock([]byte("foo")) msg := New() - msg.AddWanted(b.Key()) - msg.AddWanted(b.Key()) + msg.AddEntry(b.Key(), 1, false) + msg.AddEntry(b.Key(), 1, false) if len(msg.Wantlist()) != 1 { t.Fatal("Duplicate in BitSwapMessage") } diff --git a/bitswap/strategy/interface.go b/bitswap/strategy/interface.go index 58385f5b7..c74b58c42 100644 --- a/bitswap/strategy/interface.go +++ b/bitswap/strategy/interface.go @@ -3,6 +3,7 @@ package strategy import ( "time" + bstore "github.com/jbenet/go-ipfs/blocks/blockstore" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" @@ -34,6 +35,8 @@ type Strategy interface { BlockSentToPeer(u.Key, peer.Peer) + GetAllocation(int, bstore.Blockstore) ([]*Task, error) + // Values determining bitswap behavioural patterns GetBatchSize() int GetRebroadcastDelay() time.Duration diff --git a/bitswap/strategy/ledger.go b/bitswap/strategy/ledger.go index 84e92d035..7ce7b73d9 100644 --- a/bitswap/strategy/ledger.go +++ b/bitswap/strategy/ledger.go @@ -3,6 +3,7 @@ package strategy import ( "time" + wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" ) @@ -13,7 +14,7 @@ type keySet map[u.Key]struct{} func newLedger(p peer.Peer, strategy strategyFunc) *ledger { return &ledger{ - wantList: keySet{}, + wantList: wl.NewWantlist(), Strategy: strategy, Partner: p, sentToPeer: make(map[u.Key]time.Time), @@ -39,7 +40,7 @@ type 
ledger struct { exchangeCount uint64 // wantList is a (bounded, small) set of keys that Partner desires. - wantList keySet + wantList *wl.Wantlist // sentToPeer is a set of keys to ensure we dont send duplicate blocks // to a given peer @@ -65,14 +66,17 @@ func (l *ledger) ReceivedBytes(n int) { } // TODO: this needs to be different. We need timeouts. -func (l *ledger) Wants(k u.Key) { +func (l *ledger) Wants(k u.Key, priority int) { log.Debugf("peer %s wants %s", l.Partner, k) - l.wantList[k] = struct{}{} + l.wantList.Add(k, priority) +} + +func (l *ledger) CancelWant(k u.Key) { + l.wantList.Remove(k) } func (l *ledger) WantListContains(k u.Key) bool { - _, ok := l.wantList[k] - return ok + return l.wantList.Contains(k) } func (l *ledger) ExchangeCount() uint64 { diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index fe7414caa..b21a3b2b1 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -5,7 +5,10 @@ import ( "sync" "time" + blocks "github.com/jbenet/go-ipfs/blocks" + bstore "github.com/jbenet/go-ipfs/blocks/blockstore" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" + wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" ) @@ -77,6 +80,60 @@ func (s *strategist) ShouldSendBlockToPeer(k u.Key, p peer.Peer) bool { return ledger.ShouldSend() } +type Task struct { + Peer peer.Peer + Blocks []*blocks.Block +} + +func (s *strategist) GetAllocation(bandwidth int, bs bstore.Blockstore) ([]*Task, error) { + var tasks []*Task + + s.lock.RLock() + defer s.lock.RUnlock() + var partners []peer.Peer + for _, ledger := range s.ledgerMap { + if ledger.ShouldSend() { + partners = append(partners, ledger.Partner) + } + } + if len(partners) == 0 { + return nil, nil + } + + bandwidthPerPeer := bandwidth / len(partners) + for _, p := range partners { + blksForPeer, err := s.getSendableBlocks(s.ledger(p).wantList, bs, bandwidthPerPeer) + if err 
!= nil { + return nil, err + } + tasks = append(tasks, &Task{ + Peer: p, + Blocks: blksForPeer, + }) + } + + return tasks, nil +} + +func (s *strategist) getSendableBlocks(wantlist *wl.Wantlist, bs bstore.Blockstore, bw int) ([]*blocks.Block, error) { + var outblocks []*blocks.Block + for _, e := range wantlist.Entries() { + block, err := bs.Get(e.Value) + if err == u.ErrNotFound { + continue + } + if err != nil { + return nil, err + } + outblocks = append(outblocks, block) + bw -= len(block.Data) + if bw <= 0 { + break + } + } + return outblocks, nil +} + func (s *strategist) BlockSentToPeer(k u.Key, p peer.Peer) { s.lock.Lock() defer s.lock.Unlock() @@ -106,8 +163,15 @@ func (s *strategist) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error return errors.New("Strategy received nil message") } l := s.ledger(p) - for _, key := range m.Wantlist() { - l.Wants(key) + if m.Full() { + l.wantList = wl.NewWantlist() + } + for _, e := range m.Wantlist() { + if e.Cancel { + l.CancelWant(e.Key) + } else { + l.Wants(e.Key, e.Priority) + } } for _, block := range m.Blocks() { // FIXME extract blocks.NumBytes(block) or block.NumBytes() method @@ -165,5 +229,5 @@ func (s *strategist) GetBatchSize() int { } func (s *strategist) GetRebroadcastDelay() time.Duration { - return time.Second * 5 + return time.Second * 10 } diff --git a/bitswap/strategy/strategy_test.go b/bitswap/strategy/strategy_test.go index e063dff68..687ea4d34 100644 --- a/bitswap/strategy/strategy_test.go +++ b/bitswap/strategy/strategy_test.go @@ -61,7 +61,7 @@ func TestBlockRecordedAsWantedAfterMessageReceived(t *testing.T) { block := blocks.NewBlock([]byte("data wanted by beggar")) messageFromBeggarToChooser := message.New() - messageFromBeggarToChooser.AddWanted(block.Key()) + messageFromBeggarToChooser.AddEntry(block.Key(), 1, false) chooser.MessageReceived(beggar.Peer, messageFromBeggarToChooser) // for this test, doesn't matter if you record that beggar sent From 
f51aab3187330b1f6b0ab1c6bba56747871f1425 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 10 Dec 2014 07:57:39 +0000 Subject: [PATCH 0199/1035] extracted ledgerset from strategy, cleaned up a few comments from the PR This commit was moved from ipfs/go-bitswap@3818c938476e0ca1884c83c4b157b9759da9f556 --- bitswap/bitswap.go | 61 +++---- bitswap/bitswap_test.go | 64 +++---- bitswap/message/message.go | 4 +- bitswap/strategy/interface.go | 33 +--- bitswap/strategy/ledger.go | 11 +- bitswap/strategy/ledgerset.go | 125 ++++++++++++++ .../{strategy_test.go => ledgerset_test.go} | 51 +++--- bitswap/strategy/strategy.go | 158 +----------------- bitswap/wantlist/wantlist.go | 2 +- 9 files changed, 208 insertions(+), 301 deletions(-) create mode 100644 bitswap/strategy/ledgerset.go rename bitswap/strategy/{strategy_test.go => ledgerset_test.go} (56%) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 1e0e86b61..d9da3380c 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -27,11 +27,14 @@ var log = eventlog.Logger("bitswap") // TODO: if a 'non-nice' strategy is implemented, consider increasing this value const maxProvidersPerRequest = 3 -const providerRequestTimeout = time.Second * 10 -const hasBlockTimeout = time.Second * 15 +var providerRequestTimeout = time.Second * 10 +var hasBlockTimeout = time.Second * 15 +var rebroadcastDelay = time.Second * 10 const roundTime = time.Second / 2 +var bandwidthPerRound = 500000 + // New initializes a BitSwap instance that communicates over the // provided BitSwapNetwork. This function registers the returned instance as // the network delegate. 
@@ -53,13 +56,14 @@ func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, rout cancelFunc: cancelFunc, notifications: notif, strategy: strategy.New(nice), + ledgerset: strategy.NewLedgerSet(), routing: routing, sender: network, - wantlist: wl.NewWantlist(), + wantlist: wl.New(), batchRequests: make(chan []u.Key, 32), } network.SetDelegate(bs) - go bs.loop(ctx) + go bs.clientWorker(ctx) go bs.roundWorker(ctx) return bs @@ -85,11 +89,11 @@ type bitswap struct { // have more than a single block in the set batchRequests chan []u.Key - // strategy listens to network traffic and makes decisions about how to - // interact with partners. - // TODO(brian): save the strategy's state to the datastore + // strategy makes decisions about how to interact with partners. strategy strategy.Strategy + ledgerset *strategy.LedgerSet + wantlist *wl.Wantlist // cancelFunc signals cancellation to the bitswap event loop @@ -159,10 +163,6 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { bs.wantlist.Remove(blk.Key()) bs.notifications.Publish(blk) child, _ := context.WithTimeout(ctx, hasBlockTimeout) - if err := bs.sendToPeersThatWant(child, blk); err != nil { - return err - } - child, _ = context.WithTimeout(ctx, hasBlockTimeout) return bs.routing.Provide(child, blk.Key()) } @@ -194,7 +194,7 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e // FIXME ensure accounting is handled correctly when // communication fails. May require slightly different API to // get better guarantees. May need shared sequence numbers. 
- bs.strategy.MessageSent(p, message) + bs.ledgerset.MessageSent(p, message) }(peerToQuery) } return nil @@ -220,17 +220,16 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wl.Wan func (bs *bitswap) roundWorker(ctx context.Context) { roundTicker := time.NewTicker(roundTime) - bandwidthPerRound := 500000 for { select { case <-ctx.Done(): return case <-roundTicker.C: - alloc, err := bs.strategy.GetAllocation(bandwidthPerRound, bs.blockstore) + alloc, err := bs.strategy.GetTasks(bandwidthPerRound, bs.ledgerset, bs.blockstore) if err != nil { log.Critical("%s", err) } - //log.Errorf("Allocation: %v", alloc) + log.Error(alloc) bs.processStrategyAllocation(ctx, alloc) } } @@ -241,9 +240,6 @@ func (bs *bitswap) processStrategyAllocation(ctx context.Context, alloc []*strat for _, block := range t.Blocks { message := bsmsg.New() message.AddBlock(block) - for _, wanted := range bs.wantlist.Entries() { - message.AddEntry(wanted.Value, wanted.Priority, false) - } if err := bs.send(ctx, t.Peer, message); err != nil { log.Errorf("Message Send Failed: %s", err) } @@ -252,11 +248,11 @@ func (bs *bitswap) processStrategyAllocation(ctx context.Context, alloc []*strat } // TODO ensure only one active request per key -func (bs *bitswap) loop(parent context.Context) { +func (bs *bitswap) clientWorker(parent context.Context) { ctx, cancel := context.WithCancel(parent) - broadcastSignal := time.NewTicker(bs.strategy.GetRebroadcastDelay()) + broadcastSignal := time.NewTicker(rebroadcastDelay) defer func() { cancel() // signal to derived async functions broadcastSignal.Stop() @@ -317,13 +313,14 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm // This call records changes to wantlists, blocks received, // and number of bytes transfered. - bs.strategy.MessageReceived(p, incoming) + bs.ledgerset.MessageReceived(p, incoming) // TODO: this is bad, and could be easily abused. 
// Should only track *useful* messages in ledger var blkeys []u.Key for _, block := range incoming.Blocks() { blkeys = append(blkeys, block.Key()) + log.Errorf("Got block: %s", block) if err := bs.HasBlock(ctx, block); err != nil { log.Error(err) } @@ -342,7 +339,7 @@ func (bs *bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { for _, k := range bkeys { message.AddEntry(k, 0, true) } - for _, p := range bs.strategy.Peers() { + for _, p := range bs.ledgerset.Peers() { err := bs.send(ctx, p, message) if err != nil { log.Errorf("Error sending message: %s", err) @@ -362,25 +359,7 @@ func (bs *bitswap) send(ctx context.Context, p peer.Peer, m bsmsg.BitSwapMessage if err := bs.sender.SendMessage(ctx, p, m); err != nil { return err } - return bs.strategy.MessageSent(p, m) -} - -func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block *blocks.Block) error { - for _, p := range bs.strategy.Peers() { - if bs.strategy.BlockIsWantedByPeer(block.Key(), p) { - if bs.strategy.ShouldSendBlockToPeer(block.Key(), p) { - message := bsmsg.New() - message.AddBlock(block) - for _, wanted := range bs.wantlist.Entries() { - message.AddEntry(wanted.Value, wanted.Priority, false) - } - if err := bs.send(ctx, p, message); err != nil { - return err - } - } - } - } - return nil + return bs.ledgerset.MessageSent(p, m) } func (bs *bitswap) Close() error { diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 0e72883cc..9bf71dea6 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -206,60 +206,44 @@ func TestSendToWantingPeer(t *testing.T) { defer sg.Stop() bg := blocksutil.NewBlockGenerator() - me := sg.Next() - w := sg.Next() - o := sg.Next() + oldVal := rebroadcastDelay + rebroadcastDelay = time.Second / 2 + defer func() { rebroadcastDelay = oldVal }() - t.Logf("Session %v\n", me.Peer) - t.Logf("Session %v\n", w.Peer) - t.Logf("Session %v\n", o.Peer) + peerA := sg.Next() + peerB := sg.Next() - alpha := bg.Next() - - const timeout = 1000 * 
time.Millisecond // FIXME don't depend on time + t.Logf("Session %v\n", peerA.Peer) + t.Logf("Session %v\n", peerB.Peer) - t.Logf("Peer %v attempts to get %v. NB: not available\n", w.Peer, alpha.Key()) - ctx, _ := context.WithTimeout(context.Background(), timeout) - _, err := w.Exchange.GetBlock(ctx, alpha.Key()) - if err == nil { - t.Fatalf("Expected %v to NOT be available", alpha.Key()) - } + timeout := time.Second + waitTime := time.Second * 5 - beta := bg.Next() - t.Logf("Peer %v announes availability of %v\n", w.Peer, beta.Key()) - ctx, _ = context.WithTimeout(context.Background(), timeout) - if err := w.Blockstore().Put(beta); err != nil { + alpha := bg.Next() + // peerA requests and waits for block alpha + ctx, _ := context.WithTimeout(context.TODO(), waitTime) + alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []u.Key{alpha.Key()}) + if err != nil { t.Fatal(err) } - w.Exchange.HasBlock(ctx, beta) - t.Logf("%v gets %v from %v and discovers it wants %v\n", me.Peer, beta.Key(), w.Peer, alpha.Key()) - ctx, _ = context.WithTimeout(context.Background(), timeout) - if _, err := me.Exchange.GetBlock(ctx, beta.Key()); err != nil { + // peerB announces to the network that he has block alpha + ctx, _ = context.WithTimeout(context.TODO(), timeout) + err = peerB.Exchange.HasBlock(ctx, alpha) + if err != nil { t.Fatal(err) } - t.Logf("%v announces availability of %v\n", o.Peer, alpha.Key()) - ctx, _ = context.WithTimeout(context.Background(), timeout) - if err := o.Blockstore().Put(alpha); err != nil { - t.Fatal(err) + // At some point, peerA should get alpha (or timeout) + blkrecvd, ok := <-alphaPromise + if !ok { + t.Fatal("context timed out and broke promise channel!") } - o.Exchange.HasBlock(ctx, alpha) - t.Logf("%v requests %v\n", me.Peer, alpha.Key()) - ctx, _ = context.WithTimeout(context.Background(), timeout) - if _, err := me.Exchange.GetBlock(ctx, alpha.Key()); err != nil { - t.Fatal(err) + if blkrecvd.Key() != alpha.Key() { + t.Fatal("Wrong block!") } - 
t.Logf("%v should now have %v\n", w.Peer, alpha.Key()) - block, err := w.Blockstore().Get(alpha.Key()) - if err != nil { - t.Fatalf("Should not have received an error: %s", err) - } - if block.Key() != alpha.Key() { - t.Fatal("Expected to receive alpha from me") - } } func TestBasicBitswap(t *testing.T) { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 288fc9da7..b636e2024 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -24,13 +24,13 @@ type BitSwapMessage interface { Blocks() []*blocks.Block // AddEntry adds an entry to the Wantlist. - AddEntry(u.Key, int, bool) + AddEntry(key u.Key, priority int, cancel bool) // Sets whether or not the contained wantlist represents the entire wantlist // true = full wantlist // false = wantlist 'patch' // default: true - SetFull(bool) + SetFull(isFull bool) Full() bool diff --git a/bitswap/strategy/interface.go b/bitswap/strategy/interface.go index c74b58c42..54af581f7 100644 --- a/bitswap/strategy/interface.go +++ b/bitswap/strategy/interface.go @@ -1,43 +1,12 @@ package strategy import ( - "time" - bstore "github.com/jbenet/go-ipfs/blocks/blockstore" - bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" - peer "github.com/jbenet/go-ipfs/peer" - u "github.com/jbenet/go-ipfs/util" ) type Strategy interface { - // Returns a slice of Peers with whom the local node has active sessions - Peers() []peer.Peer - - // BlockIsWantedByPeer returns true if peer wants the block given by this - // key - BlockIsWantedByPeer(u.Key, peer.Peer) bool - - // ShouldSendTo(Peer) decides whether to send data to this Peer - ShouldSendBlockToPeer(u.Key, peer.Peer) bool - // Seed initializes the decider to a deterministic state Seed(int64) - // MessageReceived records receipt of message for accounting purposes - MessageReceived(peer.Peer, bsmsg.BitSwapMessage) error - - // MessageSent records sending of message for accounting purposes - MessageSent(peer.Peer, bsmsg.BitSwapMessage) error - - 
NumBytesSentTo(peer.Peer) uint64 - - NumBytesReceivedFrom(peer.Peer) uint64 - - BlockSentToPeer(u.Key, peer.Peer) - - GetAllocation(int, bstore.Blockstore) ([]*Task, error) - - // Values determining bitswap behavioural patterns - GetBatchSize() int - GetRebroadcastDelay() time.Duration + GetTasks(bandwidth int, ledgers *LedgerSet, bs bstore.Blockstore) ([]*Task, error) } diff --git a/bitswap/strategy/ledger.go b/bitswap/strategy/ledger.go index 7ce7b73d9..684d383ef 100644 --- a/bitswap/strategy/ledger.go +++ b/bitswap/strategy/ledger.go @@ -12,10 +12,9 @@ import ( // access/lookups. type keySet map[u.Key]struct{} -func newLedger(p peer.Peer, strategy strategyFunc) *ledger { +func newLedger(p peer.Peer) *ledger { return &ledger{ - wantList: wl.NewWantlist(), - Strategy: strategy, + wantList: wl.New(), Partner: p, sentToPeer: make(map[u.Key]time.Time), } @@ -45,12 +44,6 @@ type ledger struct { // sentToPeer is a set of keys to ensure we dont send duplicate blocks // to a given peer sentToPeer map[u.Key]time.Time - - Strategy strategyFunc -} - -func (l *ledger) ShouldSend() bool { - return l.Strategy(l) } func (l *ledger) SentBytes(n int) { diff --git a/bitswap/strategy/ledgerset.go b/bitswap/strategy/ledgerset.go new file mode 100644 index 000000000..b5f03ae65 --- /dev/null +++ b/bitswap/strategy/ledgerset.go @@ -0,0 +1,125 @@ +package strategy + +import ( + "sync" + + bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" + wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" + peer "github.com/jbenet/go-ipfs/peer" + u "github.com/jbenet/go-ipfs/util" +) + +// LedgerMap lists Ledgers by their Partner key. 
+type ledgerMap map[peerKey]*ledger + +// FIXME share this externally +type peerKey u.Key + +type LedgerSet struct { + lock sync.RWMutex + ledgerMap ledgerMap +} + +func NewLedgerSet() *LedgerSet { + return &LedgerSet{ + ledgerMap: make(ledgerMap), + } +} + +// Returns a slice of Peers with whom the local node has active sessions +func (ls *LedgerSet) Peers() []peer.Peer { + ls.lock.RLock() + defer ls.lock.RUnlock() + + response := make([]peer.Peer, 0) + for _, ledger := range ls.ledgerMap { + response = append(response, ledger.Partner) + } + return response +} + +// BlockIsWantedByPeer returns true if peer wants the block given by this +// key +func (ls *LedgerSet) BlockIsWantedByPeer(k u.Key, p peer.Peer) bool { + ls.lock.RLock() + defer ls.lock.RUnlock() + + ledger := ls.ledger(p) + return ledger.WantListContains(k) +} + +// MessageReceived performs book-keeping. Returns error if passed invalid +// arguments. +func (ls *LedgerSet) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { + ls.lock.Lock() + defer ls.lock.Unlock() + + // TODO find a more elegant way to handle this check + /* + if p == nil { + return errors.New("Strategy received nil peer") + } + if m == nil { + return errors.New("Strategy received nil message") + } + */ + l := ls.ledger(p) + if m.Full() { + l.wantList = wl.New() + } + for _, e := range m.Wantlist() { + if e.Cancel { + l.CancelWant(e.Key) + } else { + l.Wants(e.Key, e.Priority) + } + } + for _, block := range m.Blocks() { + // FIXME extract blocks.NumBytes(block) or block.NumBytes() method + l.ReceivedBytes(len(block.Data)) + } + return nil +} + +// TODO add contents of m.WantList() to my local wantlist? NB: could introduce +// race conditions where I send a message, but MessageSent gets handled after +// MessageReceived. The information in the local wantlist could become +// inconsistent. 
Would need to ensure that Sends and acknowledgement of the +// send happen atomically + +func (ls *LedgerSet) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error { + ls.lock.Lock() + defer ls.lock.Unlock() + + l := ls.ledger(p) + for _, block := range m.Blocks() { + l.SentBytes(len(block.Data)) + l.wantList.Remove(block.Key()) + } + + return nil +} + +func (ls *LedgerSet) NumBytesSentTo(p peer.Peer) uint64 { + ls.lock.RLock() + defer ls.lock.RUnlock() + + return ls.ledger(p).Accounting.BytesSent +} + +func (ls *LedgerSet) NumBytesReceivedFrom(p peer.Peer) uint64 { + ls.lock.RLock() + defer ls.lock.RUnlock() + + return ls.ledger(p).Accounting.BytesRecv +} + +// ledger lazily instantiates a ledger +func (ls *LedgerSet) ledger(p peer.Peer) *ledger { + l, ok := ls.ledgerMap[peerKey(p.Key())] + if !ok { + l = newLedger(p) + ls.ledgerMap[peerKey(p.Key())] = l + } + return l +} diff --git a/bitswap/strategy/strategy_test.go b/bitswap/strategy/ledgerset_test.go similarity index 56% rename from bitswap/strategy/strategy_test.go rename to bitswap/strategy/ledgerset_test.go index 687ea4d34..795752a12 100644 --- a/bitswap/strategy/strategy_test.go +++ b/bitswap/strategy/ledgerset_test.go @@ -10,21 +10,22 @@ import ( testutil "github.com/jbenet/go-ipfs/util/testutil" ) -type peerAndStrategist struct { +type peerAndLedgerset struct { peer.Peer - Strategy + ls *LedgerSet } -func newPeerAndStrategist(idStr string) peerAndStrategist { - return peerAndStrategist{ - Peer: testutil.NewPeerWithIDString(idStr), - Strategy: New(true), +func newPeerAndLedgerset(idStr string) peerAndLedgerset { + return peerAndLedgerset{ + Peer: testutil.NewPeerWithIDString(idStr), + //Strategy: New(true), + ls: NewLedgerSet(), } } func TestConsistentAccounting(t *testing.T) { - sender := newPeerAndStrategist("Ernie") - receiver := newPeerAndStrategist("Bert") + sender := newPeerAndLedgerset("Ernie") + receiver := newPeerAndLedgerset("Bert") // Send messages from Ernie to Bert for i := 0; i < 1000; i++ 
{ @@ -33,69 +34,69 @@ func TestConsistentAccounting(t *testing.T) { content := []string{"this", "is", "message", "i"} m.AddBlock(blocks.NewBlock([]byte(strings.Join(content, " ")))) - sender.MessageSent(receiver.Peer, m) - receiver.MessageReceived(sender.Peer, m) + sender.ls.MessageSent(receiver.Peer, m) + receiver.ls.MessageReceived(sender.Peer, m) } // Ensure sender records the change - if sender.NumBytesSentTo(receiver.Peer) == 0 { + if sender.ls.NumBytesSentTo(receiver.Peer) == 0 { t.Fatal("Sent bytes were not recorded") } // Ensure sender and receiver have the same values - if sender.NumBytesSentTo(receiver.Peer) != receiver.NumBytesReceivedFrom(sender.Peer) { + if sender.ls.NumBytesSentTo(receiver.Peer) != receiver.ls.NumBytesReceivedFrom(sender.Peer) { t.Fatal("Inconsistent book-keeping. Strategies don't agree") } // Ensure sender didn't record receving anything. And that the receiver // didn't record sending anything - if receiver.NumBytesSentTo(sender.Peer) != 0 || sender.NumBytesReceivedFrom(receiver.Peer) != 0 { + if receiver.ls.NumBytesSentTo(sender.Peer) != 0 || sender.ls.NumBytesReceivedFrom(receiver.Peer) != 0 { t.Fatal("Bert didn't send bytes to Ernie") } } func TestBlockRecordedAsWantedAfterMessageReceived(t *testing.T) { - beggar := newPeerAndStrategist("can't be chooser") - chooser := newPeerAndStrategist("chooses JIF") + beggar := newPeerAndLedgerset("can't be chooser") + chooser := newPeerAndLedgerset("chooses JIF") block := blocks.NewBlock([]byte("data wanted by beggar")) messageFromBeggarToChooser := message.New() messageFromBeggarToChooser.AddEntry(block.Key(), 1, false) - chooser.MessageReceived(beggar.Peer, messageFromBeggarToChooser) + chooser.ls.MessageReceived(beggar.Peer, messageFromBeggarToChooser) // for this test, doesn't matter if you record that beggar sent - if !chooser.BlockIsWantedByPeer(block.Key(), beggar.Peer) { + if !chooser.ls.BlockIsWantedByPeer(block.Key(), beggar.Peer) { t.Fatal("chooser failed to record that beggar 
wants block") } } func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { - sanfrancisco := newPeerAndStrategist("sf") - seattle := newPeerAndStrategist("sea") + sanfrancisco := newPeerAndLedgerset("sf") + seattle := newPeerAndLedgerset("sea") m := message.New() - sanfrancisco.MessageSent(seattle.Peer, m) - seattle.MessageReceived(sanfrancisco.Peer, m) + sanfrancisco.ls.MessageSent(seattle.Peer, m) + seattle.ls.MessageReceived(sanfrancisco.Peer, m) if seattle.Peer.Key() == sanfrancisco.Peer.Key() { t.Fatal("Sanity Check: Peers have same Key!") } - if !peerIsPartner(seattle.Peer, sanfrancisco.Strategy) { + if !peerIsPartner(seattle.Peer, sanfrancisco.ls) { t.Fatal("Peer wasn't added as a Partner") } - if !peerIsPartner(sanfrancisco.Peer, seattle.Strategy) { + if !peerIsPartner(sanfrancisco.Peer, seattle.ls) { t.Fatal("Peer wasn't added as a Partner") } } -func peerIsPartner(p peer.Peer, s Strategy) bool { - for _, partner := range s.Peers() { +func peerIsPartner(p peer.Peer, ls *LedgerSet) bool { + for _, partner := range ls.Peers() { if partner.Key() == p.Key() { return true } diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index b21a3b2b1..d425fcc77 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -1,20 +1,13 @@ package strategy import ( - "errors" - "sync" - "time" - blocks "github.com/jbenet/go-ipfs/blocks" bstore "github.com/jbenet/go-ipfs/blocks/blockstore" - bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" ) -const resendTimeoutPeriod = time.Minute - var log = u.Logger("strategy") // TODO niceness should be on a per-peer basis. 
Use-case: Certain peers are @@ -28,81 +21,37 @@ func New(nice bool) Strategy { stratFunc = standardStrategy } return &strategist{ - ledgerMap: ledgerMap{}, strategyFunc: stratFunc, } } type strategist struct { - lock sync.RWMutex - ledgerMap strategyFunc } -// LedgerMap lists Ledgers by their Partner key. -type ledgerMap map[peerKey]*ledger - -// FIXME share this externally -type peerKey u.Key - -// Peers returns a list of peers -func (s *strategist) Peers() []peer.Peer { - s.lock.RLock() - defer s.lock.RUnlock() - - response := make([]peer.Peer, 0) - for _, ledger := range s.ledgerMap { - response = append(response, ledger.Partner) - } - return response -} - -func (s *strategist) BlockIsWantedByPeer(k u.Key, p peer.Peer) bool { - s.lock.RLock() - defer s.lock.RUnlock() - - ledger := s.ledger(p) - return ledger.WantListContains(k) -} - -func (s *strategist) ShouldSendBlockToPeer(k u.Key, p peer.Peer) bool { - s.lock.RLock() - defer s.lock.RUnlock() - - ledger := s.ledger(p) - - // Dont resend blocks within a certain time period - t, ok := ledger.sentToPeer[k] - if ok && t.Add(resendTimeoutPeriod).After(time.Now()) { - return false - } - - return ledger.ShouldSend() -} - type Task struct { Peer peer.Peer Blocks []*blocks.Block } -func (s *strategist) GetAllocation(bandwidth int, bs bstore.Blockstore) ([]*Task, error) { +func (s *strategist) GetTasks(bandwidth int, ledgers *LedgerSet, bs bstore.Blockstore) ([]*Task, error) { var tasks []*Task - s.lock.RLock() - defer s.lock.RUnlock() + ledgers.lock.RLock() var partners []peer.Peer - for _, ledger := range s.ledgerMap { - if ledger.ShouldSend() { + for _, ledger := range ledgers.ledgerMap { + if s.strategyFunc(ledger) { partners = append(partners, ledger.Partner) } } + ledgers.lock.RUnlock() if len(partners) == 0 { return nil, nil } bandwidthPerPeer := bandwidth / len(partners) for _, p := range partners { - blksForPeer, err := s.getSendableBlocks(s.ledger(p).wantList, bs, bandwidthPerPeer) + blksForPeer, err := 
s.getSendableBlocks(ledgers.ledger(p).wantList, bs, bandwidthPerPeer) if err != nil { return nil, err } @@ -134,100 +83,7 @@ func (s *strategist) getSendableBlocks(wantlist *wl.Wantlist, bs bstore.Blocksto return outblocks, nil } -func (s *strategist) BlockSentToPeer(k u.Key, p peer.Peer) { - s.lock.Lock() - defer s.lock.Unlock() - - ledger := s.ledger(p) - ledger.sentToPeer[k] = time.Now() -} - +func test() {} func (s *strategist) Seed(int64) { - s.lock.Lock() - defer s.lock.Unlock() - // TODO } - -// MessageReceived performs book-keeping. Returns error if passed invalid -// arguments. -func (s *strategist) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { - s.lock.Lock() - defer s.lock.Unlock() - - // TODO find a more elegant way to handle this check - if p == nil { - return errors.New("Strategy received nil peer") - } - if m == nil { - return errors.New("Strategy received nil message") - } - l := s.ledger(p) - if m.Full() { - l.wantList = wl.NewWantlist() - } - for _, e := range m.Wantlist() { - if e.Cancel { - l.CancelWant(e.Key) - } else { - l.Wants(e.Key, e.Priority) - } - } - for _, block := range m.Blocks() { - // FIXME extract blocks.NumBytes(block) or block.NumBytes() method - l.ReceivedBytes(len(block.Data)) - } - return nil -} - -// TODO add contents of m.WantList() to my local wantlist? NB: could introduce -// race conditions where I send a message, but MessageSent gets handled after -// MessageReceived. The information in the local wantlist could become -// inconsistent. 
Would need to ensure that Sends and acknowledgement of the -// send happen atomically - -func (s *strategist) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error { - s.lock.Lock() - defer s.lock.Unlock() - - l := s.ledger(p) - for _, block := range m.Blocks() { - l.SentBytes(len(block.Data)) - } - - // TODO remove these blocks from peer's want list - - return nil -} - -func (s *strategist) NumBytesSentTo(p peer.Peer) uint64 { - s.lock.RLock() - defer s.lock.RUnlock() - - return s.ledger(p).Accounting.BytesSent -} - -func (s *strategist) NumBytesReceivedFrom(p peer.Peer) uint64 { - s.lock.RLock() - defer s.lock.RUnlock() - - return s.ledger(p).Accounting.BytesRecv -} - -// ledger lazily instantiates a ledger -func (s *strategist) ledger(p peer.Peer) *ledger { - l, ok := s.ledgerMap[peerKey(p.Key())] - if !ok { - l = newLedger(p, s.strategyFunc) - s.ledgerMap[peerKey(p.Key())] = l - } - return l -} - -func (s *strategist) GetBatchSize() int { - return 10 -} - -func (s *strategist) GetRebroadcastDelay() time.Duration { - return time.Second * 10 -} diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 041064901..f9cf52eb2 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -9,7 +9,7 @@ type Wantlist struct { set map[u.Key]*Entry } -func NewWantlist() *Wantlist { +func New() *Wantlist { return &Wantlist{ set: make(map[u.Key]*Entry), } From e41d0ee64b464fc1b0e6d80480a2773a1307d1a6 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 10 Dec 2014 18:47:11 +0000 Subject: [PATCH 0200/1035] dont spawn so many goroutines when rebroadcasting wantlist This commit was moved from ipfs/go-bitswap@13f98cb11826f26a1a7cc4b2f80061984dbdf838 --- bitswap/bitswap.go | 69 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 51 insertions(+), 18 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d9da3380c..33f37b107 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -201,21 +201,57 @@ func (bs 
*bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e } func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wl.Wantlist) { + provset := make(map[u.Key]peer.Peer) + provcollect := make(chan peer.Peer) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + wg := sync.WaitGroup{} + // Get providers for all entries in wantlist (could take a while) for _, e := range wantlist.Entries() { wg.Add(1) go func(k u.Key) { child, _ := context.WithTimeout(ctx, providerRequestTimeout) providers := bs.routing.FindProvidersAsync(child, k, maxProvidersPerRequest) - err := bs.sendWantListTo(ctx, providers) - if err != nil { - log.Errorf("error sending wantlist: %s", err) + for prov := range providers { + provcollect <- prov } wg.Done() }(e.Value) } - wg.Wait() + + // When all workers finish, close the providers channel + go func() { + wg.Wait() + close(provcollect) + }() + + // Filter out duplicates, + // no need to send our wantlists out twice in a given time period + for { + select { + case p, ok := <-provcollect: + if !ok { + break + } + provset[p.Key()] = p + case <-ctx.Done(): + log.Error("Context cancelled before we got all the providers!") + return + } + } + + message := bsmsg.New() + message.SetFull(true) + for _, e := range bs.wantlist.Entries() { + message.AddEntry(e.Value, e.Priority, false) + } + + for _, prov := range provset { + bs.send(ctx, prov, message) + } } func (bs *bitswap) roundWorker(ctx context.Context) { @@ -229,22 +265,25 @@ func (bs *bitswap) roundWorker(ctx context.Context) { if err != nil { log.Critical("%s", err) } - log.Error(alloc) - bs.processStrategyAllocation(ctx, alloc) + err = bs.processStrategyAllocation(ctx, alloc) + if err != nil { + log.Critical("Error processing strategy allocation: %s", err) + } } } } -func (bs *bitswap) processStrategyAllocation(ctx context.Context, alloc []*strategy.Task) { +func (bs *bitswap) processStrategyAllocation(ctx context.Context, alloc []*strategy.Task) error { for 
_, t := range alloc { for _, block := range t.Blocks { message := bsmsg.New() message.AddBlock(block) if err := bs.send(ctx, t.Peer, message); err != nil { - log.Errorf("Message Send Failed: %s", err) + return err } } } + return nil } // TODO ensure only one active request per key @@ -252,22 +291,16 @@ func (bs *bitswap) clientWorker(parent context.Context) { ctx, cancel := context.WithCancel(parent) - broadcastSignal := time.NewTicker(rebroadcastDelay) - defer func() { - cancel() // signal to derived async functions - broadcastSignal.Stop() - }() + broadcastSignal := time.After(rebroadcastDelay) + defer cancel() for { select { - case <-broadcastSignal.C: + case <-broadcastSignal: // Resend unfulfilled wantlist keys bs.sendWantlistToProviders(ctx, bs.wantlist) + broadcastSignal = time.After(rebroadcastDelay) case ks := <-bs.batchRequests: - // TODO: implement batching on len(ks) > X for some X - // i.e. if given 20 keys, fetch first five, then next - // five, and so on, so we are more likely to be able to - // effectively stream the data if len(ks) == 0 { log.Warning("Received batch request for zero blocks") continue From 31fdbbd891bd7e6502701f2ecaefc9abceaf9ed7 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 10 Dec 2014 21:12:07 +0000 Subject: [PATCH 0201/1035] add priorities to GetBlocks requests, and add waitgroup to sendWantListTo This commit was moved from ipfs/go-bitswap@70c89ffbc202f4fd0f7730021a81d067d7267080 --- bitswap/bitswap.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 33f37b107..b3fc629b9 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -174,10 +174,12 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e for _, wanted := range bs.wantlist.Entries() { message.AddEntry(wanted.Value, wanted.Priority, false) } + wg := sync.WaitGroup{} for peerToQuery := range peers { - log.Debug("sending query to: %s", peerToQuery) log.Event(ctx, 
"PeerToQuery", peerToQuery) + wg.Add(1) go func(p peer.Peer) { + defer wg.Done() log.Event(ctx, "DialPeer", p) err := bs.sender.DialPeer(ctx, p) @@ -197,6 +199,7 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e bs.ledgerset.MessageSent(p, message) }(peerToQuery) } + wg.Wait() return nil } @@ -305,8 +308,8 @@ func (bs *bitswap) clientWorker(parent context.Context) { log.Warning("Received batch request for zero blocks") continue } - for _, k := range ks { - bs.wantlist.Add(k, 1) + for i, k := range ks { + bs.wantlist.Add(k, len(ks)-i) } // NB: send want list to providers for the first peer in this list. // the assumption is made that the providers of the first key in From eceb9c330c3a211903e5b8190195c7bfc635e1d9 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 10 Dec 2014 23:01:56 +0000 Subject: [PATCH 0202/1035] blockstore.ErrNotFound, and proper wantlist sorting This commit was moved from ipfs/go-bitswap@ac563d7619aee662621fdcda4483930427d0b80e --- bitswap/strategy/strategy.go | 2 +- bitswap/wantlist/wantlist.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index d425fcc77..ff7f4d74d 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -68,7 +68,7 @@ func (s *strategist) getSendableBlocks(wantlist *wl.Wantlist, bs bstore.Blocksto var outblocks []*blocks.Block for _, e := range wantlist.Entries() { block, err := bs.Get(e.Value) - if err == u.ErrNotFound { + if err == bstore.ErrNotFound { continue } if err != nil { diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index f9cf52eb2..d57b9d523 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -43,7 +43,7 @@ type entrySlice []*Entry func (es entrySlice) Len() int { return len(es) } func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] } -func (es entrySlice) Less(i, j int) bool { return es[i].Priority < 
es[j].Priority } +func (es entrySlice) Less(i, j int) bool { return es[i].Priority > es[j].Priority } func (w *Wantlist) Entries() []*Entry { var es entrySlice From 58186ab5bbdf4781ab4c3e6f56c42574d413471c Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sat, 13 Dec 2014 06:34:00 -0800 Subject: [PATCH 0203/1035] remove noisy statement License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@4b4958e35b45d880d8da7690bbe82d570ed19a4d --- bitswap/bitswap.go | 1 - 1 file changed, 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b3fc629b9..cae1baa33 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -356,7 +356,6 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm var blkeys []u.Key for _, block := range incoming.Blocks() { blkeys = append(blkeys, block.Key()) - log.Errorf("Got block: %s", block) if err := bs.HasBlock(ctx, block); err != nil { log.Error(err) } From e991d730511c3652ebf5b07cffe726c7d0ac58bb Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 14 Dec 2014 03:16:10 +0000 Subject: [PATCH 0204/1035] add locks to wantlist to avoid race condition This commit was moved from ipfs/go-bitswap@061f0d396fdd696a0cd82e3e935f1e05bfbc8941 --- bitswap/wantlist/wantlist.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index d57b9d523..0de0ba803 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -3,9 +3,11 @@ package wantlist import ( u "github.com/jbenet/go-ipfs/util" "sort" + "sync" ) type Wantlist struct { + lk sync.RWMutex set map[u.Key]*Entry } @@ -21,6 +23,8 @@ type Entry struct { } func (w *Wantlist) Add(k u.Key, priority int) { + w.lk.Lock() + defer w.lk.Unlock() if _, ok := w.set[k]; ok { return } @@ -31,10 +35,14 @@ func (w *Wantlist) Add(k u.Key, priority int) { } func (w *Wantlist) Remove(k u.Key) { + w.lk.Lock() + defer w.lk.Unlock() delete(w.set, k) } func (w 
*Wantlist) Contains(k u.Key) bool { + w.lk.RLock() + defer w.lk.RUnlock() _, ok := w.set[k] return ok } @@ -46,6 +54,8 @@ func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] } func (es entrySlice) Less(i, j int) bool { return es[i].Priority > es[j].Priority } func (w *Wantlist) Entries() []*Entry { + w.lk.RLock() + defer w.lk.RUnlock() var es entrySlice for _, e := range w.set { From fe1da2534ac454ce57c52723c57ffb623b17674a Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 15 Dec 2014 01:33:04 +0000 Subject: [PATCH 0205/1035] rewrite sendWantlistToProviders This commit was moved from ipfs/go-bitswap@e6a504fdbd550f91e7d13e41c2b1baf56e372b44 --- bitswap/bitswap.go | 52 +++++++++++++--------------------------------- 1 file changed, 15 insertions(+), 37 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index cae1baa33..ee80df950 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -19,6 +19,7 @@ import ( peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" eventlog "github.com/jbenet/go-ipfs/util/eventlog" + pset "github.com/jbenet/go-ipfs/util/peerset" ) var log = eventlog.Logger("bitswap") @@ -204,57 +205,34 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e } func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wl.Wantlist) { - provset := make(map[u.Key]peer.Peer) - provcollect := make(chan peer.Peer) - ctx, cancel := context.WithCancel(ctx) defer cancel() - wg := sync.WaitGroup{} + message := bsmsg.New() + message.SetFull(true) + for _, e := range bs.wantlist.Entries() { + message.AddEntry(e.Value, e.Priority, false) + } + + ps := pset.NewPeerSet() + // Get providers for all entries in wantlist (could take a while) + wg := sync.WaitGroup{} for _, e := range wantlist.Entries() { wg.Add(1) go func(k u.Key) { + defer wg.Done() child, _ := context.WithTimeout(ctx, providerRequestTimeout) providers := bs.routing.FindProvidersAsync(child, k, 
maxProvidersPerRequest) for prov := range providers { - provcollect <- prov + if ps.AddIfSmallerThan(prov, -1) { //Do once per peer + bs.send(ctx, prov, message) + } } - wg.Done() }(e.Value) } - - // When all workers finish, close the providers channel - go func() { - wg.Wait() - close(provcollect) - }() - - // Filter out duplicates, - // no need to send our wantlists out twice in a given time period - for { - select { - case p, ok := <-provcollect: - if !ok { - break - } - provset[p.Key()] = p - case <-ctx.Done(): - log.Error("Context cancelled before we got all the providers!") - return - } - } - - message := bsmsg.New() - message.SetFull(true) - for _, e := range bs.wantlist.Entries() { - message.AddEntry(e.Value, e.Priority, false) - } - - for _, prov := range provset { - bs.send(ctx, prov, message) - } + wg.Wait() } func (bs *bitswap) roundWorker(ctx context.Context) { From 95da2157b95160bbfc47b6e12d229b8d70bc6af8 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 16 Dec 2014 02:01:21 +0000 Subject: [PATCH 0206/1035] tasklist queue for bitswap tasks This commit was moved from ipfs/go-bitswap@8fe456ba07e8c2a9dd35146fc8c81c68d0c8eaa1 --- bitswap/bitswap.go | 38 +++----- bitswap/bitswap_test.go | 12 +-- bitswap/strategy/interface.go | 2 +- bitswap/strategy/ledgerset.go | 140 ++++++++++++++++++++--------- bitswap/strategy/ledgerset_test.go | 26 +++--- bitswap/strategy/strategy.go | 18 ++-- bitswap/strategy/tasklist.go | 72 +++++++++++++++ 7 files changed, 211 insertions(+), 97 deletions(-) create mode 100644 bitswap/strategy/tasklist.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index ee80df950..c0df58551 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -56,8 +56,7 @@ func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, rout blockstore: bstore, cancelFunc: cancelFunc, notifications: notif, - strategy: strategy.New(nice), - ledgerset: strategy.NewLedgerSet(), + ledgermanager: strategy.NewLedgerManager(bstore, ctx), routing: 
routing, sender: network, wantlist: wl.New(), @@ -93,7 +92,7 @@ type bitswap struct { // strategy makes decisions about how to interact with partners. strategy strategy.Strategy - ledgerset *strategy.LedgerSet + ledgermanager *strategy.LedgerManager wantlist *wl.Wantlist @@ -197,7 +196,7 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e // FIXME ensure accounting is handled correctly when // communication fails. May require slightly different API to // get better guarantees. May need shared sequence numbers. - bs.ledgerset.MessageSent(p, message) + bs.ledgermanager.MessageSent(p, message) }(peerToQuery) } wg.Wait() @@ -236,35 +235,24 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wl.Wan } func (bs *bitswap) roundWorker(ctx context.Context) { - roundTicker := time.NewTicker(roundTime) for { select { case <-ctx.Done(): return - case <-roundTicker.C: - alloc, err := bs.strategy.GetTasks(bandwidthPerRound, bs.ledgerset, bs.blockstore) + case task := <-bs.ledgermanager.GetTaskChan(): + block, err := bs.blockstore.Get(task.Key) if err != nil { - log.Critical("%s", err) - } - err = bs.processStrategyAllocation(ctx, alloc) - if err != nil { - log.Critical("Error processing strategy allocation: %s", err) + log.Errorf("Expected to have block %s, but it was not found!", task.Key) + continue } - } - } -} -func (bs *bitswap) processStrategyAllocation(ctx context.Context, alloc []*strategy.Task) error { - for _, t := range alloc { - for _, block := range t.Blocks { message := bsmsg.New() message.AddBlock(block) - if err := bs.send(ctx, t.Peer, message); err != nil { - return err - } + // TODO: maybe add keys from our wantlist? 
+ + bs.send(ctx, task.Target, message) } } - return nil } // TODO ensure only one active request per key @@ -327,7 +315,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm // This call records changes to wantlists, blocks received, // and number of bytes transfered. - bs.ledgerset.MessageReceived(p, incoming) + bs.ledgermanager.MessageReceived(p, incoming) // TODO: this is bad, and could be easily abused. // Should only track *useful* messages in ledger @@ -352,7 +340,7 @@ func (bs *bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { for _, k := range bkeys { message.AddEntry(k, 0, true) } - for _, p := range bs.ledgerset.Peers() { + for _, p := range bs.ledgermanager.Peers() { err := bs.send(ctx, p, message) if err != nil { log.Errorf("Error sending message: %s", err) @@ -372,7 +360,7 @@ func (bs *bitswap) send(ctx context.Context, p peer.Peer, m bsmsg.BitSwapMessage if err := bs.sender.SendMessage(ctx, p, m); err != nil { return err } - return bs.ledgerset.MessageSent(p, m) + return bs.ledgermanager.MessageSent(p, m) } func (bs *bitswap) Close() error { diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 9bf71dea6..2c04b0508 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -26,7 +26,7 @@ func TestClose(t *testing.T) { vnet := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rout := mockrouting.NewServer() sesgen := NewSessionGenerator(vnet, rout) - defer sesgen.Stop() + defer sesgen.Close() bgen := blocksutil.NewBlockGenerator() block := bgen.Next() @@ -41,7 +41,7 @@ func TestGetBlockTimeout(t *testing.T) { net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rs := mockrouting.NewServer() g := NewSessionGenerator(net, rs) - defer g.Stop() + defer g.Close() self := g.Next() @@ -59,7 +59,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rs := mockrouting.NewServer() g := NewSessionGenerator(net, rs) - defer g.Stop() + 
defer g.Close() block := blocks.NewBlock([]byte("block")) rs.Client(testutil.NewPeerWithIDString("testing")).Provide(context.Background(), block.Key()) // but not on network @@ -83,7 +83,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { rs := mockrouting.NewServer() block := blocks.NewBlock([]byte("block")) g := NewSessionGenerator(net, rs) - defer g.Stop() + defer g.Close() hasBlock := g.Next() defer hasBlock.Exchange.Close() @@ -137,7 +137,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rs := mockrouting.NewServer() sg := NewSessionGenerator(net, rs) - defer sg.Stop() + defer sg.Close() bg := blocksutil.NewBlockGenerator() t.Log("Test a few nodes trying to get one file with a lot of blocks") @@ -203,7 +203,7 @@ func TestSendToWantingPeer(t *testing.T) { net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rs := mockrouting.NewServer() sg := NewSessionGenerator(net, rs) - defer sg.Stop() + defer sg.Close() bg := blocksutil.NewBlockGenerator() oldVal := rebroadcastDelay diff --git a/bitswap/strategy/interface.go b/bitswap/strategy/interface.go index 54af581f7..62cd77b8a 100644 --- a/bitswap/strategy/interface.go +++ b/bitswap/strategy/interface.go @@ -8,5 +8,5 @@ type Strategy interface { // Seed initializes the decider to a deterministic state Seed(int64) - GetTasks(bandwidth int, ledgers *LedgerSet, bs bstore.Blockstore) ([]*Task, error) + GetTasks(bandwidth int, ledgers *LedgerManager, bs bstore.Blockstore) ([]*Task, error) } diff --git a/bitswap/strategy/ledgerset.go b/bitswap/strategy/ledgerset.go index b5f03ae65..92808d2f0 100644 --- a/bitswap/strategy/ledgerset.go +++ b/bitswap/strategy/ledgerset.go @@ -3,6 +3,9 @@ package strategy import ( "sync" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + + bstore "github.com/jbenet/go-ipfs/blocks/blockstore" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" wl 
"github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" peer "github.com/jbenet/go-ipfs/peer" @@ -15,24 +18,62 @@ type ledgerMap map[peerKey]*ledger // FIXME share this externally type peerKey u.Key -type LedgerSet struct { - lock sync.RWMutex - ledgerMap ledgerMap +type LedgerManager struct { + lock sync.RWMutex + ledgerMap ledgerMap + bs bstore.Blockstore + tasklist *TaskList + taskOut chan *Task + workSignal chan struct{} + ctx context.Context } -func NewLedgerSet() *LedgerSet { - return &LedgerSet{ - ledgerMap: make(ledgerMap), +func NewLedgerManager(bs bstore.Blockstore, ctx context.Context) *LedgerManager { + lm := &LedgerManager{ + ledgerMap: make(ledgerMap), + bs: bs, + tasklist: NewTaskList(), + taskOut: make(chan *Task, 4), + workSignal: make(chan struct{}), + ctx: ctx, } + go lm.taskWorker() + return lm +} + +func (lm *LedgerManager) taskWorker() { + for { + nextTask := lm.tasklist.GetNext() + if nextTask == nil { + // No tasks in the list? + // Wait until there are! + select { + case <-lm.ctx.Done(): + return + case <-lm.workSignal: + } + continue + } + + select { + case <-lm.ctx.Done(): + return + case lm.taskOut <- nextTask: + } + } +} + +func (lm *LedgerManager) GetTaskChan() <-chan *Task { + return lm.taskOut } // Returns a slice of Peers with whom the local node has active sessions -func (ls *LedgerSet) Peers() []peer.Peer { - ls.lock.RLock() - defer ls.lock.RUnlock() +func (lm *LedgerManager) Peers() []peer.Peer { + lm.lock.RLock() + defer lm.lock.RUnlock() response := make([]peer.Peer, 0) - for _, ledger := range ls.ledgerMap { + for _, ledger := range lm.ledgerMap { response = append(response, ledger.Partner) } return response @@ -40,43 +81,55 @@ func (ls *LedgerSet) Peers() []peer.Peer { // BlockIsWantedByPeer returns true if peer wants the block given by this // key -func (ls *LedgerSet) BlockIsWantedByPeer(k u.Key, p peer.Peer) bool { - ls.lock.RLock() - defer ls.lock.RUnlock() +func (lm *LedgerManager) BlockIsWantedByPeer(k u.Key, p peer.Peer) 
bool { + lm.lock.RLock() + defer lm.lock.RUnlock() - ledger := ls.ledger(p) + ledger := lm.ledger(p) return ledger.WantListContains(k) } // MessageReceived performs book-keeping. Returns error if passed invalid // arguments. -func (ls *LedgerSet) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { - ls.lock.Lock() - defer ls.lock.Unlock() - - // TODO find a more elegant way to handle this check - /* - if p == nil { - return errors.New("Strategy received nil peer") - } - if m == nil { - return errors.New("Strategy received nil message") - } - */ - l := ls.ledger(p) +func (lm *LedgerManager) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { + lm.lock.Lock() + defer lm.lock.Unlock() + + l := lm.ledger(p) if m.Full() { l.wantList = wl.New() } for _, e := range m.Wantlist() { if e.Cancel { l.CancelWant(e.Key) + lm.tasklist.Cancel(e.Key, p) } else { l.Wants(e.Key, e.Priority) + lm.tasklist.Add(e.Key, e.Priority, p) + + // Signal task generation to restart (if stopped!) + select { + case lm.workSignal <- struct{}{}: + default: + } } } + for _, block := range m.Blocks() { // FIXME extract blocks.NumBytes(block) or block.NumBytes() method l.ReceivedBytes(len(block.Data)) + for _, l := range lm.ledgerMap { + if l.WantListContains(block.Key()) { + lm.tasklist.Add(block.Key(), 1, l.Partner) + + // Signal task generation to restart (if stopped!) + select { + case lm.workSignal <- struct{}{}: + default: + } + + } + } } return nil } @@ -87,39 +140,40 @@ func (ls *LedgerSet) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error // inconsistent. 
Would need to ensure that Sends and acknowledgement of the // send happen atomically -func (ls *LedgerSet) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error { - ls.lock.Lock() - defer ls.lock.Unlock() +func (lm *LedgerManager) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error { + lm.lock.Lock() + defer lm.lock.Unlock() - l := ls.ledger(p) + l := lm.ledger(p) for _, block := range m.Blocks() { l.SentBytes(len(block.Data)) l.wantList.Remove(block.Key()) + lm.tasklist.Cancel(block.Key(), p) } return nil } -func (ls *LedgerSet) NumBytesSentTo(p peer.Peer) uint64 { - ls.lock.RLock() - defer ls.lock.RUnlock() +func (lm *LedgerManager) NumBytesSentTo(p peer.Peer) uint64 { + lm.lock.RLock() + defer lm.lock.RUnlock() - return ls.ledger(p).Accounting.BytesSent + return lm.ledger(p).Accounting.BytesSent } -func (ls *LedgerSet) NumBytesReceivedFrom(p peer.Peer) uint64 { - ls.lock.RLock() - defer ls.lock.RUnlock() +func (lm *LedgerManager) NumBytesReceivedFrom(p peer.Peer) uint64 { + lm.lock.RLock() + defer lm.lock.RUnlock() - return ls.ledger(p).Accounting.BytesRecv + return lm.ledger(p).Accounting.BytesRecv } // ledger lazily instantiates a ledger -func (ls *LedgerSet) ledger(p peer.Peer) *ledger { - l, ok := ls.ledgerMap[peerKey(p.Key())] +func (lm *LedgerManager) ledger(p peer.Peer) *ledger { + l, ok := lm.ledgerMap[peerKey(p.Key())] if !ok { l = newLedger(p) - ls.ledgerMap[peerKey(p.Key())] = l + lm.ledgerMap[peerKey(p.Key())] = l } return l } diff --git a/bitswap/strategy/ledgerset_test.go b/bitswap/strategy/ledgerset_test.go index 795752a12..819489799 100644 --- a/bitswap/strategy/ledgerset_test.go +++ b/bitswap/strategy/ledgerset_test.go @@ -4,28 +4,30 @@ import ( "strings" "testing" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + blocks "github.com/jbenet/go-ipfs/blocks" message "github.com/jbenet/go-ipfs/exchange/bitswap/message" peer "github.com/jbenet/go-ipfs/peer" testutil "github.com/jbenet/go-ipfs/util/testutil" ) 
-type peerAndLedgerset struct { +type peerAndLedgermanager struct { peer.Peer - ls *LedgerSet + ls *LedgerManager } -func newPeerAndLedgerset(idStr string) peerAndLedgerset { - return peerAndLedgerset{ +func newPeerAndLedgermanager(idStr string) peerAndLedgermanager { + return peerAndLedgermanager{ Peer: testutil.NewPeerWithIDString(idStr), //Strategy: New(true), - ls: NewLedgerSet(), + ls: NewLedgerManager(nil, context.TODO()), } } func TestConsistentAccounting(t *testing.T) { - sender := newPeerAndLedgerset("Ernie") - receiver := newPeerAndLedgerset("Bert") + sender := newPeerAndLedgermanager("Ernie") + receiver := newPeerAndLedgermanager("Bert") // Send messages from Ernie to Bert for i := 0; i < 1000; i++ { @@ -56,8 +58,8 @@ func TestConsistentAccounting(t *testing.T) { } func TestBlockRecordedAsWantedAfterMessageReceived(t *testing.T) { - beggar := newPeerAndLedgerset("can't be chooser") - chooser := newPeerAndLedgerset("chooses JIF") + beggar := newPeerAndLedgermanager("can't be chooser") + chooser := newPeerAndLedgermanager("chooses JIF") block := blocks.NewBlock([]byte("data wanted by beggar")) @@ -74,8 +76,8 @@ func TestBlockRecordedAsWantedAfterMessageReceived(t *testing.T) { func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { - sanfrancisco := newPeerAndLedgerset("sf") - seattle := newPeerAndLedgerset("sea") + sanfrancisco := newPeerAndLedgermanager("sf") + seattle := newPeerAndLedgermanager("sea") m := message.New() @@ -95,7 +97,7 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { } } -func peerIsPartner(p peer.Peer, ls *LedgerSet) bool { +func peerIsPartner(p peer.Peer, ls *LedgerManager) bool { for _, partner := range ls.Peers() { if partner.Key() == p.Key() { return true diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go index ff7f4d74d..5b0d9830d 100644 --- a/bitswap/strategy/strategy.go +++ b/bitswap/strategy/strategy.go @@ -1,15 +1,16 @@ package strategy import ( - blocks 
"github.com/jbenet/go-ipfs/blocks" - bstore "github.com/jbenet/go-ipfs/blocks/blockstore" - wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" - peer "github.com/jbenet/go-ipfs/peer" + //blocks "github.com/jbenet/go-ipfs/blocks" + //bstore "github.com/jbenet/go-ipfs/blocks/blockstore" + //wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" + //peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" ) var log = u.Logger("strategy") +/* // TODO niceness should be on a per-peer basis. Use-case: Certain peers are // "trusted" and/or controlled by a single human user. The user may want for // these peers to exchange data freely @@ -29,12 +30,7 @@ type strategist struct { strategyFunc } -type Task struct { - Peer peer.Peer - Blocks []*blocks.Block -} - -func (s *strategist) GetTasks(bandwidth int, ledgers *LedgerSet, bs bstore.Blockstore) ([]*Task, error) { +func (s *strategist) GetTasks(bandwidth int, ledgers *LedgerManager, bs bstore.Blockstore) ([]*Task, error) { var tasks []*Task ledgers.lock.RLock() @@ -87,3 +83,5 @@ func test() {} func (s *strategist) Seed(int64) { // TODO } + +*/ diff --git a/bitswap/strategy/tasklist.go b/bitswap/strategy/tasklist.go new file mode 100644 index 000000000..fb8c64109 --- /dev/null +++ b/bitswap/strategy/tasklist.go @@ -0,0 +1,72 @@ +package strategy + +import ( + peer "github.com/jbenet/go-ipfs/peer" + u "github.com/jbenet/go-ipfs/util" +) + +// TODO: at some point, the strategy needs to plug in here +// to help decide how to sort tasks (on add) and how to select +// tasks (on getnext). For now, we are assuming a dumb/nice strategy. 
+type TaskList struct { + tasks []*Task + taskmap map[u.Key]*Task +} + +func NewTaskList() *TaskList { + return &TaskList{ + taskmap: make(map[u.Key]*Task), + } +} + +type Task struct { + Key u.Key + Target peer.Peer + theirPriority int +} + +// Add currently adds a new task to the end of the list +// TODO: make this into a priority queue +func (tl *TaskList) Add(block u.Key, priority int, to peer.Peer) { + if task, ok := tl.taskmap[to.Key()+block]; ok { + // TODO: when priority queue is implemented, + // rearrange this Task + task.theirPriority = priority + return + } + task := &Task{ + Key: block, + Target: to, + theirPriority: priority, + } + tl.tasks = append(tl.tasks, task) + tl.taskmap[to.Key()+block] = task +} + +// GetNext returns the next task to be performed by bitswap +// the task is then removed from the list +func (tl *TaskList) GetNext() *Task { + var out *Task + for len(tl.tasks) > 0 { + // TODO: instead of zero, use exponential distribution + // it will help reduce the chance of receiving + // the same block from multiple peers + out = tl.tasks[0] + tl.tasks = tl.tasks[1:] + delete(tl.taskmap, out.Target.Key()+out.Key) + // Filter out blocks that have been cancelled + if out.theirPriority >= 0 { + break + } + } + + return out +} + +// Cancel lazily cancels the sending of a block to a given peer +func (tl *TaskList) Cancel(k u.Key, p peer.Peer) { + t, ok := tl.taskmap[p.Key()+k] + if ok { + t.theirPriority = -1 + } +} From 466a4a8b849e8d71bb933af3a356ac0169b979ac Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 16 Dec 2014 02:14:30 +0000 Subject: [PATCH 0207/1035] renaming and removing empty strategy file This commit was moved from ipfs/go-bitswap@130391c7b7c0b920c53ba5bcbc370a53705c3312 --- .../{ledgerset.go => ledgermanager.go} | 2 + ...edgerset_test.go => ledgermanager_test.go} | 0 bitswap/strategy/strategy.go | 87 ------------------- 3 files changed, 2 insertions(+), 87 deletions(-) rename bitswap/strategy/{ledgerset.go => ledgermanager.go} (99%) 
rename bitswap/strategy/{ledgerset_test.go => ledgermanager_test.go} (100%) delete mode 100644 bitswap/strategy/strategy.go diff --git a/bitswap/strategy/ledgerset.go b/bitswap/strategy/ledgermanager.go similarity index 99% rename from bitswap/strategy/ledgerset.go rename to bitswap/strategy/ledgermanager.go index 92808d2f0..4712b6a3e 100644 --- a/bitswap/strategy/ledgerset.go +++ b/bitswap/strategy/ledgermanager.go @@ -12,6 +12,8 @@ import ( u "github.com/jbenet/go-ipfs/util" ) +var log = u.Logger("strategy") + // LedgerMap lists Ledgers by their Partner key. type ledgerMap map[peerKey]*ledger diff --git a/bitswap/strategy/ledgerset_test.go b/bitswap/strategy/ledgermanager_test.go similarity index 100% rename from bitswap/strategy/ledgerset_test.go rename to bitswap/strategy/ledgermanager_test.go diff --git a/bitswap/strategy/strategy.go b/bitswap/strategy/strategy.go deleted file mode 100644 index 5b0d9830d..000000000 --- a/bitswap/strategy/strategy.go +++ /dev/null @@ -1,87 +0,0 @@ -package strategy - -import ( - //blocks "github.com/jbenet/go-ipfs/blocks" - //bstore "github.com/jbenet/go-ipfs/blocks/blockstore" - //wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" - //peer "github.com/jbenet/go-ipfs/peer" - u "github.com/jbenet/go-ipfs/util" -) - -var log = u.Logger("strategy") - -/* -// TODO niceness should be on a per-peer basis. Use-case: Certain peers are -// "trusted" and/or controlled by a single human user. 
The user may want for -// these peers to exchange data freely -func New(nice bool) Strategy { - var stratFunc strategyFunc - if nice { - stratFunc = yesManStrategy - } else { - stratFunc = standardStrategy - } - return &strategist{ - strategyFunc: stratFunc, - } -} - -type strategist struct { - strategyFunc -} - -func (s *strategist) GetTasks(bandwidth int, ledgers *LedgerManager, bs bstore.Blockstore) ([]*Task, error) { - var tasks []*Task - - ledgers.lock.RLock() - var partners []peer.Peer - for _, ledger := range ledgers.ledgerMap { - if s.strategyFunc(ledger) { - partners = append(partners, ledger.Partner) - } - } - ledgers.lock.RUnlock() - if len(partners) == 0 { - return nil, nil - } - - bandwidthPerPeer := bandwidth / len(partners) - for _, p := range partners { - blksForPeer, err := s.getSendableBlocks(ledgers.ledger(p).wantList, bs, bandwidthPerPeer) - if err != nil { - return nil, err - } - tasks = append(tasks, &Task{ - Peer: p, - Blocks: blksForPeer, - }) - } - - return tasks, nil -} - -func (s *strategist) getSendableBlocks(wantlist *wl.Wantlist, bs bstore.Blockstore, bw int) ([]*blocks.Block, error) { - var outblocks []*blocks.Block - for _, e := range wantlist.Entries() { - block, err := bs.Get(e.Value) - if err == bstore.ErrNotFound { - continue - } - if err != nil { - return nil, err - } - outblocks = append(outblocks, block) - bw -= len(block.Data) - if bw <= 0 { - break - } - } - return outblocks, nil -} - -func test() {} -func (s *strategist) Seed(int64) { - // TODO -} - -*/ From 4043c4a18d96e1d4df0887a5e143818380c35732 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 16 Dec 2014 04:52:55 +0000 Subject: [PATCH 0208/1035] some cleanup before CR This commit was moved from ipfs/go-bitswap@17d40121dc642881a904598f19486e786973a4a2 --- bitswap/bitswap.go | 11 ++++------- bitswap/strategy/interface.go | 12 ------------ bitswap/wantlist/wantlist.go | 13 +++++++++++++ 3 files changed, 17 insertions(+), 19 deletions(-) delete mode 100644 
bitswap/strategy/interface.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index c0df58551..eb59542e4 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -32,10 +32,6 @@ var providerRequestTimeout = time.Second * 10 var hasBlockTimeout = time.Second * 15 var rebroadcastDelay = time.Second * 10 -const roundTime = time.Second / 2 - -var bandwidthPerRound = 500000 - // New initializes a BitSwap instance that communicates over the // provided BitSwapNetwork. This function registers the returned instance as // the network delegate. @@ -64,7 +60,7 @@ func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, rout } network.SetDelegate(bs) go bs.clientWorker(ctx) - go bs.roundWorker(ctx) + go bs.taskWorker(ctx) return bs } @@ -90,7 +86,8 @@ type bitswap struct { batchRequests chan []u.Key // strategy makes decisions about how to interact with partners. - strategy strategy.Strategy + // TODO: strategy commented out until we have a use for it again + //strategy strategy.Strategy ledgermanager *strategy.LedgerManager @@ -234,7 +231,7 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wl.Wan wg.Wait() } -func (bs *bitswap) roundWorker(ctx context.Context) { +func (bs *bitswap) taskWorker(ctx context.Context) { for { select { case <-ctx.Done(): diff --git a/bitswap/strategy/interface.go b/bitswap/strategy/interface.go deleted file mode 100644 index 62cd77b8a..000000000 --- a/bitswap/strategy/interface.go +++ /dev/null @@ -1,12 +0,0 @@ -package strategy - -import ( - bstore "github.com/jbenet/go-ipfs/blocks/blockstore" -) - -type Strategy interface { - // Seed initializes the decider to a deterministic state - Seed(int64) - - GetTasks(bandwidth int, ledgers *LedgerManager, bs bstore.Blockstore) ([]*Task, error) -} diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 0de0ba803..e20bb4457 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -56,6 +56,19 @@ func (es 
entrySlice) Less(i, j int) bool { return es[i].Priority > es[j].Priorit func (w *Wantlist) Entries() []*Entry { w.lk.RLock() defer w.lk.RUnlock() + + var es entrySlice + + for _, e := range w.set { + es = append(es, e) + } + sort.Sort(es) + return es +} + +func (w *Wantlist) SortedEntries() []*Entry { + w.lk.RLock() + defer w.lk.RUnlock() var es entrySlice for _, e := range w.set { From 698afa139a5a6f58038289132633ea45cde70833 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 15 Dec 2014 22:38:57 -0800 Subject: [PATCH 0209/1035] refactor() message API performing CR in the form of a PR. Let me know what you think. License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@fe040f76725658be57d78b68c26f7db542006567 --- bitswap/bitswap.go | 6 +++--- bitswap/message/message.go | 22 ++++++++++++++++++---- bitswap/message/message_test.go | 20 ++++++++++---------- bitswap/strategy/ledgermanager_test.go | 2 +- 4 files changed, 32 insertions(+), 18 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index eb59542e4..9b92bb0aa 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -169,7 +169,7 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e } message := bsmsg.New() for _, wanted := range bs.wantlist.Entries() { - message.AddEntry(wanted.Value, wanted.Priority, false) + message.AddEntry(wanted.Value, wanted.Priority) } wg := sync.WaitGroup{} for peerToQuery := range peers { @@ -207,7 +207,7 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wl.Wan message := bsmsg.New() message.SetFull(true) for _, e := range bs.wantlist.Entries() { - message.AddEntry(e.Value, e.Priority, false) + message.AddEntry(e.Value, e.Priority) } ps := pset.NewPeerSet() @@ -335,7 +335,7 @@ func (bs *bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { message := bsmsg.New() message.SetFull(false) for _, k := range bkeys { - message.AddEntry(k, 0, true) + message.Cancel(k) } for 
_, p := range bs.ledgermanager.Peers() { err := bs.send(ctx, p, message) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index b636e2024..478d8e258 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -24,7 +24,9 @@ type BitSwapMessage interface { Blocks() []*blocks.Block // AddEntry adds an entry to the Wantlist. - AddEntry(key u.Key, priority int, cancel bool) + AddEntry(key u.Key, priority int) + + Cancel(key u.Key) // Sets whether or not the contained wantlist represents the entire wantlist // true = full wantlist @@ -50,6 +52,10 @@ type impl struct { } func New() BitSwapMessage { + return newMsg() +} + +func newMsg() *impl { return &impl{ blocks: make(map[u.Key]*blocks.Block), wantlist: make(map[u.Key]*Entry), @@ -64,10 +70,10 @@ type Entry struct { } func newMessageFromProto(pbm pb.Message) BitSwapMessage { - m := New() + m := newMsg() m.SetFull(pbm.GetWantlist().GetFull()) for _, e := range pbm.GetWantlist().GetEntries() { - m.AddEntry(u.Key(e.GetBlock()), int(e.GetPriority()), e.GetCancel()) + m.addEntry(u.Key(e.GetBlock()), int(e.GetPriority()), e.GetCancel()) } for _, d := range pbm.GetBlocks() { b := blocks.NewBlock(d) @@ -100,7 +106,15 @@ func (m *impl) Blocks() []*blocks.Block { return bs } -func (m *impl) AddEntry(k u.Key, priority int, cancel bool) { +func (m *impl) Cancel(k u.Key) { + m.addEntry(k, 0, true) +} + +func (m *impl) AddEntry(k u.Key, priority int) { + m.addEntry(k, priority, false) +} + +func (m *impl) addEntry(k u.Key, priority int, cancel bool) { e, exists := m.wantlist[k] if exists { e.Priority = priority diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 29eb6eb4e..a0df38c0b 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -14,7 +14,7 @@ import ( func TestAppendWanted(t *testing.T) { const str = "foo" m := New() - m.AddEntry(u.Key(str), 1, false) + m.AddEntry(u.Key(str), 1) if !wantlistContains(m.ToProto().GetWantlist(), 
str) { t.Fail() @@ -63,7 +63,7 @@ func TestWantlist(t *testing.T) { keystrs := []string{"foo", "bar", "baz", "bat"} m := New() for _, s := range keystrs { - m.AddEntry(u.Key(s), 1, false) + m.AddEntry(u.Key(s), 1) } exported := m.Wantlist() @@ -86,7 +86,7 @@ func TestCopyProtoByValue(t *testing.T) { const str = "foo" m := New() protoBeforeAppend := m.ToProto() - m.AddEntry(u.Key(str), 1, false) + m.AddEntry(u.Key(str), 1) if wantlistContains(protoBeforeAppend.GetWantlist(), str) { t.Fail() } @@ -94,11 +94,11 @@ func TestCopyProtoByValue(t *testing.T) { func TestToNetFromNetPreservesWantList(t *testing.T) { original := New() - original.AddEntry(u.Key("M"), 1, false) - original.AddEntry(u.Key("B"), 1, false) - original.AddEntry(u.Key("D"), 1, false) - original.AddEntry(u.Key("T"), 1, false) - original.AddEntry(u.Key("F"), 1, false) + original.AddEntry(u.Key("M"), 1) + original.AddEntry(u.Key("B"), 1) + original.AddEntry(u.Key("D"), 1) + original.AddEntry(u.Key("T"), 1) + original.AddEntry(u.Key("F"), 1) var buf bytes.Buffer if err := original.ToNet(&buf); err != nil { @@ -174,8 +174,8 @@ func TestDuplicates(t *testing.T) { b := blocks.NewBlock([]byte("foo")) msg := New() - msg.AddEntry(b.Key(), 1, false) - msg.AddEntry(b.Key(), 1, false) + msg.AddEntry(b.Key(), 1) + msg.AddEntry(b.Key(), 1) if len(msg.Wantlist()) != 1 { t.Fatal("Duplicate in BitSwapMessage") } diff --git a/bitswap/strategy/ledgermanager_test.go b/bitswap/strategy/ledgermanager_test.go index 819489799..f2a98cb77 100644 --- a/bitswap/strategy/ledgermanager_test.go +++ b/bitswap/strategy/ledgermanager_test.go @@ -64,7 +64,7 @@ func TestBlockRecordedAsWantedAfterMessageReceived(t *testing.T) { block := blocks.NewBlock([]byte("data wanted by beggar")) messageFromBeggarToChooser := message.New() - messageFromBeggarToChooser.AddEntry(block.Key(), 1, false) + messageFromBeggarToChooser.AddEntry(block.Key(), 1) chooser.ls.MessageReceived(beggar.Peer, messageFromBeggarToChooser) // for this test, doesn't 
matter if you record that beggar sent From 4caa814afbf2927fe00e9d06cdabc8eac8876b8a Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 15 Dec 2014 22:42:38 -0800 Subject: [PATCH 0210/1035] remove dead code License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@7071ef57778872ca85e1adcc2ea2f39858c05379 --- bitswap/strategy/ledger.go | 9 +++++++++ bitswap/strategy/math.go | 34 ---------------------------------- bitswap/strategy/math_test.go | 17 ----------------- 3 files changed, 9 insertions(+), 51 deletions(-) delete mode 100644 bitswap/strategy/math.go delete mode 100644 bitswap/strategy/math_test.go diff --git a/bitswap/strategy/ledger.go b/bitswap/strategy/ledger.go index 684d383ef..649c1e73e 100644 --- a/bitswap/strategy/ledger.go +++ b/bitswap/strategy/ledger.go @@ -46,6 +46,15 @@ type ledger struct { sentToPeer map[u.Key]time.Time } +type debtRatio struct { + BytesSent uint64 + BytesRecv uint64 +} + +func (dr *debtRatio) Value() float64 { + return float64(dr.BytesSent) / float64(dr.BytesRecv+1) +} + func (l *ledger) SentBytes(n int) { l.exchangeCount++ l.lastExchange = time.Now() diff --git a/bitswap/strategy/math.go b/bitswap/strategy/math.go deleted file mode 100644 index c5339e5b3..000000000 --- a/bitswap/strategy/math.go +++ /dev/null @@ -1,34 +0,0 @@ -package strategy - -import ( - "math" - "math/rand" -) - -type strategyFunc func(*ledger) bool - -// TODO avoid using rand.Float64 method. it uses a singleton lock and may cause -// performance issues. 
Instead, instantiate a rand struct and use that to call -// Float64() -func standardStrategy(l *ledger) bool { - return rand.Float64() <= probabilitySend(l.Accounting.Value()) -} - -func yesManStrategy(l *ledger) bool { - return true -} - -func probabilitySend(ratio float64) float64 { - x := 1 + math.Exp(6-3*ratio) - y := 1 / x - return 1 - y -} - -type debtRatio struct { - BytesSent uint64 - BytesRecv uint64 -} - -func (dr *debtRatio) Value() float64 { - return float64(dr.BytesSent) / float64(dr.BytesRecv+1) -} diff --git a/bitswap/strategy/math_test.go b/bitswap/strategy/math_test.go deleted file mode 100644 index 58092bc09..000000000 --- a/bitswap/strategy/math_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package strategy - -import ( - "testing" -) - -func TestProbabilitySendDecreasesAsRatioIncreases(t *testing.T) { - grateful := debtRatio{BytesSent: 0, BytesRecv: 10000} - pWhenGrateful := probabilitySend(grateful.Value()) - - abused := debtRatio{BytesSent: 10000, BytesRecv: 0} - pWhenAbused := probabilitySend(abused.Value()) - - if pWhenGrateful < pWhenAbused { - t.Fail() - } -} From 7891a9023ead9dbb49585055caf4c9b484649d5d Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 15 Dec 2014 22:46:10 -0800 Subject: [PATCH 0211/1035] queue-like naming License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@66c94d7760917822e8e8d13a494f4a46f1f51fda --- bitswap/strategy/ledgermanager.go | 6 +++--- bitswap/strategy/tasklist.go | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/bitswap/strategy/ledgermanager.go b/bitswap/strategy/ledgermanager.go index 4712b6a3e..73cd94711 100644 --- a/bitswap/strategy/ledgermanager.go +++ b/bitswap/strategy/ledgermanager.go @@ -45,7 +45,7 @@ func NewLedgerManager(bs bstore.Blockstore, ctx context.Context) *LedgerManager func (lm *LedgerManager) taskWorker() { for { - nextTask := lm.tasklist.GetNext() + nextTask := lm.tasklist.Pop() if nextTask == nil { // No tasks in the list? 
// Wait until there are! @@ -107,7 +107,7 @@ func (lm *LedgerManager) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) er lm.tasklist.Cancel(e.Key, p) } else { l.Wants(e.Key, e.Priority) - lm.tasklist.Add(e.Key, e.Priority, p) + lm.tasklist.Push(e.Key, e.Priority, p) // Signal task generation to restart (if stopped!) select { @@ -122,7 +122,7 @@ func (lm *LedgerManager) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) er l.ReceivedBytes(len(block.Data)) for _, l := range lm.ledgerMap { if l.WantListContains(block.Key()) { - lm.tasklist.Add(block.Key(), 1, l.Partner) + lm.tasklist.Push(block.Key(), 1, l.Partner) // Signal task generation to restart (if stopped!) select { diff --git a/bitswap/strategy/tasklist.go b/bitswap/strategy/tasklist.go index fb8c64109..f0a1b7d00 100644 --- a/bitswap/strategy/tasklist.go +++ b/bitswap/strategy/tasklist.go @@ -25,9 +25,9 @@ type Task struct { theirPriority int } -// Add currently adds a new task to the end of the list +// Push currently adds a new task to the end of the list // TODO: make this into a priority queue -func (tl *TaskList) Add(block u.Key, priority int, to peer.Peer) { +func (tl *TaskList) Push(block u.Key, priority int, to peer.Peer) { if task, ok := tl.taskmap[to.Key()+block]; ok { // TODO: when priority queue is implemented, // rearrange this Task @@ -43,9 +43,9 @@ func (tl *TaskList) Add(block u.Key, priority int, to peer.Peer) { tl.taskmap[to.Key()+block] = task } -// GetNext returns the next task to be performed by bitswap -// the task is then removed from the list -func (tl *TaskList) GetNext() *Task { +// Pop returns the next task to be performed by bitswap the task is then +// removed from the list +func (tl *TaskList) Pop() *Task { var out *Task for len(tl.tasks) > 0 { // TODO: instead of zero, use exponential distribution From a03069f79768fb301f2daea6f6771b90f06e14c4 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 15 Dec 2014 22:52:27 -0800 Subject: [PATCH 0212/1035] name findOrCreate 
License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@365f43ad685aca12e2787a47e9187dca61ac2ada --- bitswap/strategy/ledgermanager.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/bitswap/strategy/ledgermanager.go b/bitswap/strategy/ledgermanager.go index 73cd94711..d6699e9f0 100644 --- a/bitswap/strategy/ledgermanager.go +++ b/bitswap/strategy/ledgermanager.go @@ -87,7 +87,7 @@ func (lm *LedgerManager) BlockIsWantedByPeer(k u.Key, p peer.Peer) bool { lm.lock.RLock() defer lm.lock.RUnlock() - ledger := lm.ledger(p) + ledger := lm.findOrCreate(p) return ledger.WantListContains(k) } @@ -97,7 +97,7 @@ func (lm *LedgerManager) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) er lm.lock.Lock() defer lm.lock.Unlock() - l := lm.ledger(p) + l := lm.findOrCreate(p) if m.Full() { l.wantList = wl.New() } @@ -146,7 +146,7 @@ func (lm *LedgerManager) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error lm.lock.Lock() defer lm.lock.Unlock() - l := lm.ledger(p) + l := lm.findOrCreate(p) for _, block := range m.Blocks() { l.SentBytes(len(block.Data)) l.wantList.Remove(block.Key()) @@ -160,18 +160,18 @@ func (lm *LedgerManager) NumBytesSentTo(p peer.Peer) uint64 { lm.lock.RLock() defer lm.lock.RUnlock() - return lm.ledger(p).Accounting.BytesSent + return lm.findOrCreate(p).Accounting.BytesSent } func (lm *LedgerManager) NumBytesReceivedFrom(p peer.Peer) uint64 { lm.lock.RLock() defer lm.lock.RUnlock() - return lm.ledger(p).Accounting.BytesRecv + return lm.findOrCreate(p).Accounting.BytesRecv } // ledger lazily instantiates a ledger -func (lm *LedgerManager) ledger(p peer.Peer) *ledger { +func (lm *LedgerManager) findOrCreate(p peer.Peer) *ledger { l, ok := lm.ledgerMap[peerKey(p.Key())] if !ok { l = newLedger(p) From 31a6f2fc7a11b86ae1c735910d55451950159b02 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 15 Dec 2014 23:00:53 -0800 Subject: [PATCH 0213/1035] avoid attaching context to object when it's 
not necessary. License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@211fa2386a154efdfaf7ab5685f27c66ad19a3f4 --- bitswap/strategy/ledgermanager.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/bitswap/strategy/ledgermanager.go b/bitswap/strategy/ledgermanager.go index d6699e9f0..df10072eb 100644 --- a/bitswap/strategy/ledgermanager.go +++ b/bitswap/strategy/ledgermanager.go @@ -27,7 +27,6 @@ type LedgerManager struct { tasklist *TaskList taskOut chan *Task workSignal chan struct{} - ctx context.Context } func NewLedgerManager(bs bstore.Blockstore, ctx context.Context) *LedgerManager { @@ -37,20 +36,19 @@ func NewLedgerManager(bs bstore.Blockstore, ctx context.Context) *LedgerManager tasklist: NewTaskList(), taskOut: make(chan *Task, 4), workSignal: make(chan struct{}), - ctx: ctx, } - go lm.taskWorker() + go lm.taskWorker(ctx) return lm } -func (lm *LedgerManager) taskWorker() { +func (lm *LedgerManager) taskWorker(ctx context.Context) { for { nextTask := lm.tasklist.Pop() if nextTask == nil { // No tasks in the list? // Wait until there are! 
select { - case <-lm.ctx.Done(): + case <-ctx.Done(): return case <-lm.workSignal: } @@ -58,7 +56,7 @@ func (lm *LedgerManager) taskWorker() { } select { - case <-lm.ctx.Done(): + case <-ctx.Done(): return case lm.taskOut <- nextTask: } From 59a180a7ed8f6b045e49a68e4307471d71f0832b Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 16 Dec 2014 18:33:36 +0000 Subject: [PATCH 0214/1035] refactor peerSet This commit was moved from ipfs/go-bitswap@b88f039420613c93ffd13e81e2c4c66f5edc76cb --- bitswap/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 9b92bb0aa..5cf28c96d 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -222,7 +222,7 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wl.Wan providers := bs.routing.FindProvidersAsync(child, k, maxProvidersPerRequest) for prov := range providers { - if ps.AddIfSmallerThan(prov, -1) { //Do once per peer + if ps.TryAdd(prov) { //Do once per peer bs.send(ctx, prov, message) } } From c0c8a969005eba5b4d775fe95631213a59c31638 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 20:26:41 -0800 Subject: [PATCH 0215/1035] fix(test): nil Blockstore License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@9fac2f30bfb2adff4d9c91f96bf7142de6dba2ad --- bitswap/strategy/ledgermanager_test.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/bitswap/strategy/ledgermanager_test.go b/bitswap/strategy/ledgermanager_test.go index f2a98cb77..eb89c9959 100644 --- a/bitswap/strategy/ledgermanager_test.go +++ b/bitswap/strategy/ledgermanager_test.go @@ -4,9 +4,11 @@ import ( "strings" "testing" - "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + sync 
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" blocks "github.com/jbenet/go-ipfs/blocks" + blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" message "github.com/jbenet/go-ipfs/exchange/bitswap/message" peer "github.com/jbenet/go-ipfs/peer" testutil "github.com/jbenet/go-ipfs/util/testutil" @@ -21,7 +23,7 @@ func newPeerAndLedgermanager(idStr string) peerAndLedgermanager { return peerAndLedgermanager{ Peer: testutil.NewPeerWithIDString(idStr), //Strategy: New(true), - ls: NewLedgerManager(nil, context.TODO()), + ls: NewLedgerManager(blockstore.NewBlockstore(sync.MutexWrap(ds.NewMapDatastore())), context.TODO()), } } From cb6ac68f0a8fa60546c573254797eaa1a81b443b Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 20:10:20 -0800 Subject: [PATCH 0216/1035] style: line wrapping License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@6d88f9aaf7971deac5c32b4f82a74c3f2b255603 --- bitswap/bitswap.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 5cf28c96d..bccd04418 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -32,10 +32,10 @@ var providerRequestTimeout = time.Second * 10 var hasBlockTimeout = time.Second * 15 var rebroadcastDelay = time.Second * 10 -// New initializes a BitSwap instance that communicates over the -// provided BitSwapNetwork. This function registers the returned instance as -// the network delegate. -// Runs until context is cancelled +// New initializes a BitSwap instance that communicates over the provided +// BitSwapNetwork. This function registers the returned instance as the network +// delegate. +// Runs until context is cancelled. 
func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, routing bsnet.Routing, bstore blockstore.Blockstore, nice bool) exchange.Interface { From eeffb2b3a5f2204a7e20eaa1623847d6a428b04b Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 20:09:13 -0800 Subject: [PATCH 0217/1035] fix: move to callsite so public callers don't experience the internal timeout rule License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@bf61c8ce5c124c27e8b06f9e80d70e6b01d4011f --- bitswap/bitswap.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index bccd04418..57ae6a6ac 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -159,8 +159,7 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { } bs.wantlist.Remove(blk.Key()) bs.notifications.Publish(blk) - child, _ := context.WithTimeout(ctx, hasBlockTimeout) - return bs.routing.Provide(child, blk.Key()) + return bs.routing.Provide(ctx, blk.Key()) } func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) error { @@ -319,7 +318,8 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm var blkeys []u.Key for _, block := range incoming.Blocks() { blkeys = append(blkeys, block.Key()) - if err := bs.HasBlock(ctx, block); err != nil { + hasBlockCtx, _ := context.WithTimeout(ctx, hasBlockTimeout) + if err := bs.HasBlock(hasBlockCtx, block); err != nil { log.Error(err) } } From 5b50d34bf2303a3e28d00db5b9ae2fd762abb061 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 20:09:57 -0800 Subject: [PATCH 0218/1035] style constify variables good to const until it's required for them to be variable. 
TODO pass them in as configuration options This commit was moved from ipfs/go-bitswap@f03e629fe01a87a5f6276eaa2fce5fbbfa628962 --- bitswap/bitswap.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 57ae6a6ac..e95ffbc4f 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -24,13 +24,17 @@ import ( var log = eventlog.Logger("bitswap") -// Number of providers to request for sending a wantlist to -// TODO: if a 'non-nice' strategy is implemented, consider increasing this value -const maxProvidersPerRequest = 3 +const ( + // Number of providers to request for sending a wantlist to + // TODO: if a 'non-nice' strategy is implemented, consider increasing this value + maxProvidersPerRequest = 3 + providerRequestTimeout = time.Second * 10 + hasBlockTimeout = time.Second * 15 +) -var providerRequestTimeout = time.Second * 10 -var hasBlockTimeout = time.Second * 15 -var rebroadcastDelay = time.Second * 10 +var ( + rebroadcastDelay = time.Second * 10 +) // New initializes a BitSwap instance that communicates over the provided // BitSwapNetwork. This function registers the returned instance as the network From a08988a16604b0f6c9ea0a2fb394104cdee76112 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 20:27:35 -0800 Subject: [PATCH 0219/1035] refactor: change Tasks to Outbox notice that moving the blockstore fetch into the manager removes the weird error handling case. 
License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@9069a8aa5ddfe56a58edbb95c8081a488597ae2c --- bitswap/bitswap.go | 14 ++------------ bitswap/strategy/ledgermanager.go | 25 +++++++++++++++++++------ 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index e95ffbc4f..4458db946 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -239,18 +239,8 @@ func (bs *bitswap) taskWorker(ctx context.Context) { select { case <-ctx.Done(): return - case task := <-bs.ledgermanager.GetTaskChan(): - block, err := bs.blockstore.Get(task.Key) - if err != nil { - log.Errorf("Expected to have block %s, but it was not found!", task.Key) - continue - } - - message := bsmsg.New() - message.AddBlock(block) - // TODO: maybe add keys from our wantlist? - - bs.send(ctx, task.Target, message) + case envelope := <-bs.ledgermanager.Outbox(): + bs.send(ctx, envelope.Peer, envelope.Message) } } } diff --git a/bitswap/strategy/ledgermanager.go b/bitswap/strategy/ledgermanager.go index df10072eb..3c79c855c 100644 --- a/bitswap/strategy/ledgermanager.go +++ b/bitswap/strategy/ledgermanager.go @@ -20,12 +20,17 @@ type ledgerMap map[peerKey]*ledger // FIXME share this externally type peerKey u.Key +type Envelope struct { + Peer peer.Peer + Message bsmsg.BitSwapMessage +} + type LedgerManager struct { lock sync.RWMutex ledgerMap ledgerMap bs bstore.Blockstore tasklist *TaskList - taskOut chan *Task + outbox chan Envelope workSignal chan struct{} } @@ -34,7 +39,7 @@ func NewLedgerManager(bs bstore.Blockstore, ctx context.Context) *LedgerManager ledgerMap: make(ledgerMap), bs: bs, tasklist: NewTaskList(), - taskOut: make(chan *Task, 4), + outbox: make(chan Envelope, 4), // TODO extract constant workSignal: make(chan struct{}), } go lm.taskWorker(ctx) @@ -54,17 +59,25 @@ func (lm *LedgerManager) taskWorker(ctx context.Context) { } continue } - + block, err := lm.bs.Get(nextTask.Key) + if err != nil { + continue 
// TODO maybe return an error + } + // construct message here so we can make decisions about any additional + // information we may want to include at this time. + m := bsmsg.New() + m.AddBlock(block) + // TODO: maybe add keys from our wantlist? select { case <-ctx.Done(): return - case lm.taskOut <- nextTask: + case lm.outbox <- Envelope{Peer: nextTask.Target, Message: m}: } } } -func (lm *LedgerManager) GetTaskChan() <-chan *Task { - return lm.taskOut +func (lm *LedgerManager) Outbox() <-chan Envelope { + return lm.outbox } // Returns a slice of Peers with whom the local node has active sessions From 35b7a9f4e126f640c1c1d643b96eda8f4c77d25f Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 20:31:49 -0800 Subject: [PATCH 0220/1035] refactor: avoid loop reuse License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@33d8110e41b8eb7e0e6c3a180da31f33ec0b4052 --- bitswap/bitswap.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 4458db946..998114192 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -309,14 +309,16 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm // TODO: this is bad, and could be easily abused. // Should only track *useful* messages in ledger - var blkeys []u.Key for _, block := range incoming.Blocks() { - blkeys = append(blkeys, block.Key()) hasBlockCtx, _ := context.WithTimeout(ctx, hasBlockTimeout) if err := bs.HasBlock(hasBlockCtx, block); err != nil { log.Error(err) } } + var blkeys []u.Key + for _, block := range incoming.Blocks() { + blkeys = append(blkeys, block.Key()) + } if len(blkeys) > 0 { bs.cancelBlocks(ctx, blkeys) } From ccedef2c86e8fe85a1535b0739a9bd051ed1d430 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 20:35:26 -0800 Subject: [PATCH 0221/1035] fix: move the check into the function. 
function should be a no-op when passed an empty slice License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@2b07d00f3c63f09b9da46b2d3c114bd869165a4f --- bitswap/bitswap.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 998114192..f1ae4b556 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -315,19 +315,20 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm log.Error(err) } } - var blkeys []u.Key + var keys []u.Key for _, block := range incoming.Blocks() { - blkeys = append(blkeys, block.Key()) - } - if len(blkeys) > 0 { - bs.cancelBlocks(ctx, blkeys) + keys = append(keys, block.Key()) } + bs.cancelBlocks(ctx, keys) // TODO: consider changing this function to not return anything return nil, nil } func (bs *bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { + if len(bkeys) < 1 { + return + } message := bsmsg.New() message.SetFull(false) for _, k := range bkeys { From 7b985610e4d6e0d698837699275add990e8167cf Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 20:52:30 -0800 Subject: [PATCH 0222/1035] refactor: context first in argument list (merely by convention) License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@997165aaaaa8ef5de2f5021e42cea21490dff008 --- bitswap/bitswap.go | 2 +- bitswap/strategy/ledgermanager.go | 2 +- bitswap/strategy/ledgermanager_test.go | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index f1ae4b556..cae7ab1e8 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -56,7 +56,7 @@ func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, rout blockstore: bstore, cancelFunc: cancelFunc, notifications: notif, - ledgermanager: strategy.NewLedgerManager(bstore, ctx), + ledgermanager: strategy.NewLedgerManager(ctx, bstore), routing: routing, sender: 
network, wantlist: wl.New(), diff --git a/bitswap/strategy/ledgermanager.go b/bitswap/strategy/ledgermanager.go index 3c79c855c..1ea61bb7d 100644 --- a/bitswap/strategy/ledgermanager.go +++ b/bitswap/strategy/ledgermanager.go @@ -34,7 +34,7 @@ type LedgerManager struct { workSignal chan struct{} } -func NewLedgerManager(bs bstore.Blockstore, ctx context.Context) *LedgerManager { +func NewLedgerManager(ctx context.Context, bs bstore.Blockstore) *LedgerManager { lm := &LedgerManager{ ledgerMap: make(ledgerMap), bs: bs, diff --git a/bitswap/strategy/ledgermanager_test.go b/bitswap/strategy/ledgermanager_test.go index eb89c9959..5c78f2f81 100644 --- a/bitswap/strategy/ledgermanager_test.go +++ b/bitswap/strategy/ledgermanager_test.go @@ -23,7 +23,8 @@ func newPeerAndLedgermanager(idStr string) peerAndLedgermanager { return peerAndLedgermanager{ Peer: testutil.NewPeerWithIDString(idStr), //Strategy: New(true), - ls: NewLedgerManager(blockstore.NewBlockstore(sync.MutexWrap(ds.NewMapDatastore())), context.TODO()), + ls: NewLedgerManager(context.TODO(), + blockstore.NewBlockstore(sync.MutexWrap(ds.NewMapDatastore()))), } } From b3a5039715a7704c89cae9d1cb55d5b65a61c460 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 20:56:18 -0800 Subject: [PATCH 0223/1035] doc: comment License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@7aa16ef277b5029942155cb6f92c33cca29b7445 --- bitswap/strategy/tasklist.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bitswap/strategy/tasklist.go b/bitswap/strategy/tasklist.go index f0a1b7d00..19bb9748e 100644 --- a/bitswap/strategy/tasklist.go +++ b/bitswap/strategy/tasklist.go @@ -43,8 +43,7 @@ func (tl *TaskList) Push(block u.Key, priority int, to peer.Peer) { tl.taskmap[to.Key()+block] = task } -// Pop returns the next task to be performed by bitswap the task is then -// removed from the list +// Pop 'pops' the next task to be performed. Returns nil no task exists. 
func (tl *TaskList) Pop() *Task { var out *Task for len(tl.tasks) > 0 { From c68f47e63cdde775339cb210192682a9a3a4546e Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 21:01:01 -0800 Subject: [PATCH 0224/1035] refactor: taskKey := p.Key() + block.Key() for clarity and to avoid errors, define a function License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@e84b37a7fb9c74335da36a9481cd00fcda73eaa6 --- bitswap/strategy/ledgermanager.go | 1 - bitswap/strategy/tasklist.go | 17 +++++++++++------ 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/bitswap/strategy/ledgermanager.go b/bitswap/strategy/ledgermanager.go index 1ea61bb7d..6c6f7ee75 100644 --- a/bitswap/strategy/ledgermanager.go +++ b/bitswap/strategy/ledgermanager.go @@ -17,7 +17,6 @@ var log = u.Logger("strategy") // LedgerMap lists Ledgers by their Partner key. type ledgerMap map[peerKey]*ledger -// FIXME share this externally type peerKey u.Key type Envelope struct { diff --git a/bitswap/strategy/tasklist.go b/bitswap/strategy/tasklist.go index 19bb9748e..0e8948cbb 100644 --- a/bitswap/strategy/tasklist.go +++ b/bitswap/strategy/tasklist.go @@ -10,12 +10,12 @@ import ( // tasks (on getnext). For now, we are assuming a dumb/nice strategy. 
type TaskList struct { tasks []*Task - taskmap map[u.Key]*Task + taskmap map[string]*Task } func NewTaskList() *TaskList { return &TaskList{ - taskmap: make(map[u.Key]*Task), + taskmap: make(map[string]*Task), } } @@ -28,7 +28,7 @@ type Task struct { // Push currently adds a new task to the end of the list // TODO: make this into a priority queue func (tl *TaskList) Push(block u.Key, priority int, to peer.Peer) { - if task, ok := tl.taskmap[to.Key()+block]; ok { + if task, ok := tl.taskmap[taskKey(to, block)]; ok { // TODO: when priority queue is implemented, // rearrange this Task task.theirPriority = priority @@ -40,7 +40,7 @@ func (tl *TaskList) Push(block u.Key, priority int, to peer.Peer) { theirPriority: priority, } tl.tasks = append(tl.tasks, task) - tl.taskmap[to.Key()+block] = task + tl.taskmap[taskKey(to, block)] = task } // Pop 'pops' the next task to be performed. Returns nil no task exists. @@ -52,7 +52,7 @@ func (tl *TaskList) Pop() *Task { // the same block from multiple peers out = tl.tasks[0] tl.tasks = tl.tasks[1:] - delete(tl.taskmap, out.Target.Key()+out.Key) + delete(tl.taskmap, taskKey(out.Target, out.Key)) // Filter out blocks that have been cancelled if out.theirPriority >= 0 { break @@ -64,8 +64,13 @@ func (tl *TaskList) Pop() *Task { // Cancel lazily cancels the sending of a block to a given peer func (tl *TaskList) Cancel(k u.Key, p peer.Peer) { - t, ok := tl.taskmap[p.Key()+k] + t, ok := tl.taskmap[taskKey(p, k)] if ok { t.theirPriority = -1 } } + +// taskKey returns a key that uniquely identifies a task. +func taskKey(p peer.Peer, k u.Key) string { + return string(p.Key() + k) +} From 109149671b0a14eacdfd503f1aaa852bd8195700 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 21:06:10 -0800 Subject: [PATCH 0225/1035] unexport task and taskList the less bitswap has to know about, the easier it'll be for readers. (This now returns Messages.) 
License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@37a5fc29fbce4478efa843284611d2aaf8ed43d2 --- bitswap/strategy/ledgermanager.go | 4 ++-- bitswap/strategy/tasklist.go | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/bitswap/strategy/ledgermanager.go b/bitswap/strategy/ledgermanager.go index 6c6f7ee75..77b5d66b1 100644 --- a/bitswap/strategy/ledgermanager.go +++ b/bitswap/strategy/ledgermanager.go @@ -28,7 +28,7 @@ type LedgerManager struct { lock sync.RWMutex ledgerMap ledgerMap bs bstore.Blockstore - tasklist *TaskList + tasklist *taskList outbox chan Envelope workSignal chan struct{} } @@ -37,7 +37,7 @@ func NewLedgerManager(ctx context.Context, bs bstore.Blockstore) *LedgerManager lm := &LedgerManager{ ledgerMap: make(ledgerMap), bs: bs, - tasklist: NewTaskList(), + tasklist: newTaskList(), outbox: make(chan Envelope, 4), // TODO extract constant workSignal: make(chan struct{}), } diff --git a/bitswap/strategy/tasklist.go b/bitswap/strategy/tasklist.go index 0e8948cbb..8e89c238b 100644 --- a/bitswap/strategy/tasklist.go +++ b/bitswap/strategy/tasklist.go @@ -8,13 +8,13 @@ import ( // TODO: at some point, the strategy needs to plug in here // to help decide how to sort tasks (on add) and how to select // tasks (on getnext). For now, we are assuming a dumb/nice strategy. 
-type TaskList struct { +type taskList struct { tasks []*Task taskmap map[string]*Task } -func NewTaskList() *TaskList { - return &TaskList{ +func newTaskList() *taskList { + return &taskList{ taskmap: make(map[string]*Task), } } @@ -27,7 +27,7 @@ type Task struct { // Push currently adds a new task to the end of the list // TODO: make this into a priority queue -func (tl *TaskList) Push(block u.Key, priority int, to peer.Peer) { +func (tl *taskList) Push(block u.Key, priority int, to peer.Peer) { if task, ok := tl.taskmap[taskKey(to, block)]; ok { // TODO: when priority queue is implemented, // rearrange this Task @@ -44,7 +44,7 @@ func (tl *TaskList) Push(block u.Key, priority int, to peer.Peer) { } // Pop 'pops' the next task to be performed. Returns nil no task exists. -func (tl *TaskList) Pop() *Task { +func (tl *taskList) Pop() *Task { var out *Task for len(tl.tasks) > 0 { // TODO: instead of zero, use exponential distribution @@ -63,7 +63,7 @@ func (tl *TaskList) Pop() *Task { } // Cancel lazily cancels the sending of a block to a given peer -func (tl *TaskList) Cancel(k u.Key, p peer.Peer) { +func (tl *taskList) Cancel(k u.Key, p peer.Peer) { t, ok := tl.taskmap[taskKey(p, k)] if ok { t.theirPriority = -1 From 1fe75bd75f36add085ef61d108f44c749518f7aa Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 21:08:28 -0800 Subject: [PATCH 0226/1035] refactor: remove peerKey type we've been using maps with peers long enough now that this probably is no longer necessary License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@98fe773ae46d36aaa480cb481cbe21aa2ce79f48 --- bitswap/strategy/ledgermanager.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/bitswap/strategy/ledgermanager.go b/bitswap/strategy/ledgermanager.go index 77b5d66b1..4bc8f2efc 100644 --- a/bitswap/strategy/ledgermanager.go +++ b/bitswap/strategy/ledgermanager.go @@ -15,9 +15,7 @@ import ( var log = u.Logger("strategy") // 
LedgerMap lists Ledgers by their Partner key. -type ledgerMap map[peerKey]*ledger - -type peerKey u.Key +type ledgerMap map[u.Key]*ledger type Envelope struct { Peer peer.Peer @@ -182,10 +180,10 @@ func (lm *LedgerManager) NumBytesReceivedFrom(p peer.Peer) uint64 { // ledger lazily instantiates a ledger func (lm *LedgerManager) findOrCreate(p peer.Peer) *ledger { - l, ok := lm.ledgerMap[peerKey(p.Key())] + l, ok := lm.ledgerMap[p.Key()] if !ok { l = newLedger(p) - lm.ledgerMap[peerKey(p.Key())] = l + lm.ledgerMap[p.Key()] = l } return l } From 9d5b921c4101273fa1552a8285fd326fb2c0906e Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 21:25:37 -0800 Subject: [PATCH 0227/1035] add comment to fix race License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@c8e48477acdf8ca2d02b3efef6d437eeff682046 --- bitswap/strategy/ledgermanager.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/bitswap/strategy/ledgermanager.go b/bitswap/strategy/ledgermanager.go index 4bc8f2efc..d328510a1 100644 --- a/bitswap/strategy/ledgermanager.go +++ b/bitswap/strategy/ledgermanager.go @@ -23,9 +23,11 @@ type Envelope struct { } type LedgerManager struct { - lock sync.RWMutex - ledgerMap ledgerMap - bs bstore.Blockstore + lock sync.RWMutex + ledgerMap ledgerMap + bs bstore.Blockstore + // FIXME tasklist isn't threadsafe nor is it protected by a mutex. 
consider + // a way to avoid sharing the tasklist between the worker and the receiver tasklist *taskList outbox chan Envelope workSignal chan struct{} From cb17834b0f83996dcaf367ae0359ca2c3902cf84 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 21:28:55 -0800 Subject: [PATCH 0228/1035] perf: avoid lots of communication by signaling once at end of method License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@fcaf7f56a345945e055301f0425fa92d6abd7fec --- bitswap/strategy/ledgermanager.go | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/bitswap/strategy/ledgermanager.go b/bitswap/strategy/ledgermanager.go index d328510a1..a84a5b7c8 100644 --- a/bitswap/strategy/ledgermanager.go +++ b/bitswap/strategy/ledgermanager.go @@ -104,6 +104,16 @@ func (lm *LedgerManager) BlockIsWantedByPeer(k u.Key, p peer.Peer) bool { // MessageReceived performs book-keeping. Returns error if passed invalid // arguments. func (lm *LedgerManager) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { + newWorkExists := false + defer func() { + if newWorkExists { + // Signal task generation to restart (if stopped!) + select { + case lm.workSignal <- struct{}{}: + default: + } + } + }() lm.lock.Lock() defer lm.lock.Unlock() @@ -117,13 +127,8 @@ func (lm *LedgerManager) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) er lm.tasklist.Cancel(e.Key, p) } else { l.Wants(e.Key, e.Priority) + newWorkExists = true lm.tasklist.Push(e.Key, e.Priority, p) - - // Signal task generation to restart (if stopped!) - select { - case lm.workSignal <- struct{}{}: - default: - } } } @@ -132,14 +137,8 @@ func (lm *LedgerManager) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) er l.ReceivedBytes(len(block.Data)) for _, l := range lm.ledgerMap { if l.WantListContains(block.Key()) { + newWorkExists = true lm.tasklist.Push(block.Key(), 1, l.Partner) - - // Signal task generation to restart (if stopped!) 
- select { - case lm.workSignal <- struct{}{}: - default: - } - } } } From c1c771d57286bc1dee6a5b5e925dba5a3811a8e8 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 21:36:55 -0800 Subject: [PATCH 0229/1035] it's not a queue yet but it's okay to name it as such License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@c93269ee324e8c990544680ae02da5510fd384fb --- bitswap/strategy/ledgermanager.go | 18 +++++++++--------- bitswap/strategy/{tasklist.go => taskqueue.go} | 12 ++++++------ 2 files changed, 15 insertions(+), 15 deletions(-) rename bitswap/strategy/{tasklist.go => taskqueue.go} (87%) diff --git a/bitswap/strategy/ledgermanager.go b/bitswap/strategy/ledgermanager.go index a84a5b7c8..47117553c 100644 --- a/bitswap/strategy/ledgermanager.go +++ b/bitswap/strategy/ledgermanager.go @@ -26,9 +26,9 @@ type LedgerManager struct { lock sync.RWMutex ledgerMap ledgerMap bs bstore.Blockstore - // FIXME tasklist isn't threadsafe nor is it protected by a mutex. consider - // a way to avoid sharing the tasklist between the worker and the receiver - tasklist *taskList + // FIXME taskqueue isn't threadsafe nor is it protected by a mutex. consider + // a way to avoid sharing the taskqueue between the worker and the receiver + taskqueue *taskQueue outbox chan Envelope workSignal chan struct{} } @@ -37,7 +37,7 @@ func NewLedgerManager(ctx context.Context, bs bstore.Blockstore) *LedgerManager lm := &LedgerManager{ ledgerMap: make(ledgerMap), bs: bs, - tasklist: newTaskList(), + taskqueue: newTaskQueue(), outbox: make(chan Envelope, 4), // TODO extract constant workSignal: make(chan struct{}), } @@ -47,7 +47,7 @@ func NewLedgerManager(ctx context.Context, bs bstore.Blockstore) *LedgerManager func (lm *LedgerManager) taskWorker(ctx context.Context) { for { - nextTask := lm.tasklist.Pop() + nextTask := lm.taskqueue.Pop() if nextTask == nil { // No tasks in the list? // Wait until there are! 
@@ -124,11 +124,11 @@ func (lm *LedgerManager) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) er for _, e := range m.Wantlist() { if e.Cancel { l.CancelWant(e.Key) - lm.tasklist.Cancel(e.Key, p) + lm.taskqueue.Cancel(e.Key, p) } else { l.Wants(e.Key, e.Priority) newWorkExists = true - lm.tasklist.Push(e.Key, e.Priority, p) + lm.taskqueue.Push(e.Key, e.Priority, p) } } @@ -138,7 +138,7 @@ func (lm *LedgerManager) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) er for _, l := range lm.ledgerMap { if l.WantListContains(block.Key()) { newWorkExists = true - lm.tasklist.Push(block.Key(), 1, l.Partner) + lm.taskqueue.Push(block.Key(), 1, l.Partner) } } } @@ -159,7 +159,7 @@ func (lm *LedgerManager) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error for _, block := range m.Blocks() { l.SentBytes(len(block.Data)) l.wantList.Remove(block.Key()) - lm.tasklist.Cancel(block.Key(), p) + lm.taskqueue.Cancel(block.Key(), p) } return nil diff --git a/bitswap/strategy/tasklist.go b/bitswap/strategy/taskqueue.go similarity index 87% rename from bitswap/strategy/tasklist.go rename to bitswap/strategy/taskqueue.go index 8e89c238b..fbb21926e 100644 --- a/bitswap/strategy/tasklist.go +++ b/bitswap/strategy/taskqueue.go @@ -8,13 +8,13 @@ import ( // TODO: at some point, the strategy needs to plug in here // to help decide how to sort tasks (on add) and how to select // tasks (on getnext). For now, we are assuming a dumb/nice strategy. 
-type taskList struct { +type taskQueue struct { tasks []*Task taskmap map[string]*Task } -func newTaskList() *taskList { - return &taskList{ +func newTaskQueue() *taskQueue { + return &taskQueue{ taskmap: make(map[string]*Task), } } @@ -27,7 +27,7 @@ type Task struct { // Push currently adds a new task to the end of the list // TODO: make this into a priority queue -func (tl *taskList) Push(block u.Key, priority int, to peer.Peer) { +func (tl *taskQueue) Push(block u.Key, priority int, to peer.Peer) { if task, ok := tl.taskmap[taskKey(to, block)]; ok { // TODO: when priority queue is implemented, // rearrange this Task @@ -44,7 +44,7 @@ func (tl *taskList) Push(block u.Key, priority int, to peer.Peer) { } // Pop 'pops' the next task to be performed. Returns nil no task exists. -func (tl *taskList) Pop() *Task { +func (tl *taskQueue) Pop() *Task { var out *Task for len(tl.tasks) > 0 { // TODO: instead of zero, use exponential distribution @@ -63,7 +63,7 @@ func (tl *taskList) Pop() *Task { } // Cancel lazily cancels the sending of a block to a given peer -func (tl *taskList) Cancel(k u.Key, p peer.Peer) { +func (tl *taskQueue) Cancel(k u.Key, p peer.Peer) { t, ok := tl.taskmap[taskKey(p, k)] if ok { t.theirPriority = -1 From 42ec57a35fefe18b5ca68374554ccd85bc13f7e9 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 21:42:30 -0800 Subject: [PATCH 0230/1035] tq.Cancel -> tq.Remove License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@404ac1d27de720ce9f170c401070dde526efedf9 --- bitswap/strategy/ledgermanager.go | 4 ++-- bitswap/strategy/taskqueue.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/strategy/ledgermanager.go b/bitswap/strategy/ledgermanager.go index 47117553c..23c5e2df0 100644 --- a/bitswap/strategy/ledgermanager.go +++ b/bitswap/strategy/ledgermanager.go @@ -124,7 +124,7 @@ func (lm *LedgerManager) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) er for _, e := 
range m.Wantlist() { if e.Cancel { l.CancelWant(e.Key) - lm.taskqueue.Cancel(e.Key, p) + lm.taskqueue.Remove(e.Key, p) } else { l.Wants(e.Key, e.Priority) newWorkExists = true @@ -159,7 +159,7 @@ func (lm *LedgerManager) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error for _, block := range m.Blocks() { l.SentBytes(len(block.Data)) l.wantList.Remove(block.Key()) - lm.taskqueue.Cancel(block.Key(), p) + lm.taskqueue.Remove(block.Key(), p) } return nil diff --git a/bitswap/strategy/taskqueue.go b/bitswap/strategy/taskqueue.go index fbb21926e..b721431ba 100644 --- a/bitswap/strategy/taskqueue.go +++ b/bitswap/strategy/taskqueue.go @@ -62,8 +62,8 @@ func (tl *taskQueue) Pop() *Task { return out } -// Cancel lazily cancels the sending of a block to a given peer -func (tl *taskQueue) Cancel(k u.Key, p peer.Peer) { +// Remove lazily removes a task from the queue +func (tl *taskQueue) Remove(k u.Key, p peer.Peer) { t, ok := tl.taskmap[taskKey(p, k)] if ok { t.theirPriority = -1 From 5828813cd12213ad090b7e6092b01b02664ca0cf Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 21:43:38 -0800 Subject: [PATCH 0231/1035] privatize Task License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@cc677a409cffb46e058ae0074b9123b49766270a --- bitswap/strategy/taskqueue.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/bitswap/strategy/taskqueue.go b/bitswap/strategy/taskqueue.go index b721431ba..0b92b256a 100644 --- a/bitswap/strategy/taskqueue.go +++ b/bitswap/strategy/taskqueue.go @@ -9,17 +9,17 @@ import ( // to help decide how to sort tasks (on add) and how to select // tasks (on getnext). For now, we are assuming a dumb/nice strategy. 
type taskQueue struct { - tasks []*Task - taskmap map[string]*Task + tasks []*task + taskmap map[string]*task } func newTaskQueue() *taskQueue { return &taskQueue{ - taskmap: make(map[string]*Task), + taskmap: make(map[string]*task), } } -type Task struct { +type task struct { Key u.Key Target peer.Peer theirPriority int @@ -30,11 +30,11 @@ type Task struct { func (tl *taskQueue) Push(block u.Key, priority int, to peer.Peer) { if task, ok := tl.taskmap[taskKey(to, block)]; ok { // TODO: when priority queue is implemented, - // rearrange this Task + // rearrange this task task.theirPriority = priority return } - task := &Task{ + task := &task{ Key: block, Target: to, theirPriority: priority, @@ -44,8 +44,8 @@ func (tl *taskQueue) Push(block u.Key, priority int, to peer.Peer) { } // Pop 'pops' the next task to be performed. Returns nil no task exists. -func (tl *taskQueue) Pop() *Task { - var out *Task +func (tl *taskQueue) Pop() *task { + var out *task for len(tl.tasks) > 0 { // TODO: instead of zero, use exponential distribution // it will help reduce the chance of receiving From dc4701b0644b0822de66d288e27bb36d85a4af2a Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 21:51:00 -0800 Subject: [PATCH 0232/1035] doc: add comment to Envelope License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@b527a68b05c73746e9507d3172db5b9b26bc3d0d --- bitswap/strategy/ledgermanager.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bitswap/strategy/ledgermanager.go b/bitswap/strategy/ledgermanager.go index 23c5e2df0..a2701c208 100644 --- a/bitswap/strategy/ledgermanager.go +++ b/bitswap/strategy/ledgermanager.go @@ -17,8 +17,11 @@ var log = u.Logger("strategy") // LedgerMap lists Ledgers by their Partner key. 
type ledgerMap map[u.Key]*ledger +// Envelope contains a message for a Peer type Envelope struct { - Peer peer.Peer + // Peer is the intended recipient + Peer peer.Peer + // Message is the payload Message bsmsg.BitSwapMessage } From c70f206598ae1936622f4ec2ab86d8604cba39d0 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 22:08:53 -0800 Subject: [PATCH 0233/1035] refactor: re-use wantlist.Entry type wherever it makes sense it seems to make sense since, in each place, the Key and Priority represent the same information b/c you know the saying... "It is better to have 100 functions operate on one data structure than 10 functions on 10 data structures." License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@02fca42a69942442f76527c94669d1da11017d0c --- bitswap/bitswap.go | 6 +++--- bitswap/message/message.go | 14 ++++++++------ bitswap/strategy/ledgermanager.go | 2 +- bitswap/strategy/taskqueue.go | 22 ++++++++++++---------- bitswap/wantlist/wantlist.go | 4 ++-- 5 files changed, 26 insertions(+), 22 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index cae7ab1e8..d9b3c52ef 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -172,7 +172,7 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e } message := bsmsg.New() for _, wanted := range bs.wantlist.Entries() { - message.AddEntry(wanted.Value, wanted.Priority) + message.AddEntry(wanted.Key, wanted.Priority) } wg := sync.WaitGroup{} for peerToQuery := range peers { @@ -210,7 +210,7 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wl.Wan message := bsmsg.New() message.SetFull(true) for _, e := range bs.wantlist.Entries() { - message.AddEntry(e.Value, e.Priority) + message.AddEntry(e.Key, e.Priority) } ps := pset.NewPeerSet() @@ -229,7 +229,7 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wl.Wan bs.send(ctx, prov, message) } } - }(e.Value) + }(e.Key) } 
wg.Wait() } diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 478d8e258..245fc35fb 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -5,6 +5,7 @@ import ( blocks "github.com/jbenet/go-ipfs/blocks" pb "github.com/jbenet/go-ipfs/exchange/bitswap/message/internal/pb" + wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" inet "github.com/jbenet/go-ipfs/net" u "github.com/jbenet/go-ipfs/util" @@ -64,9 +65,8 @@ func newMsg() *impl { } type Entry struct { - Key u.Key - Priority int - Cancel bool + wantlist.Entry + Cancel bool } func newMessageFromProto(pbm pb.Message) BitSwapMessage { @@ -121,9 +121,11 @@ func (m *impl) addEntry(k u.Key, priority int, cancel bool) { e.Cancel = cancel } else { m.wantlist[k] = &Entry{ - Key: k, - Priority: priority, - Cancel: cancel, + Entry: wantlist.Entry{ + Key: k, + Priority: priority, + }, + Cancel: cancel, } } } diff --git a/bitswap/strategy/ledgermanager.go b/bitswap/strategy/ledgermanager.go index a2701c208..26e47e14e 100644 --- a/bitswap/strategy/ledgermanager.go +++ b/bitswap/strategy/ledgermanager.go @@ -61,7 +61,7 @@ func (lm *LedgerManager) taskWorker(ctx context.Context) { } continue } - block, err := lm.bs.Get(nextTask.Key) + block, err := lm.bs.Get(nextTask.Entry.Key) if err != nil { continue // TODO maybe return an error } diff --git a/bitswap/strategy/taskqueue.go b/bitswap/strategy/taskqueue.go index 0b92b256a..d5a4eb886 100644 --- a/bitswap/strategy/taskqueue.go +++ b/bitswap/strategy/taskqueue.go @@ -1,6 +1,7 @@ package strategy import ( + wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" ) @@ -20,9 +21,8 @@ func newTaskQueue() *taskQueue { } type task struct { - Key u.Key - Target peer.Peer - theirPriority int + Entry wantlist.Entry + Target peer.Peer } // Push currently adds a new task to the end of the list @@ -31,13 +31,15 @@ func (tl *taskQueue) Push(block u.Key, 
priority int, to peer.Peer) { if task, ok := tl.taskmap[taskKey(to, block)]; ok { // TODO: when priority queue is implemented, // rearrange this task - task.theirPriority = priority + task.Entry.Priority = priority return } task := &task{ - Key: block, - Target: to, - theirPriority: priority, + Entry: wantlist.Entry{ + Key: block, + Priority: priority, + }, + Target: to, } tl.tasks = append(tl.tasks, task) tl.taskmap[taskKey(to, block)] = task @@ -52,9 +54,9 @@ func (tl *taskQueue) Pop() *task { // the same block from multiple peers out = tl.tasks[0] tl.tasks = tl.tasks[1:] - delete(tl.taskmap, taskKey(out.Target, out.Key)) + delete(tl.taskmap, taskKey(out.Target, out.Entry.Key)) // Filter out blocks that have been cancelled - if out.theirPriority >= 0 { + if out.Entry.Priority >= 0 { // FIXME separate the "cancel" signal from priority break } } @@ -66,7 +68,7 @@ func (tl *taskQueue) Remove(k u.Key, p peer.Peer) { t, ok := tl.taskmap[taskKey(p, k)] if ok { - t.theirPriority = -1 + t.Entry.Priority = -1 } } diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index e20bb4457..2c50daa49 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -18,7 +18,7 @@ type Entry struct { - Value u.Key + Key u.Key Priority int } @@ -29,7 +29,7 @@ func (w *Wantlist) Add(k u.Key, priority int) { return } w.set[k] = &Entry{ - Value: k, + Key: k, Priority: priority, } } From eaca42ed565f9796da0e0081610f245cd2a3003e Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 22:12:27 -0800 Subject: [PATCH 0234/1035] refactor: separate responsibilities Before, priority carried two pieces of information. One: priority as defined by remote peer Two: whether task is trashed This assumes the protocol is defined for natural numbers instead of integers. That may not always be the case. 
Better to leave that assumption outside so this package isn't coupled to the whims of the protocol. The protocol may be changed to allow any integer value to be used. Hopefully by that time, new responsibilities weren't added to the Priority variable. License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@fa38bee3dc8b41c8fcde86ff3dca6e6e6dd6e471 --- bitswap/strategy/taskqueue.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/strategy/taskqueue.go b/bitswap/strategy/taskqueue.go index d5a4eb886..4dbfdd92b 100644 --- a/bitswap/strategy/taskqueue.go +++ b/bitswap/strategy/taskqueue.go @@ -23,6 +23,7 @@ func newTaskQueue() *taskQueue { type task struct { Entry wantlist.Entry Target peer.Peer + Trash bool } // Push currently adds a new task to the end of the list @@ -55,12 +56,11 @@ func (tl *taskQueue) Pop() *task { out = tl.tasks[0] tl.tasks = tl.tasks[1:] delete(tl.taskmap, taskKey(out.Target, out.Entry.Key)) - // Filter out blocks that have been cancelled - if out.Entry.Priority >= 0 { // FIXME separate the "cancel" signal from priority - break + if out.Trash { + continue // discarding tasks that have been removed } + break // and return |out| } - return out } @@ -68,7 +68,7 @@ func (tl *taskQueue) Pop() *task { func (tl *taskQueue) Remove(k u.Key, p peer.Peer) { t, ok := tl.taskmap[taskKey(p, k)] if ok { - t.Entry.Priority = -1 + t.Trash = true } } From dcf6c3091b10c26331fd7b3eb3a846c48888c397 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 22:20:21 -0800 Subject: [PATCH 0235/1035] mv comment License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@fc6936d8d9d57b0019ec94a3a848e5a80c1689fe --- bitswap/strategy/taskqueue.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/strategy/taskqueue.go b/bitswap/strategy/taskqueue.go index 4dbfdd92b..69bb95cd4 100644 --- a/bitswap/strategy/taskqueue.go +++ 
b/bitswap/strategy/taskqueue.go @@ -10,6 +10,7 @@ import ( // to help decide how to sort tasks (on add) and how to select // tasks (on getnext). For now, we are assuming a dumb/nice strategy. type taskQueue struct { + // TODO: make this into a priority queue tasks []*task taskmap map[string]*task } @@ -27,7 +28,6 @@ type task struct { } // Push currently adds a new task to the end of the list -// TODO: make this into a priority queue func (tl *taskQueue) Push(block u.Key, priority int, to peer.Peer) { if task, ok := tl.taskmap[taskKey(to, block)]; ok { // TODO: when priority queue is implemented, From f3b90bedb3bbda01d5b038478a5a63f7b228d790 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 22:21:36 -0800 Subject: [PATCH 0236/1035] refactor: remove ledgerMap type it's only used in two places, but i think we've been using maps on IPFS types so much now that the specificity is no longer necessary License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@ee31c51815773bc2ab97f51c644102213e898f2f --- bitswap/strategy/ledgermanager.go | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/bitswap/strategy/ledgermanager.go b/bitswap/strategy/ledgermanager.go index 26e47e14e..258f92fd1 100644 --- a/bitswap/strategy/ledgermanager.go +++ b/bitswap/strategy/ledgermanager.go @@ -3,8 +3,7 @@ package strategy import ( "sync" - "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" bstore "github.com/jbenet/go-ipfs/blocks/blockstore" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" @@ -14,9 +13,6 @@ import ( var log = u.Logger("strategy") -// LedgerMap lists Ledgers by their Partner key. 
-type ledgerMap map[u.Key]*ledger - // Envelope contains a message for a Peer type Envelope struct { // Peer is the intended recipient @@ -26,8 +22,9 @@ type Envelope struct { } type LedgerManager struct { - lock sync.RWMutex - ledgerMap ledgerMap + lock sync.RWMutex + // ledgerMap lists Ledgers by their Partner key. + ledgerMap map[u.Key]*ledger bs bstore.Blockstore // FIXME taskqueue isn't threadsafe nor is it protected by a mutex. consider // a way to avoid sharing the taskqueue between the worker and the receiver @@ -38,7 +35,7 @@ type LedgerManager struct { func NewLedgerManager(ctx context.Context, bs bstore.Blockstore) *LedgerManager { lm := &LedgerManager{ - ledgerMap: make(ledgerMap), + ledgerMap: make(map[u.Key]*ledger), bs: bs, taskqueue: newTaskQueue(), outbox: make(chan Envelope, 4), // TODO extract constant From e41d3fcf45cd45c0c04987da4bf2fab2945a4932 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 22:24:54 -0800 Subject: [PATCH 0237/1035] refactor: put mutex next to the things it protects If we put the lock next to the fields it protects, it can sometimes make it easier to reason about threadsafety. In this case, it reveals that the task queue (not threadsafe) isn't protected by the mutex, yet shared between the worker and callers. @whyrusleeping License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@0fd3c1a343f5c69dc51e7da2259b01c458d9ca1a --- bitswap/strategy/ledgermanager.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/bitswap/strategy/ledgermanager.go b/bitswap/strategy/ledgermanager.go index 258f92fd1..92e6ea9c2 100644 --- a/bitswap/strategy/ledgermanager.go +++ b/bitswap/strategy/ledgermanager.go @@ -22,15 +22,19 @@ type Envelope struct { } type LedgerManager struct { - lock sync.RWMutex - // ledgerMap lists Ledgers by their Partner key. 
- ledgerMap map[u.Key]*ledger - bs bstore.Blockstore // FIXME taskqueue isn't threadsafe nor is it protected by a mutex. consider // a way to avoid sharing the taskqueue between the worker and the receiver - taskqueue *taskQueue - outbox chan Envelope + taskqueue *taskQueue + workSignal chan struct{} + + outbox chan Envelope + + bs bstore.Blockstore + + lock sync.RWMutex + // ledgerMap lists Ledgers by their Partner key. + ledgerMap map[u.Key]*ledger } func NewLedgerManager(ctx context.Context, bs bstore.Blockstore) *LedgerManager { From ed51abf27a6751d049c035cee3689e797744ebe3 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 22:46:53 -0800 Subject: [PATCH 0238/1035] refactor: wantlist splits into WL and ThreadSafe WL bitswap keeps the threadsafe version. observing the ledger shows that it doesn't need it anymore (ledgermanager is protected and safe). License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@b34e4df9c98dcf0588953aaa7feb2a02c6b07068 --- bitswap/bitswap.go | 8 ++-- bitswap/wantlist/wantlist.go | 86 +++++++++++++++++++++++++++--------- 2 files changed, 70 insertions(+), 24 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d9b3c52ef..473bf117e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -15,7 +15,7 @@ import ( bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" notifications "github.com/jbenet/go-ipfs/exchange/bitswap/notifications" strategy "github.com/jbenet/go-ipfs/exchange/bitswap/strategy" - wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" + wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" eventlog "github.com/jbenet/go-ipfs/util/eventlog" @@ -59,7 +59,7 @@ func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, rout ledgermanager: strategy.NewLedgerManager(ctx, bstore), routing: routing, sender: network, - wantlist: wl.New(), + wantlist: 
wantlist.NewThreadSafe(), batchRequests: make(chan []u.Key, 32), } network.SetDelegate(bs) @@ -95,7 +95,7 @@ type bitswap struct { ledgermanager *strategy.LedgerManager - wantlist *wl.Wantlist + wantlist *wantlist.ThreadSafe // cancelFunc signals cancellation to the bitswap event loop cancelFunc func() @@ -203,7 +203,7 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e return nil } -func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wl.Wantlist) { +func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wantlist.ThreadSafe) { ctx, cancel := context.WithCancel(ctx) defer cancel() diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 2c50daa49..6ef018668 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,25 +6,86 @@ import ( "sync" ) +type ThreadSafe struct { + lk sync.RWMutex + Wantlist +} + +// not threadsafe type Wantlist struct { - lk sync.RWMutex set map[u.Key]*Entry } +type Entry struct { + Key u.Key + Priority int +} + +type entrySlice []*Entry + +func (es entrySlice) Len() int { return len(es) } +func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] } +func (es entrySlice) Less(i, j int) bool { return es[i].Priority > es[j].Priority } + +func NewThreadSafe() *ThreadSafe { + return &ThreadSafe{ + Wantlist: *New(), + } +} + func New() *Wantlist { return &Wantlist{ set: make(map[u.Key]*Entry), } } -type Entry struct { - Key u.Key - Priority int +func (w *ThreadSafe) Add(k u.Key, priority int) { + // TODO rm defer for perf + w.lk.Lock() + defer w.lk.Unlock() + w.Wantlist.Add(k, priority) } -func (w *Wantlist) Add(k u.Key, priority int) { +func (w *ThreadSafe) Remove(k u.Key) { + // TODO rm defer for perf w.lk.Lock() defer w.lk.Unlock() + w.Wantlist.Remove(k) +} + +func (w *ThreadSafe) Contains(k u.Key) bool { + // TODO rm defer for perf + w.lk.RLock() + defer w.lk.RUnlock() + return w.Wantlist.Contains(k) +} + +func (w 
*ThreadSafe) Entries() []*Entry { + w.lk.RLock() + defer w.lk.RUnlock() + var es entrySlice + for _, e := range w.set { + es = append(es, e) + } + // TODO rename SortedEntries (state that they're sorted so callers know + // they're paying an expense) + sort.Sort(es) + return es +} + +func (w *ThreadSafe) SortedEntries() []*Entry { + w.lk.RLock() + defer w.lk.RUnlock() + var es entrySlice + + for _, e := range w.set { + es = append(es, e) + } + sort.Sort(es) + return es +} + +func (w *Wantlist) Add(k u.Key, priority int) { if _, ok := w.set[k]; ok { return } @@ -35,28 +96,15 @@ func (w *Wantlist) Add(k u.Key, priority int) { } func (w *Wantlist) Remove(k u.Key) { - w.lk.Lock() - defer w.lk.Unlock() delete(w.set, k) } func (w *Wantlist) Contains(k u.Key) bool { - w.lk.RLock() - defer w.lk.RUnlock() _, ok := w.set[k] return ok } -type entrySlice []*Entry - -func (es entrySlice) Len() int { return len(es) } -func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] } -func (es entrySlice) Less(i, j int) bool { return es[i].Priority > es[j].Priority } - func (w *Wantlist) Entries() []*Entry { - w.lk.RLock() - defer w.lk.RUnlock() - var es entrySlice for _, e := range w.set { @@ -67,8 +115,6 @@ func (w *Wantlist) Entries() []*Entry { } func (w *Wantlist) SortedEntries() []*Entry { - w.lk.RLock() - defer w.lk.RUnlock() var es entrySlice for _, e := range w.set { From 684549821ebcbbce683d95ea3944c0a145c89157 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 22:52:29 -0800 Subject: [PATCH 0239/1035] rename to strategy.LedgerManager to decision.Engine License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@65280c14cb0d5cbb3d2e8cfa53b61ea6f097240a --- bitswap/bitswap.go | 21 ++-- .../ledgermanager.go => decision/engine.go} | 102 +++++++++--------- .../engine_test.go} | 38 +++---- bitswap/{strategy => decision}/ledger.go | 2 +- bitswap/decision/ledger_test.go | 1 + bitswap/{strategy => decision}/taskqueue.go | 2 +- 
bitswap/strategy/ledger_test.go | 1 - 7 files changed, 81 insertions(+), 86 deletions(-) rename bitswap/{strategy/ledgermanager.go => decision/engine.go} (61%) rename bitswap/{strategy/ledgermanager_test.go => decision/engine_test.go} (70%) rename bitswap/{strategy => decision}/ledger.go (99%) create mode 100644 bitswap/decision/ledger_test.go rename bitswap/{strategy => decision}/taskqueue.go (99%) delete mode 100644 bitswap/strategy/ledger_test.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 473bf117e..d0e49d182 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -7,14 +7,13 @@ import ( "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - blocks "github.com/jbenet/go-ipfs/blocks" blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" exchange "github.com/jbenet/go-ipfs/exchange" + decision "github.com/jbenet/go-ipfs/exchange/bitswap/decision" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" notifications "github.com/jbenet/go-ipfs/exchange/bitswap/notifications" - strategy "github.com/jbenet/go-ipfs/exchange/bitswap/strategy" wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" @@ -56,7 +55,7 @@ func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, rout blockstore: bstore, cancelFunc: cancelFunc, notifications: notif, - ledgermanager: strategy.NewLedgerManager(ctx, bstore), + engine: decision.NewEngine(ctx, bstore), routing: routing, sender: network, wantlist: wantlist.NewThreadSafe(), @@ -89,11 +88,7 @@ type bitswap struct { // have more than a single block in the set batchRequests chan []u.Key - // strategy makes decisions about how to interact with partners. 
- // TODO: strategy commented out until we have a use for it again - //strategy strategy.Strategy - - ledgermanager *strategy.LedgerManager + engine *decision.Engine wantlist *wantlist.ThreadSafe @@ -196,7 +191,7 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e // FIXME ensure accounting is handled correctly when // communication fails. May require slightly different API to // get better guarantees. May need shared sequence numbers. - bs.ledgermanager.MessageSent(p, message) + bs.engine.MessageSent(p, message) }(peerToQuery) } wg.Wait() @@ -239,7 +234,7 @@ func (bs *bitswap) taskWorker(ctx context.Context) { select { case <-ctx.Done(): return - case envelope := <-bs.ledgermanager.Outbox(): + case envelope := <-bs.engine.Outbox(): bs.send(ctx, envelope.Peer, envelope.Message) } } @@ -305,7 +300,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm // This call records changes to wantlists, blocks received, // and number of bytes transfered. - bs.ledgermanager.MessageReceived(p, incoming) + bs.engine.MessageReceived(p, incoming) // TODO: this is bad, and could be easily abused. 
// Should only track *useful* messages in ledger @@ -334,7 +329,7 @@ func (bs *bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { for _, k := range bkeys { message.Cancel(k) } - for _, p := range bs.ledgermanager.Peers() { + for _, p := range bs.engine.Peers() { err := bs.send(ctx, p, message) if err != nil { log.Errorf("Error sending message: %s", err) @@ -354,7 +349,7 @@ func (bs *bitswap) send(ctx context.Context, p peer.Peer, m bsmsg.BitSwapMessage if err := bs.sender.SendMessage(ctx, p, m); err != nil { return err } - return bs.ledgermanager.MessageSent(p, m) + return bs.engine.MessageSent(p, m) } func (bs *bitswap) Close() error { diff --git a/bitswap/strategy/ledgermanager.go b/bitswap/decision/engine.go similarity index 61% rename from bitswap/strategy/ledgermanager.go rename to bitswap/decision/engine.go index 92e6ea9c2..3b81d2582 100644 --- a/bitswap/strategy/ledgermanager.go +++ b/bitswap/decision/engine.go @@ -1,4 +1,4 @@ -package strategy +package decision import ( "sync" @@ -11,7 +11,7 @@ import ( u "github.com/jbenet/go-ipfs/util" ) -var log = u.Logger("strategy") +var log = u.Logger("engine") // Envelope contains a message for a Peer type Envelope struct { @@ -21,7 +21,7 @@ type Envelope struct { Message bsmsg.BitSwapMessage } -type LedgerManager struct { +type Engine struct { // FIXME taskqueue isn't threadsafe nor is it protected by a mutex. 
consider // a way to avoid sharing the taskqueue between the worker and the receiver taskqueue *taskQueue @@ -37,32 +37,32 @@ type LedgerManager struct { ledgerMap map[u.Key]*ledger } -func NewLedgerManager(ctx context.Context, bs bstore.Blockstore) *LedgerManager { - lm := &LedgerManager{ +func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { + e := &Engine{ ledgerMap: make(map[u.Key]*ledger), bs: bs, taskqueue: newTaskQueue(), outbox: make(chan Envelope, 4), // TODO extract constant workSignal: make(chan struct{}), } - go lm.taskWorker(ctx) - return lm + go e.taskWorker(ctx) + return e } -func (lm *LedgerManager) taskWorker(ctx context.Context) { +func (e *Engine) taskWorker(ctx context.Context) { for { - nextTask := lm.taskqueue.Pop() + nextTask := e.taskqueue.Pop() if nextTask == nil { // No tasks in the list? // Wait until there are! select { case <-ctx.Done(): return - case <-lm.workSignal: + case <-e.workSignal: } continue } - block, err := lm.bs.Get(nextTask.Entry.Key) + block, err := e.bs.Get(nextTask.Entry.Key) if err != nil { continue // TODO maybe return an error } @@ -74,22 +74,22 @@ func (lm *LedgerManager) taskWorker(ctx context.Context) { select { case <-ctx.Done(): return - case lm.outbox <- Envelope{Peer: nextTask.Target, Message: m}: + case e.outbox <- Envelope{Peer: nextTask.Target, Message: m}: } } } -func (lm *LedgerManager) Outbox() <-chan Envelope { - return lm.outbox +func (e *Engine) Outbox() <-chan Envelope { + return e.outbox } // Returns a slice of Peers with whom the local node has active sessions -func (lm *LedgerManager) Peers() []peer.Peer { - lm.lock.RLock() - defer lm.lock.RUnlock() +func (e *Engine) Peers() []peer.Peer { + e.lock.RLock() + defer e.lock.RUnlock() response := make([]peer.Peer, 0) - for _, ledger := range lm.ledgerMap { + for _, ledger := range e.ledgerMap { response = append(response, ledger.Partner) } return response @@ -97,52 +97,52 @@ func (lm *LedgerManager) Peers() []peer.Peer { // 
BlockIsWantedByPeer returns true if peer wants the block given by this // key -func (lm *LedgerManager) BlockIsWantedByPeer(k u.Key, p peer.Peer) bool { - lm.lock.RLock() - defer lm.lock.RUnlock() +func (e *Engine) BlockIsWantedByPeer(k u.Key, p peer.Peer) bool { + e.lock.RLock() + defer e.lock.RUnlock() - ledger := lm.findOrCreate(p) + ledger := e.findOrCreate(p) return ledger.WantListContains(k) } // MessageReceived performs book-keeping. Returns error if passed invalid // arguments. -func (lm *LedgerManager) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { +func (e *Engine) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { newWorkExists := false defer func() { if newWorkExists { // Signal task generation to restart (if stopped!) select { - case lm.workSignal <- struct{}{}: + case e.workSignal <- struct{}{}: default: } } }() - lm.lock.Lock() - defer lm.lock.Unlock() + e.lock.Lock() + defer e.lock.Unlock() - l := lm.findOrCreate(p) + l := e.findOrCreate(p) if m.Full() { l.wantList = wl.New() } - for _, e := range m.Wantlist() { - if e.Cancel { - l.CancelWant(e.Key) - lm.taskqueue.Remove(e.Key, p) + for _, entry := range m.Wantlist() { + if entry.Cancel { + l.CancelWant(entry.Key) + e.taskqueue.Remove(entry.Key, p) } else { - l.Wants(e.Key, e.Priority) + l.Wants(entry.Key, entry.Priority) newWorkExists = true - lm.taskqueue.Push(e.Key, e.Priority, p) + e.taskqueue.Push(entry.Key, entry.Priority, p) } } for _, block := range m.Blocks() { // FIXME extract blocks.NumBytes(block) or block.NumBytes() method l.ReceivedBytes(len(block.Data)) - for _, l := range lm.ledgerMap { + for _, l := range e.ledgerMap { if l.WantListContains(block.Key()) { newWorkExists = true - lm.taskqueue.Push(block.Key(), 1, l.Partner) + e.taskqueue.Push(block.Key(), 1, l.Partner) } } } @@ -155,40 +155,40 @@ func (lm *LedgerManager) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) er // inconsistent. 
Would need to ensure that Sends and acknowledgement of the // send happen atomically -func (lm *LedgerManager) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error { - lm.lock.Lock() - defer lm.lock.Unlock() +func (e *Engine) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error { + e.lock.Lock() + defer e.lock.Unlock() - l := lm.findOrCreate(p) + l := e.findOrCreate(p) for _, block := range m.Blocks() { l.SentBytes(len(block.Data)) l.wantList.Remove(block.Key()) - lm.taskqueue.Remove(block.Key(), p) + e.taskqueue.Remove(block.Key(), p) } return nil } -func (lm *LedgerManager) NumBytesSentTo(p peer.Peer) uint64 { - lm.lock.RLock() - defer lm.lock.RUnlock() +func (e *Engine) NumBytesSentTo(p peer.Peer) uint64 { + e.lock.RLock() + defer e.lock.RUnlock() - return lm.findOrCreate(p).Accounting.BytesSent + return e.findOrCreate(p).Accounting.BytesSent } -func (lm *LedgerManager) NumBytesReceivedFrom(p peer.Peer) uint64 { - lm.lock.RLock() - defer lm.lock.RUnlock() +func (e *Engine) NumBytesReceivedFrom(p peer.Peer) uint64 { + e.lock.RLock() + defer e.lock.RUnlock() - return lm.findOrCreate(p).Accounting.BytesRecv + return e.findOrCreate(p).Accounting.BytesRecv } // ledger lazily instantiates a ledger -func (lm *LedgerManager) findOrCreate(p peer.Peer) *ledger { - l, ok := lm.ledgerMap[p.Key()] +func (e *Engine) findOrCreate(p peer.Peer) *ledger { + l, ok := e.ledgerMap[p.Key()] if !ok { l = newLedger(p) - lm.ledgerMap[p.Key()] = l + e.ledgerMap[p.Key()] = l } return l } diff --git a/bitswap/strategy/ledgermanager_test.go b/bitswap/decision/engine_test.go similarity index 70% rename from bitswap/strategy/ledgermanager_test.go rename to bitswap/decision/engine_test.go index 5c78f2f81..592236c3e 100644 --- a/bitswap/strategy/ledgermanager_test.go +++ b/bitswap/decision/engine_test.go @@ -1,4 +1,4 @@ -package strategy +package decision import ( "strings" @@ -14,16 +14,16 @@ import ( testutil "github.com/jbenet/go-ipfs/util/testutil" ) -type peerAndLedgermanager struct { 
+type peerAndEngine struct { peer.Peer - ls *LedgerManager + Engine *Engine } -func newPeerAndLedgermanager(idStr string) peerAndLedgermanager { - return peerAndLedgermanager{ +func newPeerAndLedgermanager(idStr string) peerAndEngine { + return peerAndEngine{ Peer: testutil.NewPeerWithIDString(idStr), //Strategy: New(true), - ls: NewLedgerManager(context.TODO(), + Engine: NewEngine(context.TODO(), blockstore.NewBlockstore(sync.MutexWrap(ds.NewMapDatastore()))), } } @@ -39,23 +39,23 @@ func TestConsistentAccounting(t *testing.T) { content := []string{"this", "is", "message", "i"} m.AddBlock(blocks.NewBlock([]byte(strings.Join(content, " ")))) - sender.ls.MessageSent(receiver.Peer, m) - receiver.ls.MessageReceived(sender.Peer, m) + sender.Engine.MessageSent(receiver.Peer, m) + receiver.Engine.MessageReceived(sender.Peer, m) } // Ensure sender records the change - if sender.ls.NumBytesSentTo(receiver.Peer) == 0 { + if sender.Engine.NumBytesSentTo(receiver.Peer) == 0 { t.Fatal("Sent bytes were not recorded") } // Ensure sender and receiver have the same values - if sender.ls.NumBytesSentTo(receiver.Peer) != receiver.ls.NumBytesReceivedFrom(sender.Peer) { + if sender.Engine.NumBytesSentTo(receiver.Peer) != receiver.Engine.NumBytesReceivedFrom(sender.Peer) { t.Fatal("Inconsistent book-keeping. Strategies don't agree") } // Ensure sender didn't record receving anything. 
And that the receiver // didn't record sending anything - if receiver.ls.NumBytesSentTo(sender.Peer) != 0 || sender.ls.NumBytesReceivedFrom(receiver.Peer) != 0 { + if receiver.Engine.NumBytesSentTo(sender.Peer) != 0 || sender.Engine.NumBytesReceivedFrom(receiver.Peer) != 0 { t.Fatal("Bert didn't send bytes to Ernie") } } @@ -69,10 +69,10 @@ func TestBlockRecordedAsWantedAfterMessageReceived(t *testing.T) { messageFromBeggarToChooser := message.New() messageFromBeggarToChooser.AddEntry(block.Key(), 1) - chooser.ls.MessageReceived(beggar.Peer, messageFromBeggarToChooser) + chooser.Engine.MessageReceived(beggar.Peer, messageFromBeggarToChooser) // for this test, doesn't matter if you record that beggar sent - if !chooser.ls.BlockIsWantedByPeer(block.Key(), beggar.Peer) { + if !chooser.Engine.BlockIsWantedByPeer(block.Key(), beggar.Peer) { t.Fatal("chooser failed to record that beggar wants block") } } @@ -84,24 +84,24 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { m := message.New() - sanfrancisco.ls.MessageSent(seattle.Peer, m) - seattle.ls.MessageReceived(sanfrancisco.Peer, m) + sanfrancisco.Engine.MessageSent(seattle.Peer, m) + seattle.Engine.MessageReceived(sanfrancisco.Peer, m) if seattle.Peer.Key() == sanfrancisco.Peer.Key() { t.Fatal("Sanity Check: Peers have same Key!") } - if !peerIsPartner(seattle.Peer, sanfrancisco.ls) { + if !peerIsPartner(seattle.Peer, sanfrancisco.Engine) { t.Fatal("Peer wasn't added as a Partner") } - if !peerIsPartner(sanfrancisco.Peer, seattle.ls) { + if !peerIsPartner(sanfrancisco.Peer, seattle.Engine) { t.Fatal("Peer wasn't added as a Partner") } } -func peerIsPartner(p peer.Peer, ls *LedgerManager) bool { - for _, partner := range ls.Peers() { +func peerIsPartner(p peer.Peer, e *Engine) bool { + for _, partner := range e.Peers() { if partner.Key() == p.Key() { return true } diff --git a/bitswap/strategy/ledger.go b/bitswap/decision/ledger.go similarity index 99% rename from bitswap/strategy/ledger.go rename 
to bitswap/decision/ledger.go index 649c1e73e..eea87af1f 100644 --- a/bitswap/strategy/ledger.go +++ b/bitswap/decision/ledger.go @@ -1,4 +1,4 @@ -package strategy +package decision import ( "time" diff --git a/bitswap/decision/ledger_test.go b/bitswap/decision/ledger_test.go new file mode 100644 index 000000000..a6dd04e35 --- /dev/null +++ b/bitswap/decision/ledger_test.go @@ -0,0 +1 @@ +package decision diff --git a/bitswap/strategy/taskqueue.go b/bitswap/decision/taskqueue.go similarity index 99% rename from bitswap/strategy/taskqueue.go rename to bitswap/decision/taskqueue.go index 69bb95cd4..1cf279ef7 100644 --- a/bitswap/strategy/taskqueue.go +++ b/bitswap/decision/taskqueue.go @@ -1,4 +1,4 @@ -package strategy +package decision import ( wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" diff --git a/bitswap/strategy/ledger_test.go b/bitswap/strategy/ledger_test.go deleted file mode 100644 index 4271d525c..000000000 --- a/bitswap/strategy/ledger_test.go +++ /dev/null @@ -1 +0,0 @@ -package strategy From c4774ccbbc40a3d4e92e33b5629b84f641f4ae6c Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 23:03:37 -0800 Subject: [PATCH 0240/1035] rm empty file License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@0426b97e1ee5ec3d17db15f13e3d1eb3d03cb3b4 --- bitswap/decision/ledger_test.go | 1 - 1 file changed, 1 deletion(-) delete mode 100644 bitswap/decision/ledger_test.go diff --git a/bitswap/decision/ledger_test.go b/bitswap/decision/ledger_test.go deleted file mode 100644 index a6dd04e35..000000000 --- a/bitswap/decision/ledger_test.go +++ /dev/null @@ -1 +0,0 @@ -package decision From 64c15c338cbf143033b23856be97a884047ba003 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 23:07:58 -0800 Subject: [PATCH 0241/1035] rename to peerRequestQueue this opens up the possibility of having multiple queues. 
And for all outgoing messages to be managed by the decision engine License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@b3712aae17f7d297bf3f5bb79c2af5bcbbf2c2a9 --- bitswap/decision/engine.go | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 3b81d2582..b8018eef0 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -22,9 +22,10 @@ type Envelope struct { } type Engine struct { - // FIXME taskqueue isn't threadsafe nor is it protected by a mutex. consider - // a way to avoid sharing the taskqueue between the worker and the receiver - taskqueue *taskQueue + // FIXME peerRequestQueue isn't threadsafe nor is it protected by a mutex. + // consider a way to avoid sharing the peerRequestQueue between the worker + // and the receiver + peerRequestQueue *taskQueue workSignal chan struct{} @@ -39,11 +40,11 @@ type Engine struct { func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { e := &Engine{ - ledgerMap: make(map[u.Key]*ledger), - bs: bs, - taskqueue: newTaskQueue(), - outbox: make(chan Envelope, 4), // TODO extract constant - workSignal: make(chan struct{}), + ledgerMap: make(map[u.Key]*ledger), + bs: bs, + peerRequestQueue: newTaskQueue(), + outbox: make(chan Envelope, 4), // TODO extract constant + workSignal: make(chan struct{}), } go e.taskWorker(ctx) return e @@ -51,7 +52,7 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { func (e *Engine) taskWorker(ctx context.Context) { for { - nextTask := e.taskqueue.Pop() + nextTask := e.peerRequestQueue.Pop() if nextTask == nil { // No tasks in the list? // Wait until there are! 
@@ -128,11 +129,11 @@ func (e *Engine) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { for _, entry := range m.Wantlist() { if entry.Cancel { l.CancelWant(entry.Key) - e.taskqueue.Remove(entry.Key, p) + e.peerRequestQueue.Remove(entry.Key, p) } else { l.Wants(entry.Key, entry.Priority) newWorkExists = true - e.taskqueue.Push(entry.Key, entry.Priority, p) + e.peerRequestQueue.Push(entry.Key, entry.Priority, p) } } @@ -142,7 +143,7 @@ func (e *Engine) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { for _, l := range e.ledgerMap { if l.WantListContains(block.Key()) { newWorkExists = true - e.taskqueue.Push(block.Key(), 1, l.Partner) + e.peerRequestQueue.Push(block.Key(), 1, l.Partner) } } } @@ -163,7 +164,7 @@ func (e *Engine) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error { for _, block := range m.Blocks() { l.SentBytes(len(block.Data)) l.wantList.Remove(block.Key()) - e.taskqueue.Remove(block.Key(), p) + e.peerRequestQueue.Remove(block.Key(), p) } return nil From ed4cf98dc7cf0cc7458fa5a98382f944b76aaa84 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 23:32:04 -0800 Subject: [PATCH 0242/1035] fix: don't sort the output of Entries() only sort SortedEntries() License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@c8f74e553c421cacccff306c0db0eb49338a36f5 --- bitswap/wantlist/wantlist.go | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 6ef018668..22b2c1c2c 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -63,26 +63,13 @@ func (w *ThreadSafe) Contains(k u.Key) bool { func (w *ThreadSafe) Entries() []*Entry { w.lk.RLock() defer w.lk.RUnlock() - var es entrySlice - for _, e := range w.set { - es = append(es, e) - } - // TODO rename SortedEntries (state that they're sorted so callers know - // they're paying an expense) - sort.Sort(es) - return es + 
return w.Wantlist.Entries() } func (w *ThreadSafe) SortedEntries() []*Entry { w.lk.RLock() defer w.lk.RUnlock() - var es entrySlice - - for _, e := range w.set { - es = append(es, e) - } - sort.Sort(es) - return es + return w.Wantlist.SortedEntries() } func (w *Wantlist) Add(k u.Key, priority int) { @@ -106,17 +93,14 @@ func (w *Wantlist) Contains(k u.Key) bool { func (w *Wantlist) Entries() []*Entry { var es entrySlice - for _, e := range w.set { es = append(es, e) } - sort.Sort(es) return es } func (w *Wantlist) SortedEntries() []*Entry { var es entrySlice - for _, e := range w.set { es = append(es, e) } From d6f877727d2a936467378482758e017e92998410 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 23:39:39 -0800 Subject: [PATCH 0243/1035] rm unused method License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@c11cf9a035529583d96ad2133f5ec5708f1f5b16 --- bitswap/decision/engine.go | 10 ---------- bitswap/decision/engine_test.go | 17 ----------------- 2 files changed, 27 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index b8018eef0..e34b6a225 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -96,16 +96,6 @@ func (e *Engine) Peers() []peer.Peer { return response } -// BlockIsWantedByPeer returns true if peer wants the block given by this -// key -func (e *Engine) BlockIsWantedByPeer(k u.Key, p peer.Peer) bool { - e.lock.RLock() - defer e.lock.RUnlock() - - ledger := e.findOrCreate(p) - return ledger.WantListContains(k) -} - // MessageReceived performs book-keeping. Returns error if passed invalid // arguments. 
func (e *Engine) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 592236c3e..5b1740754 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -60,23 +60,6 @@ func TestConsistentAccounting(t *testing.T) { } } -func TestBlockRecordedAsWantedAfterMessageReceived(t *testing.T) { - beggar := newPeerAndLedgermanager("can't be chooser") - chooser := newPeerAndLedgermanager("chooses JIF") - - block := blocks.NewBlock([]byte("data wanted by beggar")) - - messageFromBeggarToChooser := message.New() - messageFromBeggarToChooser.AddEntry(block.Key(), 1) - - chooser.Engine.MessageReceived(beggar.Peer, messageFromBeggarToChooser) - // for this test, doesn't matter if you record that beggar sent - - if !chooser.Engine.BlockIsWantedByPeer(block.Key(), beggar.Peer) { - t.Fatal("chooser failed to record that beggar wants block") - } -} - func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { sanfrancisco := newPeerAndLedgermanager("sf") From dd48a827e9b04394fffc293b6a75d45e6f606ddc Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 23:41:02 -0800 Subject: [PATCH 0244/1035] add comment License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@fa7cfe40f00d11729db959516ad32288d23669a2 --- bitswap/wantlist/wantlist.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 22b2c1c2c..1bf662102 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -17,6 +17,8 @@ type Wantlist struct { } type Entry struct { + // TODO consider making entries immutable so they can be shared safely and + // slices can be copied efficiently. 
Key u.Key Priority int } From 305e04af3f7ff0fd687f7e0dd9f516a57a52af13 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 23:43:23 -0800 Subject: [PATCH 0245/1035] unexport functions License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@5d6118424d323049a2e90936b96d77f6a977454f --- bitswap/decision/engine.go | 12 ++++-------- bitswap/decision/engine_test.go | 6 +++--- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index e34b6a225..1a46d4535 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -160,17 +160,13 @@ func (e *Engine) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error { return nil } -func (e *Engine) NumBytesSentTo(p peer.Peer) uint64 { - e.lock.RLock() - defer e.lock.RUnlock() - +func (e *Engine) numBytesSentTo(p peer.Peer) uint64 { + // NB not threadsafe return e.findOrCreate(p).Accounting.BytesSent } -func (e *Engine) NumBytesReceivedFrom(p peer.Peer) uint64 { - e.lock.RLock() - defer e.lock.RUnlock() - +func (e *Engine) numBytesReceivedFrom(p peer.Peer) uint64 { + // NB not threadsafe return e.findOrCreate(p).Accounting.BytesRecv } diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 5b1740754..148937573 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -44,18 +44,18 @@ func TestConsistentAccounting(t *testing.T) { } // Ensure sender records the change - if sender.Engine.NumBytesSentTo(receiver.Peer) == 0 { + if sender.Engine.numBytesSentTo(receiver.Peer) == 0 { t.Fatal("Sent bytes were not recorded") } // Ensure sender and receiver have the same values - if sender.Engine.NumBytesSentTo(receiver.Peer) != receiver.Engine.NumBytesReceivedFrom(sender.Peer) { + if sender.Engine.numBytesSentTo(receiver.Peer) != receiver.Engine.numBytesReceivedFrom(sender.Peer) { t.Fatal("Inconsistent book-keeping. 
Strategies don't agree") } // Ensure sender didn't record receving anything. And that the receiver // didn't record sending anything - if receiver.Engine.NumBytesSentTo(sender.Peer) != 0 || sender.Engine.NumBytesReceivedFrom(receiver.Peer) != 0 { + if receiver.Engine.numBytesSentTo(sender.Peer) != 0 || sender.Engine.numBytesReceivedFrom(receiver.Peer) != 0 { t.Fatal("Bert didn't send bytes to Ernie") } } From 31fbf417cb6d0ea3b15363b91fa506295f786c2f Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 23:54:24 -0800 Subject: [PATCH 0246/1035] fix: check blockstore before adding task addresses https://github.com/jbenet/go-ipfs/pull/438#discussion_r21953742 License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@edaafa969cc90c3f054158c04faf23e474f3b74f --- bitswap/decision/engine.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 1a46d4535..29ee9dce2 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -122,8 +122,10 @@ func (e *Engine) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { e.peerRequestQueue.Remove(entry.Key, p) } else { l.Wants(entry.Key, entry.Priority) - newWorkExists = true - e.peerRequestQueue.Push(entry.Key, entry.Priority, p) + if exists, err := e.bs.Has(entry.Key); err == nil && exists { + newWorkExists = true + e.peerRequestQueue.Push(entry.Key, entry.Priority, p) + } } } From 2e9a5a5d62e816dbe738b946e5c9e29e4a7367d9 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 23:57:28 -0800 Subject: [PATCH 0247/1035] log unusual event License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@82a02928f0c5237b5f341774f5e73094b15e8bf6 --- bitswap/decision/engine.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 29ee9dce2..d50c5c0c6 100644 --- 
a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -65,7 +65,8 @@ func (e *Engine) taskWorker(ctx context.Context) { } block, err := e.bs.Get(nextTask.Entry.Key) if err != nil { - continue // TODO maybe return an error + log.Warning("engine: task exists to send block, but block is not in blockstore") + continue } // construct message here so we can make decisions about any additional // information we may want to include at this time. From 42af0be32ddf5277c9f201259d0e3716fd7b9f42 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 17 Dec 2014 00:24:59 -0800 Subject: [PATCH 0248/1035] refactor: *Entry -> Entry in many places, entries are assigned from one slice to another and in different goroutines. In one place, entries were modified (in the queue). To avoid shared mutable state, probably best to handle entries by value. License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@c17545f63bfdb7142be9d475b3fe76eddd2fa069 --- bitswap/message/message.go | 12 ++++++------ bitswap/wantlist/wantlist.go | 16 ++++++++-------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 245fc35fb..7f7f1d08e 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -19,7 +19,7 @@ import ( type BitSwapMessage interface { // Wantlist returns a slice of unique keys that represent data wanted by // the sender. 
- Wantlist() []*Entry + Wantlist() []Entry // Blocks returns a slice of unique blocks Blocks() []*blocks.Block @@ -48,7 +48,7 @@ type Exportable interface { type impl struct { full bool - wantlist map[u.Key]*Entry + wantlist map[u.Key]Entry blocks map[u.Key]*blocks.Block // map to detect duplicates } @@ -59,7 +59,7 @@ func New() BitSwapMessage { func newMsg() *impl { return &impl{ blocks: make(map[u.Key]*blocks.Block), - wantlist: make(map[u.Key]*Entry), + wantlist: make(map[u.Key]Entry), full: true, } } @@ -90,8 +90,8 @@ func (m *impl) Full() bool { return m.full } -func (m *impl) Wantlist() []*Entry { - var out []*Entry +func (m *impl) Wantlist() []Entry { + var out []Entry for _, e := range m.wantlist { out = append(out, e) } @@ -120,7 +120,7 @@ func (m *impl) addEntry(k u.Key, priority int, cancel bool) { e.Priority = priority e.Cancel = cancel } else { - m.wantlist[k] = &Entry{ + m.wantlist[k] = Entry{ Entry: wantlist.Entry{ Key: k, Priority: priority, diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 1bf662102..aa58ee155 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -13,7 +13,7 @@ type ThreadSafe struct { // not threadsafe type Wantlist struct { - set map[u.Key]*Entry + set map[u.Key]Entry } type Entry struct { @@ -23,7 +23,7 @@ type Entry struct { Priority int } -type entrySlice []*Entry +type entrySlice []Entry func (es entrySlice) Len() int { return len(es) } func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] } @@ -37,7 +37,7 @@ func NewThreadSafe() *ThreadSafe { func New() *Wantlist { return &Wantlist{ - set: make(map[u.Key]*Entry), + set: make(map[u.Key]Entry), } } @@ -62,13 +62,13 @@ func (w *ThreadSafe) Contains(k u.Key) bool { return w.Wantlist.Contains(k) } -func (w *ThreadSafe) Entries() []*Entry { +func (w *ThreadSafe) Entries() []Entry { w.lk.RLock() defer w.lk.RUnlock() return w.Wantlist.Entries() } -func (w *ThreadSafe) SortedEntries() []*Entry { +func (w *ThreadSafe) 
SortedEntries() []Entry { w.lk.RLock() defer w.lk.RUnlock() return w.Wantlist.SortedEntries() @@ -78,7 +78,7 @@ func (w *Wantlist) Add(k u.Key, priority int) { if _, ok := w.set[k]; ok { return } - w.set[k] = &Entry{ + w.set[k] = Entry{ Key: k, Priority: priority, } @@ -93,7 +93,7 @@ func (w *Wantlist) Contains(k u.Key) bool { return ok } -func (w *Wantlist) Entries() []*Entry { +func (w *Wantlist) Entries() []Entry { var es entrySlice for _, e := range w.set { es = append(es, e) @@ -101,7 +101,7 @@ func (w *Wantlist) Entries() []*Entry { return es } -func (w *Wantlist) SortedEntries() []*Entry { +func (w *Wantlist) SortedEntries() []Entry { var es entrySlice for _, e := range w.set { es = append(es, e) From 7d297bc977b6e9a074e1eee48f345015bea0f614 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 17 Dec 2014 01:52:37 -0800 Subject: [PATCH 0249/1035] extract constants License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@fa030d66695cb2c459b398e2034c76dde684c092 --- bitswap/bitswap.go | 3 ++- bitswap/decision/engine.go | 6 +++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d0e49d182..11c6affa8 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -29,6 +29,7 @@ const ( maxProvidersPerRequest = 3 providerRequestTimeout = time.Second * 10 hasBlockTimeout = time.Second * 15 + sizeBatchRequestChan = 32 ) var ( @@ -59,7 +60,7 @@ func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, rout routing: routing, sender: network, wantlist: wantlist.NewThreadSafe(), - batchRequests: make(chan []u.Key, 32), + batchRequests: make(chan []u.Key, sizeBatchRequestChan), } network.SetDelegate(bs) go bs.clientWorker(ctx) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index d50c5c0c6..aade14955 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -13,6 +13,10 @@ import ( var log = u.Logger("engine") +const ( + 
sizeOutboxChan = 4 +) + // Envelope contains a message for a Peer type Envelope struct { // Peer is the intended recipient @@ -43,7 +47,7 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { ledgerMap: make(map[u.Key]*ledger), bs: bs, peerRequestQueue: newTaskQueue(), - outbox: make(chan Envelope, 4), // TODO extract constant + outbox: make(chan Envelope, sizeOutboxChan), workSignal: make(chan struct{}), } go e.taskWorker(ctx) From febb9971fc934953b6ac87ef8caa0d1eb16c9ffc Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 17 Dec 2014 02:24:16 -0800 Subject: [PATCH 0250/1035] refactor(bs/decision.Engine): pass in Entry License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@574213ffa71652a773488a7c9695a82d89fc53f9 --- bitswap/decision/engine.go | 4 ++-- bitswap/decision/taskqueue.go | 13 +++++-------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index aade14955..813268f5b 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -129,7 +129,7 @@ func (e *Engine) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { l.Wants(entry.Key, entry.Priority) if exists, err := e.bs.Has(entry.Key); err == nil && exists { newWorkExists = true - e.peerRequestQueue.Push(entry.Key, entry.Priority, p) + e.peerRequestQueue.Push(entry.Entry, p) } } } @@ -140,7 +140,7 @@ func (e *Engine) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { for _, l := range e.ledgerMap { if l.WantListContains(block.Key()) { newWorkExists = true - e.peerRequestQueue.Push(block.Key(), 1, l.Partner) + e.peerRequestQueue.Push(wl.Entry{block.Key(), 1}, l.Partner) } } } diff --git a/bitswap/decision/taskqueue.go b/bitswap/decision/taskqueue.go index 1cf279ef7..b6341c9b2 100644 --- a/bitswap/decision/taskqueue.go +++ b/bitswap/decision/taskqueue.go @@ -28,22 +28,19 @@ type task struct { } // Push currently adds a new task to the end of the list 
-func (tl *taskQueue) Push(block u.Key, priority int, to peer.Peer) { - if task, ok := tl.taskmap[taskKey(to, block)]; ok { +func (tl *taskQueue) Push(entry wantlist.Entry, to peer.Peer) { + if task, ok := tl.taskmap[taskKey(to, entry.Key)]; ok { // TODO: when priority queue is implemented, // rearrange this task - task.Entry.Priority = priority + task.Entry.Priority = entry.Priority return } task := &task{ - Entry: wantlist.Entry{ - Key: block, - Priority: priority, - }, + Entry: entry, Target: to, } tl.tasks = append(tl.tasks, task) - tl.taskmap[taskKey(to, block)] = task + tl.taskmap[taskKey(to, entry.Key)] = task } // Pop 'pops' the next task to be performed. Returns nil no task exists. From 26c5ed39d0741bd01a27a9158a3f789e4215f16b Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 17 Dec 2014 02:24:54 -0800 Subject: [PATCH 0251/1035] fix: add lock to taskQueue @whyrusleeping may wanna have a look and make sure i didn't screw anything up here BenchmarkInstantaneousAddCat1MB-4 200 10763761 ns/op 97.42 MB/s BenchmarkInstantaneousAddCat2MB-4 panic: runtime error: invalid memory address or nil pointer dereference [signal 0xb code=0x1 addr=0x0 pc=0xbedd] goroutine 14297 [running]: github.com/jbenet/go-ipfs/exchange/bitswap/decision.(*taskQueue).Remove(0xc2087553a0, 0xc2085ef200, 0x22, 0x56f570, 0xc208367a40) /Users/btc/go/src/github.com/jbenet/go-ipfs/exchange/bitswap/decision/taskqueue.go:66 +0x82 github.com/jbenet/go-ipfs/exchange/bitswap/decision.(*Engine).MessageSent(0xc20871b5c0, 0x56f570, 0xc208367a40, 0x570040, 0xc208753d40, 0x0, 0x0) /Users/btc/go/src/github.com/jbenet/go-ipfs/exchange/bitswap/decision/engine.go:177 +0x29e github.com/jbenet/go-ipfs/exchange/bitswap.(*bitswap).send(0xc20871b7a0, 0x56f4d8, 0xc208379800, 0x56f570, 0xc208367a40, 0x570040, 0xc208753d40, 0x0, 0x0) /Users/btc/go/src/github.com/jbenet/go-ipfs/exchange/bitswap/bitswap.go:352 +0x11c github.com/jbenet/go-ipfs/exchange/bitswap.(*bitswap).taskWorker(0xc20871b7a0, 0x56f4d8, 
0xc208379800) /Users/btc/go/src/github.com/jbenet/go-ipfs/exchange/bitswap/bitswap.go:238 +0x165 created by github.com/jbenet/go-ipfs/exchange/bitswap.New /Users/btc/go/src/github.com/jbenet/go-ipfs/exchange/bitswap/bitswap.go:66 +0x49e This commit was moved from ipfs/go-bitswap@c3d5b6ee5e64eeb5b933e256ea35e306b14c1f84 --- bitswap/decision/taskqueue.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/bitswap/decision/taskqueue.go b/bitswap/decision/taskqueue.go index b6341c9b2..a76c56e9b 100644 --- a/bitswap/decision/taskqueue.go +++ b/bitswap/decision/taskqueue.go @@ -1,6 +1,8 @@ package decision import ( + "sync" + wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" @@ -11,6 +13,7 @@ import ( // tasks (on getnext). For now, we are assuming a dumb/nice strategy. type taskQueue struct { // TODO: make this into a priority queue + lock sync.Mutex tasks []*task taskmap map[string]*task } @@ -29,6 +32,8 @@ type task struct { // Push currently adds a new task to the end of the list func (tl *taskQueue) Push(entry wantlist.Entry, to peer.Peer) { + tl.lock.Lock() + defer tl.lock.Unlock() if task, ok := tl.taskmap[taskKey(to, entry.Key)]; ok { // TODO: when priority queue is implemented, // rearrange this task @@ -45,6 +50,8 @@ func (tl *taskQueue) Push(entry wantlist.Entry, to peer.Peer) { // Pop 'pops' the next task to be performed. Returns nil no task exists. func (tl *taskQueue) Pop() *task { + tl.lock.Lock() + defer tl.lock.Unlock() var out *task for len(tl.tasks) > 0 { // TODO: instead of zero, use exponential distribution @@ -63,10 +70,12 @@ func (tl *taskQueue) Pop() *task { // Remove lazily removes a task from the queue func (tl *taskQueue) Remove(k u.Key, p peer.Peer) { + tl.lock.Lock() t, ok := tl.taskmap[taskKey(p, k)] if ok { t.Trash = true } + tl.lock.Unlock() } // taskKey returns a key that uniquely identifies a task. 
From e54dc6bed100eb561cf514ee6a2c231956fa41cb Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 17 Dec 2014 03:11:57 -0800 Subject: [PATCH 0252/1035] doc: some comments about the future of the decision engine License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@74d13f3c5286c2cbf67d0beed04173c71b42bee3 --- bitswap/decision/engine.go | 44 ++++++++++++++++++++++++++++++++++---- 1 file changed, 40 insertions(+), 4 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 813268f5b..ea4539437 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -11,6 +11,36 @@ import ( u "github.com/jbenet/go-ipfs/util" ) +// TODO consider taking responsibility for other types of requests. For +// example, there could be a |cancelQueue| for all of the cancellation +// messages that need to go out. There could also be a |wantlistQueue| for +// the local peer's wantlists. Alternatively, these could all be bundled +// into a single, intelligent global queue that efficiently +// batches/combines and takes all of these into consideration. +// +// Right now, messages go onto the network for four reasons: +// 1. an initial `sendwantlist` message to a provider of the first key in a request +// 2. a periodic full sweep of `sendwantlist` messages to all providers +// 3. upon receipt of blocks, a `cancel` message to all peers +// 4. draining the priority queue of `blockrequests` from peers +// +// Presently, only `blockrequests` are handled by the decision engine. +// However, there is an opportunity to give it more responsibility! If the +// decision engine is given responsibility for all of the others, it can +// intelligently decide how to combine requests efficiently. 
+// +// Some examples of what would be possible: +// +// * when sending out the wantlists, include `cancel` requests +// * when handling `blockrequests`, include `sendwantlist` and `cancel` as appropriate +// * when handling `cancel`, if we recently received a wanted block from a +// peer, include a partial wantlist that contains a few other high priority +// blocks +// +// In a sense, if we treat the decision engine as a black box, it could do +// whatever it sees fit to produce desired outcomes (get wanted keys +// quickly, maintain good relationships with peers, etc). + var log = u.Logger("engine") const ( @@ -26,18 +56,24 @@ type Envelope struct { } type Engine struct { - // FIXME peerRequestQueue isn't threadsafe nor is it protected by a mutex. - // consider a way to avoid sharing the peerRequestQueue between the worker - // and the receiver + // peerRequestQueue is a priority queue of requests received from peers. + // Requests are popped from the queue, packaged up, and placed in the + // outbox. peerRequestQueue *taskQueue + // FIXME it's a bit odd for the client and the worker to both share memory + // (both modify the peerRequestQueue) and also to communicate over the + // workSignal channel. consider sending requests over the channel and + // allowing the worker to have exclusive access to the peerRequestQueue. In + // that case, no lock would be required. workSignal chan struct{} + // outbox contains outgoing messages to peers outbox chan Envelope bs bstore.Blockstore - lock sync.RWMutex + lock sync.RWMutex // protects the fields immediatly below // ledgerMap lists Ledgers by their Partner key. ledgerMap map[u.Key]*ledger } From 7ea0bb48def1e366a5ea9cd509cb0de39f96d918 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 17 Dec 2014 03:37:49 -0800 Subject: [PATCH 0253/1035] fix: batches of blocks have equal priority addresses... 
https://github.com/jbenet/go-ipfs/pull/438/files#r21878994 License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@4fd7da036c44d8426f4bd0405202bdbd26cc905c --- bitswap/bitswap.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 11c6affa8..149996b3a 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -3,6 +3,7 @@ package bitswap import ( + "math" "sync" "time" @@ -30,6 +31,8 @@ const ( providerRequestTimeout = time.Second * 10 hasBlockTimeout = time.Second * 15 sizeBatchRequestChan = 32 + // kMaxPriority is the max priority as defined by the bitswap protocol + kMaxPriority = math.MaxInt32 ) var ( @@ -261,7 +264,7 @@ func (bs *bitswap) clientWorker(parent context.Context) { continue } for i, k := range ks { - bs.wantlist.Add(k, len(ks)-i) + bs.wantlist.Add(k, kMaxPriority-i) } // NB: send want list to providers for the first peer in this list. // the assumption is made that the providers of the first key in From eb75b0a752220833b4e12d80c0263698e6047a81 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 17 Dec 2014 19:27:41 +0000 Subject: [PATCH 0254/1035] clean peerset constructor names This commit was moved from ipfs/go-bitswap@fad1c7daa22daee13e63475ab52b2f0e46560f9a --- bitswap/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 149996b3a..912ed1210 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -212,7 +212,7 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wantli message.AddEntry(e.Key, e.Priority) } - ps := pset.NewPeerSet() + ps := pset.New() // Get providers for all entries in wantlist (could take a while) wg := sync.WaitGroup{} From 251c9b0363e1bcb46f8329ea6de45e095874bb0b Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Fri, 19 Dec 2014 12:19:56 -0800 Subject: [PATCH 0255/1035] peer change: peer.Peer -> peer.ID this is a major refactor of 
the entire codebase it changes the monolithic peer.Peer into using a peer.ID and a peer.Peerstore. Other changes: - removed handshake3. - testutil vastly simplified peer - secio bugfix + debugging logs - testutil: RandKeyPair - backpressure bugfix: w.o.w. - peer: added hex enc/dec - peer: added a PeerInfo struct PeerInfo is a small struct used to pass around a peer with a set of addresses and keys. This is not meant to be a complete view of the system, but rather to model updates to the peerstore. It is used by things like the routing system. - updated peer/queue + peerset - latency metrics - testutil: use crand for PeerID gen RandPeerID generates random "valid" peer IDs. it does not NEED to generate keys because it is as if we lost the key right away. fine to read some randomness and hash it. to generate proper keys and an ID, use: sk, pk, _ := testutil.RandKeyPair() id, _ := peer.IDFromPublicKey(pk) Also added RandPeerIDFatal helper - removed old spipe - updated seccat - core: cleanup initIdentity - removed old getFromPeerList This commit was moved from ipfs/go-bitswap@42f61ec0e8028854683e51f6d9cf6d20a8507d2d --- bitswap/bitswap.go | 29 ++++++++------- bitswap/bitswap_test.go | 8 ++-- bitswap/decision/engine.go | 24 ++++++------ bitswap/decision/engine_test.go | 12 +++--- bitswap/decision/ledger.go | 4 +- bitswap/decision/taskqueue.go | 10 ++--- bitswap/network/interface.go | 12 +++--- bitswap/network/ipfs_impl.go | 6 +-- bitswap/testnet/network.go | 65 ++++++++++++++++----------------- bitswap/testnet/network_test.go | 48 +++++++++++++----------- bitswap/testutils.go | 9 ++--- 11 files changed, 116 insertions(+), 111 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 912ed1210..376391263 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -8,6 +8,7 @@ import ( "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + blocks "github.com/jbenet/go-ipfs/blocks" blockstore 
"github.com/jbenet/go-ipfs/blocks/blockstore" exchange "github.com/jbenet/go-ipfs/exchange" @@ -43,7 +44,7 @@ var ( // BitSwapNetwork. This function registers the returned instance as the network // delegate. // Runs until context is cancelled. -func New(parent context.Context, p peer.Peer, network bsnet.BitSwapNetwork, routing bsnet.Routing, +func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, routing bsnet.Routing, bstore blockstore.Blockstore, nice bool) exchange.Interface { ctx, cancelFunc := context.WithCancel(parent) @@ -165,7 +166,7 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { return bs.routing.Provide(ctx, blk.Key()) } -func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) error { +func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.PeerInfo) error { if peers == nil { panic("Cant send wantlist to nil peerchan") } @@ -175,9 +176,9 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e } wg := sync.WaitGroup{} for peerToQuery := range peers { - log.Event(ctx, "PeerToQuery", peerToQuery) + log.Event(ctx, "PeerToQuery", peerToQuery.ID) wg.Add(1) - go func(p peer.Peer) { + go func(p peer.ID) { defer wg.Done() log.Event(ctx, "DialPeer", p) @@ -196,7 +197,7 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.Peer) e // communication fails. May require slightly different API to // get better guarantees. May need shared sequence numbers. 
bs.engine.MessageSent(p, message) - }(peerToQuery) + }(peerToQuery.ID) } wg.Wait() return nil @@ -224,8 +225,8 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wantli providers := bs.routing.FindProvidersAsync(child, k, maxProvidersPerRequest) for prov := range providers { - if ps.TryAdd(prov) { //Do once per peer - bs.send(ctx, prov, message) + if ps.TryAdd(prov.ID) { //Do once per peer + bs.send(ctx, prov.ID, message) } } }(e.Key) @@ -287,19 +288,19 @@ func (bs *bitswap) clientWorker(parent context.Context) { } // TODO(brian): handle errors -func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsmsg.BitSwapMessage) ( - peer.Peer, bsmsg.BitSwapMessage) { +func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) ( + peer.ID, bsmsg.BitSwapMessage) { log.Debugf("ReceiveMessage from %s", p) - if p == nil { + if p == "" { log.Error("Received message from nil peer!") // TODO propagate the error upward - return nil, nil + return "", nil } if incoming == nil { log.Error("Got nil bitswap message!") // TODO propagate the error upward - return nil, nil + return "", nil } // This call records changes to wantlists, blocks received, @@ -321,7 +322,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsm bs.cancelBlocks(ctx, keys) // TODO: consider changing this function to not return anything - return nil, nil + return "", nil } func (bs *bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { @@ -349,7 +350,7 @@ func (bs *bitswap) ReceiveError(err error) { // send strives to ensure that accounting is always performed when a message is // sent -func (bs *bitswap) send(ctx context.Context, p peer.Peer, m bsmsg.BitSwapMessage) error { +func (bs *bitswap) send(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) error { if err := bs.sender.SendMessage(ctx, p, m); err != nil { return err } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go 
index 2c04b0508..42bdd631c 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -7,13 +7,14 @@ import ( "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + blocks "github.com/jbenet/go-ipfs/blocks" blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" + peer "github.com/jbenet/go-ipfs/peer" mockrouting "github.com/jbenet/go-ipfs/routing/mock" u "github.com/jbenet/go-ipfs/util" delay "github.com/jbenet/go-ipfs/util/delay" - testutil "github.com/jbenet/go-ipfs/util/testutil" ) // FIXME the tests are really sensitive to the network delay. fix them to work @@ -62,7 +63,8 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { defer g.Close() block := blocks.NewBlock([]byte("block")) - rs.Client(testutil.NewPeerWithIDString("testing")).Provide(context.Background(), block.Key()) // but not on network + pinfo := peer.PeerInfo{ID: peer.ID("testing")} + rs.Client(pinfo).Provide(context.Background(), block.Key()) // but not on network solo := g.Next() defer solo.Exchange.Close() @@ -153,7 +155,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { first.Blockstore().Put(b) blkeys = append(blkeys, b.Key()) first.Exchange.HasBlock(context.Background(), b) - rs.Client(first.Peer).Provide(context.Background(), b.Key()) + rs.Client(peer.PeerInfo{ID: first.Peer}).Provide(context.Background(), b.Key()) } t.Log("Distribute!") diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index ea4539437..da5ccfe6d 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -50,7 +50,7 @@ const ( // Envelope contains a message for a Peer type Envelope struct { // Peer is the intended recipient - Peer peer.Peer + Peer peer.ID // Message is the payload Message bsmsg.BitSwapMessage } @@ -75,12 +75,12 @@ type Engine struct { lock sync.RWMutex // protects the fields immediatly below // ledgerMap lists Ledgers by 
their Partner key. - ledgerMap map[u.Key]*ledger + ledgerMap map[peer.ID]*ledger } func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { e := &Engine{ - ledgerMap: make(map[u.Key]*ledger), + ledgerMap: make(map[peer.ID]*ledger), bs: bs, peerRequestQueue: newTaskQueue(), outbox: make(chan Envelope, sizeOutboxChan), @@ -126,11 +126,11 @@ func (e *Engine) Outbox() <-chan Envelope { } // Returns a slice of Peers with whom the local node has active sessions -func (e *Engine) Peers() []peer.Peer { +func (e *Engine) Peers() []peer.ID { e.lock.RLock() defer e.lock.RUnlock() - response := make([]peer.Peer, 0) + response := make([]peer.ID, 0) for _, ledger := range e.ledgerMap { response = append(response, ledger.Partner) } @@ -139,7 +139,7 @@ func (e *Engine) Peers() []peer.Peer { // MessageReceived performs book-keeping. Returns error if passed invalid // arguments. -func (e *Engine) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { +func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { newWorkExists := false defer func() { if newWorkExists { @@ -189,7 +189,7 @@ func (e *Engine) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error { // inconsistent. 
Would need to ensure that Sends and acknowledgement of the // send happen atomically -func (e *Engine) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error { +func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) error { e.lock.Lock() defer e.lock.Unlock() @@ -203,22 +203,22 @@ func (e *Engine) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error { return nil } -func (e *Engine) numBytesSentTo(p peer.Peer) uint64 { +func (e *Engine) numBytesSentTo(p peer.ID) uint64 { // NB not threadsafe return e.findOrCreate(p).Accounting.BytesSent } -func (e *Engine) numBytesReceivedFrom(p peer.Peer) uint64 { +func (e *Engine) numBytesReceivedFrom(p peer.ID) uint64 { // NB not threadsafe return e.findOrCreate(p).Accounting.BytesRecv } // ledger lazily instantiates a ledger -func (e *Engine) findOrCreate(p peer.Peer) *ledger { - l, ok := e.ledgerMap[p.Key()] +func (e *Engine) findOrCreate(p peer.ID) *ledger { + l, ok := e.ledgerMap[p] if !ok { l = newLedger(p) - e.ledgerMap[p.Key()] = l + e.ledgerMap[p] = l } return l } diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 148937573..0196863b3 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -7,21 +7,21 @@ import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" sync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" + blocks "github.com/jbenet/go-ipfs/blocks" blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" message "github.com/jbenet/go-ipfs/exchange/bitswap/message" peer "github.com/jbenet/go-ipfs/peer" - testutil "github.com/jbenet/go-ipfs/util/testutil" ) type peerAndEngine struct { - peer.Peer + Peer peer.ID Engine *Engine } func newPeerAndLedgermanager(idStr string) peerAndEngine { return peerAndEngine{ - Peer: testutil.NewPeerWithIDString(idStr), + Peer: peer.ID(idStr), 
//Strategy: New(true), Engine: NewEngine(context.TODO(), blockstore.NewBlockstore(sync.MutexWrap(ds.NewMapDatastore()))), @@ -70,7 +70,7 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { sanfrancisco.Engine.MessageSent(seattle.Peer, m) seattle.Engine.MessageReceived(sanfrancisco.Peer, m) - if seattle.Peer.Key() == sanfrancisco.Peer.Key() { + if seattle.Peer == sanfrancisco.Peer { t.Fatal("Sanity Check: Peers have same Key!") } @@ -83,9 +83,9 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { } } -func peerIsPartner(p peer.Peer, e *Engine) bool { +func peerIsPartner(p peer.ID, e *Engine) bool { for _, partner := range e.Peers() { - if partner.Key() == p.Key() { + if partner == p { return true } } diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index eea87af1f..f2b824603 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -12,7 +12,7 @@ import ( // access/lookups. type keySet map[u.Key]struct{} -func newLedger(p peer.Peer) *ledger { +func newLedger(p peer.ID) *ledger { return &ledger{ wantList: wl.New(), Partner: p, @@ -24,7 +24,7 @@ func newLedger(p peer.Peer) *ledger { // NOT threadsafe type ledger struct { // Partner is the remote Peer. - Partner peer.Peer + Partner peer.ID // Accounting tracks bytes sent and recieved. 
Accounting debtRatio diff --git a/bitswap/decision/taskqueue.go b/bitswap/decision/taskqueue.go index a76c56e9b..c86a73371 100644 --- a/bitswap/decision/taskqueue.go +++ b/bitswap/decision/taskqueue.go @@ -26,12 +26,12 @@ func newTaskQueue() *taskQueue { type task struct { Entry wantlist.Entry - Target peer.Peer + Target peer.ID Trash bool } // Push currently adds a new task to the end of the list -func (tl *taskQueue) Push(entry wantlist.Entry, to peer.Peer) { +func (tl *taskQueue) Push(entry wantlist.Entry, to peer.ID) { tl.lock.Lock() defer tl.lock.Unlock() if task, ok := tl.taskmap[taskKey(to, entry.Key)]; ok { @@ -69,7 +69,7 @@ func (tl *taskQueue) Pop() *task { } // Remove lazily removes a task from the queue -func (tl *taskQueue) Remove(k u.Key, p peer.Peer) { +func (tl *taskQueue) Remove(k u.Key, p peer.ID) { tl.lock.Lock() t, ok := tl.taskmap[taskKey(p, k)] if ok { @@ -79,6 +79,6 @@ func (tl *taskQueue) Remove(k u.Key, p peer.Peer) { } // taskKey returns a key that uniquely identifies a task. -func taskKey(p peer.Peer, k u.Key) string { - return string(p.Key() + k) +func taskKey(p peer.ID, k u.Key) string { + return string(p) + string(k) } diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 44557b064..94ceadbff 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -12,18 +12,18 @@ import ( type BitSwapNetwork interface { // DialPeer ensures there is a connection to peer. - DialPeer(context.Context, peer.Peer) error + DialPeer(context.Context, peer.ID) error // SendMessage sends a BitSwap message to a peer. SendMessage( context.Context, - peer.Peer, + peer.ID, bsmsg.BitSwapMessage) error // SendRequest sends a BitSwap message to a peer and waits for a response. 
SendRequest( context.Context, - peer.Peer, + peer.ID, bsmsg.BitSwapMessage) (incoming bsmsg.BitSwapMessage, err error) // SetDelegate registers the Reciver to handle messages received from the @@ -34,15 +34,15 @@ type BitSwapNetwork interface { // Implement Receiver to receive messages from the BitSwapNetwork type Receiver interface { ReceiveMessage( - ctx context.Context, sender peer.Peer, incoming bsmsg.BitSwapMessage) ( - destination peer.Peer, outgoing bsmsg.BitSwapMessage) + ctx context.Context, sender peer.ID, incoming bsmsg.BitSwapMessage) ( + destination peer.ID, outgoing bsmsg.BitSwapMessage) ReceiveError(error) } type Routing interface { // FindProvidersAsync returns a channel of providers for the given key - FindProvidersAsync(context.Context, u.Key, int) <-chan peer.Peer + FindProvidersAsync(context.Context, u.Key, int) <-chan peer.PeerInfo // Provide provides the key to the network Provide(context.Context, u.Key) error diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 3e6e54787..3a7a06091 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -53,13 +53,13 @@ func (bsnet *impl) handleNewStream(s inet.Stream) { } -func (bsnet *impl) DialPeer(ctx context.Context, p peer.Peer) error { +func (bsnet *impl) DialPeer(ctx context.Context, p peer.ID) error { return bsnet.network.DialPeer(ctx, p) } func (bsnet *impl) SendMessage( ctx context.Context, - p peer.Peer, + p peer.ID, outgoing bsmsg.BitSwapMessage) error { s, err := bsnet.network.NewStream(inet.ProtocolBitswap, p) @@ -73,7 +73,7 @@ func (bsnet *impl) SendMessage( func (bsnet *impl) SendRequest( ctx context.Context, - p peer.Peer, + p peer.ID, outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) { s, err := bsnet.network.NewStream(inet.ProtocolBitswap, p) diff --git a/bitswap/testnet/network.go b/bitswap/testnet/network.go index b8f61b413..9e17b67f4 100644 --- a/bitswap/testnet/network.go +++ b/bitswap/testnet/network.go @@ -1,33 +1,32 @@ 
package bitswap import ( - "bytes" "errors" "fmt" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/peer" - "github.com/jbenet/go-ipfs/util" delay "github.com/jbenet/go-ipfs/util/delay" ) type Network interface { - Adapter(peer.Peer) bsnet.BitSwapNetwork + Adapter(peer.ID) bsnet.BitSwapNetwork - HasPeer(peer.Peer) bool + HasPeer(peer.ID) bool SendMessage( ctx context.Context, - from peer.Peer, - to peer.Peer, + from peer.ID, + to peer.ID, message bsmsg.BitSwapMessage) error SendRequest( ctx context.Context, - from peer.Peer, - to peer.Peer, + from peer.ID, + to peer.ID, message bsmsg.BitSwapMessage) ( incoming bsmsg.BitSwapMessage, err error) } @@ -36,27 +35,27 @@ type Network interface { func VirtualNetwork(d delay.D) Network { return &network{ - clients: make(map[util.Key]bsnet.Receiver), + clients: make(map[peer.ID]bsnet.Receiver), delay: d, } } type network struct { - clients map[util.Key]bsnet.Receiver + clients map[peer.ID]bsnet.Receiver delay delay.D } -func (n *network) Adapter(p peer.Peer) bsnet.BitSwapNetwork { +func (n *network) Adapter(p peer.ID) bsnet.BitSwapNetwork { client := &networkClient{ local: p, network: n, } - n.clients[p.Key()] = client + n.clients[p] = client return client } -func (n *network) HasPeer(p peer.Peer) bool { - _, found := n.clients[p.Key()] +func (n *network) HasPeer(p peer.ID) bool { + _, found := n.clients[p] return found } @@ -64,11 +63,11 @@ func (n *network) HasPeer(p peer.Peer) bool { // TODO what does the network layer do with errors received from services? 
func (n *network) SendMessage( ctx context.Context, - from peer.Peer, - to peer.Peer, + from peer.ID, + to peer.ID, message bsmsg.BitSwapMessage) error { - receiver, ok := n.clients[to.Key()] + receiver, ok := n.clients[to] if !ok { return errors.New("Cannot locate peer on network") } @@ -82,8 +81,8 @@ func (n *network) SendMessage( } func (n *network) deliver( - r bsnet.Receiver, from peer.Peer, message bsmsg.BitSwapMessage) error { - if message == nil || from == nil { + r bsnet.Receiver, from peer.ID, message bsmsg.BitSwapMessage) error { + if message == nil || from == "" { return errors.New("Invalid input") } @@ -91,15 +90,15 @@ func (n *network) deliver( nextPeer, nextMsg := r.ReceiveMessage(context.TODO(), from, message) - if (nextPeer == nil && nextMsg != nil) || (nextMsg == nil && nextPeer != nil) { + if (nextPeer == "" && nextMsg != nil) || (nextMsg == nil && nextPeer != "") { return errors.New("Malformed client request") } - if nextPeer == nil && nextMsg == nil { // no response to send + if nextPeer == "" && nextMsg == nil { // no response to send return nil } - nextReceiver, ok := n.clients[nextPeer.Key()] + nextReceiver, ok := n.clients[nextPeer] if !ok { return errors.New("Cannot locate peer on network") } @@ -110,32 +109,32 @@ func (n *network) deliver( // TODO func (n *network) SendRequest( ctx context.Context, - from peer.Peer, - to peer.Peer, + from peer.ID, + to peer.ID, message bsmsg.BitSwapMessage) ( incoming bsmsg.BitSwapMessage, err error) { - r, ok := n.clients[to.Key()] + r, ok := n.clients[to] if !ok { return nil, errors.New("Cannot locate peer on network") } nextPeer, nextMsg := r.ReceiveMessage(context.TODO(), from, message) // TODO dedupe code - if (nextPeer == nil && nextMsg != nil) || (nextMsg == nil && nextPeer != nil) { + if (nextPeer == "" && nextMsg != nil) || (nextMsg == nil && nextPeer != "") { r.ReceiveError(errors.New("Malformed client request")) return nil, nil } // TODO dedupe code - if nextPeer == nil && nextMsg == nil { + if 
nextPeer == "" && nextMsg == nil { return nil, nil } // TODO test when receiver doesn't immediately respond to the initiator of the request - if !bytes.Equal(nextPeer.ID(), from.ID()) { + if nextPeer != from { go func() { - nextReceiver, ok := n.clients[nextPeer.Key()] + nextReceiver, ok := n.clients[nextPeer] if !ok { // TODO log the error? } @@ -147,26 +146,26 @@ func (n *network) SendRequest( } type networkClient struct { - local peer.Peer + local peer.ID bsnet.Receiver network Network } func (nc *networkClient) SendMessage( ctx context.Context, - to peer.Peer, + to peer.ID, message bsmsg.BitSwapMessage) error { return nc.network.SendMessage(ctx, nc.local, to, message) } func (nc *networkClient) SendRequest( ctx context.Context, - to peer.Peer, + to peer.ID, message bsmsg.BitSwapMessage) (incoming bsmsg.BitSwapMessage, err error) { return nc.network.SendRequest(ctx, nc.local, to, message) } -func (nc *networkClient) DialPeer(ctx context.Context, p peer.Peer) error { +func (nc *networkClient) DialPeer(ctx context.Context, p peer.ID) error { // no need to do anything because dialing isn't a thing in this test net. 
if !nc.network.HasPeer(p) { return fmt.Errorf("Peer not in network: %s", p) diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 7a9f48e2d..d47cb71e7 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -5,30 +5,30 @@ import ( "testing" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + blocks "github.com/jbenet/go-ipfs/blocks" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/peer" delay "github.com/jbenet/go-ipfs/util/delay" - testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestSendRequestToCooperativePeer(t *testing.T) { net := VirtualNetwork(delay.Fixed(0)) - idOfRecipient := []byte("recipient") + idOfRecipient := peer.ID("recipient") t.Log("Get two network adapters") - initiator := net.Adapter(testutil.NewPeerWithIDString("initiator")) - recipient := net.Adapter(testutil.NewPeerWithID(idOfRecipient)) + initiator := net.Adapter(peer.ID("initiator")) + recipient := net.Adapter(idOfRecipient) expectedStr := "response from recipient" recipient.SetDelegate(lambda(func( ctx context.Context, - from peer.Peer, + from peer.ID, incoming bsmsg.BitSwapMessage) ( - peer.Peer, bsmsg.BitSwapMessage) { + peer.ID, bsmsg.BitSwapMessage) { t.Log("Recipient received a message from the network") @@ -45,13 +45,17 @@ func TestSendRequestToCooperativePeer(t *testing.T) { message := bsmsg.New() message.AddBlock(blocks.NewBlock([]byte("data"))) response, err := initiator.SendRequest( - context.Background(), testutil.NewPeerWithID(idOfRecipient), message) + context.Background(), idOfRecipient, message) if err != nil { t.Fatal(err) } t.Log("Check the contents of the response from recipient") + if response == nil { + t.Fatal("Should have received a response") + } + for _, blockFromRecipient := range response.Blocks() { if string(blockFromRecipient.Data) == expectedStr { 
return @@ -62,9 +66,9 @@ func TestSendRequestToCooperativePeer(t *testing.T) { func TestSendMessageAsyncButWaitForResponse(t *testing.T) { net := VirtualNetwork(delay.Fixed(0)) - idOfResponder := []byte("responder") - waiter := net.Adapter(testutil.NewPeerWithIDString("waiter")) - responder := net.Adapter(testutil.NewPeerWithID(idOfResponder)) + idOfResponder := peer.ID("responder") + waiter := net.Adapter(peer.ID("waiter")) + responder := net.Adapter(idOfResponder) var wg sync.WaitGroup @@ -74,9 +78,9 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { responder.SetDelegate(lambda(func( ctx context.Context, - fromWaiter peer.Peer, + fromWaiter peer.ID, msgFromWaiter bsmsg.BitSwapMessage) ( - peer.Peer, bsmsg.BitSwapMessage) { + peer.ID, bsmsg.BitSwapMessage) { msgToWaiter := bsmsg.New() msgToWaiter.AddBlock(blocks.NewBlock([]byte(expectedStr))) @@ -86,9 +90,9 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { waiter.SetDelegate(lambda(func( ctx context.Context, - fromResponder peer.Peer, + fromResponder peer.ID, msgFromResponder bsmsg.BitSwapMessage) ( - peer.Peer, bsmsg.BitSwapMessage) { + peer.ID, bsmsg.BitSwapMessage) { // TODO assert that this came from the correct peer and that the message contents are as expected ok := false @@ -103,13 +107,13 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { t.Fatal("Message not received from the responder") } - return nil, nil + return "", nil })) messageSentAsync := bsmsg.New() messageSentAsync.AddBlock(blocks.NewBlock([]byte("data"))) errSending := waiter.SendMessage( - context.Background(), testutil.NewPeerWithID(idOfResponder), messageSentAsync) + context.Background(), idOfResponder, messageSentAsync) if errSending != nil { t.Fatal(errSending) } @@ -117,8 +121,8 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { wg.Wait() // until waiter delegate function is executed } -type receiverFunc func(ctx context.Context, p peer.Peer, - incoming bsmsg.BitSwapMessage) (peer.Peer, 
bsmsg.BitSwapMessage) +type receiverFunc func(ctx context.Context, p peer.ID, + incoming bsmsg.BitSwapMessage) (peer.ID, bsmsg.BitSwapMessage) // lambda returns a Receiver instance given a receiver function func lambda(f receiverFunc) bsnet.Receiver { @@ -128,13 +132,13 @@ func lambda(f receiverFunc) bsnet.Receiver { } type lambdaImpl struct { - f func(ctx context.Context, p peer.Peer, incoming bsmsg.BitSwapMessage) ( - peer.Peer, bsmsg.BitSwapMessage) + f func(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) ( + peer.ID, bsmsg.BitSwapMessage) } func (lam *lambdaImpl) ReceiveMessage(ctx context.Context, - p peer.Peer, incoming bsmsg.BitSwapMessage) ( - peer.Peer, bsmsg.BitSwapMessage) { + p peer.ID, incoming bsmsg.BitSwapMessage) ( + peer.ID, bsmsg.BitSwapMessage) { return lam.f(ctx, p, incoming) } diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 48cb11a45..09ac1c363 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -44,7 +44,7 @@ func (g *SessionGenerator) Close() error { func (g *SessionGenerator) Next() Instance { g.seq++ - return session(g.ctx, g.net, g.rs, g.ps, []byte(string(g.seq))) + return session(g.ctx, g.net, g.rs, g.ps, peer.ID(g.seq)) } func (g *SessionGenerator) Instances(n int) []Instance { @@ -57,7 +57,7 @@ func (g *SessionGenerator) Instances(n int) []Instance { } type Instance struct { - Peer peer.Peer + Peer peer.ID Exchange exchange.Interface blockstore blockstore.Blockstore @@ -77,11 +77,10 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // NB: It's easy make mistakes by providing the same peer ID to two different // sessions. To safeguard, use the SessionGenerator to generate sessions. It's // just a much better idea. 
-func session(ctx context.Context, net tn.Network, rs mockrouting.Server, ps peer.Peerstore, id peer.ID) Instance { - p := ps.WithID(id) +func session(ctx context.Context, net tn.Network, rs mockrouting.Server, ps peer.Peerstore, p peer.ID) Instance { adapter := net.Adapter(p) - htc := rs.Client(p) + htc := rs.Client(peer.PeerInfo{ID: p}) bsdelay := delay.Fixed(0) const kWriteCacheElems = 100 From 4e1047e168f1f3d364525e4161b456df88933247 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Dec 2014 19:24:37 -0500 Subject: [PATCH 0256/1035] fix: data race in test https://build.protocol-dev.com/job/race/9352/console @jbenet @whyrusleeping pinging you guys to spread awareness about the delay.D type for configurable delays License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@17b4a8634b92ada004a97e8802b5b928aef0fa86 --- bitswap/bitswap.go | 7 ++++--- bitswap/bitswap_test.go | 5 ++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 376391263..0dcbc0649 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -19,6 +19,7 @@ import ( wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" + "github.com/jbenet/go-ipfs/util/delay" eventlog "github.com/jbenet/go-ipfs/util/eventlog" pset "github.com/jbenet/go-ipfs/util/peerset" ) @@ -37,7 +38,7 @@ const ( ) var ( - rebroadcastDelay = time.Second * 10 + rebroadcastDelay = delay.Fixed(time.Second * 10) ) // New initializes a BitSwap instance that communicates over the provided @@ -250,7 +251,7 @@ func (bs *bitswap) clientWorker(parent context.Context) { ctx, cancel := context.WithCancel(parent) - broadcastSignal := time.After(rebroadcastDelay) + broadcastSignal := time.After(rebroadcastDelay.Get()) defer cancel() for { @@ -258,7 +259,7 @@ func (bs *bitswap) clientWorker(parent context.Context) { case <-broadcastSignal: // Resend unfulfilled 
wantlist keys bs.sendWantlistToProviders(ctx, bs.wantlist) - broadcastSignal = time.After(rebroadcastDelay) + broadcastSignal = time.After(rebroadcastDelay.Get()) case ks := <-bs.batchRequests: if len(ks) == 0 { log.Warning("Received batch request for zero blocks") diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 42bdd631c..e0f2740e0 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -208,9 +208,8 @@ func TestSendToWantingPeer(t *testing.T) { defer sg.Close() bg := blocksutil.NewBlockGenerator() - oldVal := rebroadcastDelay - rebroadcastDelay = time.Second / 2 - defer func() { rebroadcastDelay = oldVal }() + prev := rebroadcastDelay.Set(time.Second / 2) + defer func() { rebroadcastDelay.Set(prev) }() peerA := sg.Next() peerB := sg.Next() From a6df2c7d8521d298d6f84ad845d6d794ad65e49b Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Tue, 23 Dec 2014 04:13:52 -0800 Subject: [PATCH 0257/1035] bitswap: network interface changed Had to change the network interface from DialPeer(peer.ID) to DialPeer(peer.PeerInfo), so that addresses of a provider are handed to the network. @maybebtc and I are discussing whether this should go all the way down to the network, or whether the network _should always work_ with just an ID (which means the network needs to be able to resolve ID -> Addresses, using the routing system. This latter point might mean that "routing" might need to break down into subcomponents. It's a bit sketchy that the Network would become smarter than just dial/listen and I/O, but maybe there's a distinction between net.Network, and something like a peernet.Network that has routing built in...) 
This commit was moved from ipfs/go-bitswap@c21868538a1762ae35269dad06fcba7642ff5ac5 --- bitswap/bitswap.go | 12 +++++++----- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 5 +++-- bitswap/testnet/network.go | 6 +++--- 4 files changed, 14 insertions(+), 11 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 0dcbc0649..f4a170e78 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -176,14 +176,16 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.PeerInf message.AddEntry(wanted.Key, wanted.Priority) } wg := sync.WaitGroup{} - for peerToQuery := range peers { - log.Event(ctx, "PeerToQuery", peerToQuery.ID) + for pi := range peers { + log.Debugf("bitswap.sendWantListTo: %s %s", pi.ID, pi.Addrs) + log.Event(ctx, "PeerToQuery", pi.ID) wg.Add(1) - go func(p peer.ID) { + go func(pi peer.PeerInfo) { defer wg.Done() + p := pi.ID log.Event(ctx, "DialPeer", p) - err := bs.sender.DialPeer(ctx, p) + err := bs.sender.DialPeer(ctx, pi) if err != nil { log.Errorf("Error sender.DialPeer(%s): %s", p, err) return @@ -198,7 +200,7 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.PeerInf // communication fails. May require slightly different API to // get better guarantees. May need shared sequence numbers. bs.engine.MessageSent(p, message) - }(peerToQuery.ID) + }(pi) } wg.Wait() return nil diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 94ceadbff..61837149d 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -12,7 +12,7 @@ import ( type BitSwapNetwork interface { // DialPeer ensures there is a connection to peer. - DialPeer(context.Context, peer.ID) error + DialPeer(context.Context, peer.PeerInfo) error // SendMessage sends a BitSwap message to a peer. 
SendMessage( diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 3a7a06091..f94d64000 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -53,8 +53,9 @@ func (bsnet *impl) handleNewStream(s inet.Stream) { } -func (bsnet *impl) DialPeer(ctx context.Context, p peer.ID) error { - return bsnet.network.DialPeer(ctx, p) +func (bsnet *impl) DialPeer(ctx context.Context, p peer.PeerInfo) error { + bsnet.network.Peerstore().AddAddresses(p.ID, p.Addrs) + return bsnet.network.DialPeer(ctx, p.ID) } func (bsnet *impl) SendMessage( diff --git a/bitswap/testnet/network.go b/bitswap/testnet/network.go index 9e17b67f4..179918258 100644 --- a/bitswap/testnet/network.go +++ b/bitswap/testnet/network.go @@ -165,10 +165,10 @@ func (nc *networkClient) SendRequest( return nc.network.SendRequest(ctx, nc.local, to, message) } -func (nc *networkClient) DialPeer(ctx context.Context, p peer.ID) error { +func (nc *networkClient) DialPeer(ctx context.Context, p peer.PeerInfo) error { // no need to do anything because dialing isn't a thing in this test net. - if !nc.network.HasPeer(p) { - return fmt.Errorf("Peer not in network: %s", p) + if !nc.network.HasPeer(p.ID) { + return fmt.Errorf("Peer not in network: %s", p.ID) } return nil } From 8a2fac523b8a848822f226a36f563697334be7c3 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 23 Dec 2014 08:35:40 -0500 Subject: [PATCH 0258/1035] Revert "bitswap: network interface changed" This reverts commit bf88f1aec5e3d397f97d64de52b52686cc7a8c8f. 
This commit was moved from ipfs/go-bitswap@847826d96166aca623d502f8b03a8bd892e9c683 --- bitswap/bitswap.go | 12 +++++------- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 5 ++--- bitswap/testnet/network.go | 6 +++--- 4 files changed, 11 insertions(+), 14 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index f4a170e78..0dcbc0649 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -176,16 +176,14 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.PeerInf message.AddEntry(wanted.Key, wanted.Priority) } wg := sync.WaitGroup{} - for pi := range peers { - log.Debugf("bitswap.sendWantListTo: %s %s", pi.ID, pi.Addrs) - log.Event(ctx, "PeerToQuery", pi.ID) + for peerToQuery := range peers { + log.Event(ctx, "PeerToQuery", peerToQuery.ID) wg.Add(1) - go func(pi peer.PeerInfo) { + go func(p peer.ID) { defer wg.Done() - p := pi.ID log.Event(ctx, "DialPeer", p) - err := bs.sender.DialPeer(ctx, pi) + err := bs.sender.DialPeer(ctx, p) if err != nil { log.Errorf("Error sender.DialPeer(%s): %s", p, err) return @@ -200,7 +198,7 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.PeerInf // communication fails. May require slightly different API to // get better guarantees. May need shared sequence numbers. bs.engine.MessageSent(p, message) - }(pi) + }(peerToQuery.ID) } wg.Wait() return nil diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 61837149d..94ceadbff 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -12,7 +12,7 @@ import ( type BitSwapNetwork interface { // DialPeer ensures there is a connection to peer. - DialPeer(context.Context, peer.PeerInfo) error + DialPeer(context.Context, peer.ID) error // SendMessage sends a BitSwap message to a peer. 
SendMessage( diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index f94d64000..3a7a06091 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -53,9 +53,8 @@ func (bsnet *impl) handleNewStream(s inet.Stream) { } -func (bsnet *impl) DialPeer(ctx context.Context, p peer.PeerInfo) error { - bsnet.network.Peerstore().AddAddresses(p.ID, p.Addrs) - return bsnet.network.DialPeer(ctx, p.ID) +func (bsnet *impl) DialPeer(ctx context.Context, p peer.ID) error { + return bsnet.network.DialPeer(ctx, p) } func (bsnet *impl) SendMessage( diff --git a/bitswap/testnet/network.go b/bitswap/testnet/network.go index 179918258..9e17b67f4 100644 --- a/bitswap/testnet/network.go +++ b/bitswap/testnet/network.go @@ -165,10 +165,10 @@ func (nc *networkClient) SendRequest( return nc.network.SendRequest(ctx, nc.local, to, message) } -func (nc *networkClient) DialPeer(ctx context.Context, p peer.PeerInfo) error { +func (nc *networkClient) DialPeer(ctx context.Context, p peer.ID) error { // no need to do anything because dialing isn't a thing in this test net. 
- if !nc.network.HasPeer(p.ID) { - return fmt.Errorf("Peer not in network: %s", p.ID) + if !nc.network.HasPeer(p) { + return fmt.Errorf("Peer not in network: %s", p) } return nil } From 8b9f54083ed4a2be1db4f97ef68e53b2676e2cfe Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 23 Dec 2014 06:57:13 -0500 Subject: [PATCH 0259/1035] fix(bitswap) always dial This commit was moved from ipfs/go-bitswap@05c10446fb2321ce7937112468ac4f608b5627b7 --- bitswap/bitswap.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 0dcbc0649..8d75e10b7 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -19,9 +19,10 @@ import ( wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" + errors "github.com/jbenet/go-ipfs/util/debugerror" "github.com/jbenet/go-ipfs/util/delay" eventlog "github.com/jbenet/go-ipfs/util/eventlog" - pset "github.com/jbenet/go-ipfs/util/peerset" + pset "github.com/jbenet/go-ipfs/util/peerset" // TODO move this to peerstore ) var log = eventlog.Logger("bitswap") @@ -352,8 +353,13 @@ func (bs *bitswap) ReceiveError(err error) { // send strives to ensure that accounting is always performed when a message is // sent func (bs *bitswap) send(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) error { + log.Event(ctx, "DialPeer", p) + err := bs.sender.DialPeer(ctx, p) + if err != nil { + return errors.Wrap(err) + } if err := bs.sender.SendMessage(ctx, p, m); err != nil { - return err + return errors.Wrap(err) } return bs.engine.MessageSent(p, m) } From b0d3af02fbd43223b48953d0a88977ee38abab37 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 23 Dec 2014 06:59:55 -0500 Subject: [PATCH 0260/1035] fix(bitswap) always use prvivate `send` method to send cc @whyrusleeping This commit was moved from ipfs/go-bitswap@a247e24cf34b9ad36fa6ebc953202f3be59c0afc --- bitswap/bitswap.go | 17 
++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 8d75e10b7..a17cb4254 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -182,23 +182,10 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.PeerInf wg.Add(1) go func(p peer.ID) { defer wg.Done() - - log.Event(ctx, "DialPeer", p) - err := bs.sender.DialPeer(ctx, p) - if err != nil { - log.Errorf("Error sender.DialPeer(%s): %s", p, err) - return - } - - err = bs.sender.SendMessage(ctx, p, message) - if err != nil { - log.Errorf("Error sender.SendMessage(%s) = %s", p, err) + if err := bs.send(ctx, p, message); err != nil { + log.Error(err) return } - // FIXME ensure accounting is handled correctly when - // communication fails. May require slightly different API to - // get better guarantees. May need shared sequence numbers. - bs.engine.MessageSent(p, message) }(peerToQuery.ID) } wg.Wait() From ae59ace0b8a24918da4619195a01d4df8fe33df5 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 23 Dec 2014 07:01:19 -0500 Subject: [PATCH 0261/1035] style(bitswap) rename This commit was moved from ipfs/go-bitswap@e42045ade48be097d66418113aa9b6e17af96ed8 --- bitswap/bitswap.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index a17cb4254..699380ca1 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -202,7 +202,7 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wantli message.AddEntry(e.Key, e.Priority) } - ps := pset.New() + set := pset.New() // Get providers for all entries in wantlist (could take a while) wg := sync.WaitGroup{} @@ -214,7 +214,7 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wantli providers := bs.routing.FindProvidersAsync(child, k, maxProvidersPerRequest) for prov := range providers { - if ps.TryAdd(prov.ID) { //Do once per peer + if set.TryAdd(prov.ID) { //Do once 
per peer bs.send(ctx, prov.ID, message) } } From 655ebb00afede90c7e701ed9889fcdee1d186704 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 23 Dec 2014 07:07:19 -0500 Subject: [PATCH 0262/1035] style(bitswap) public methods at top This commit was moved from ipfs/go-bitswap@cb8a96a123cf1c3011293e8765382fe297eb3ad7 --- bitswap/network/ipfs_impl.go | 46 ++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 3a7a06091..e1b316627 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -30,29 +30,6 @@ type impl struct { receiver Receiver } -// handleNewStream receives a new stream from the network. -func (bsnet *impl) handleNewStream(s inet.Stream) { - - if bsnet.receiver == nil { - return - } - - go func() { - defer s.Close() - - received, err := bsmsg.FromNet(s) - if err != nil { - go bsnet.receiver.ReceiveError(err) - return - } - - p := s.Conn().RemotePeer() - ctx := context.Background() - bsnet.receiver.ReceiveMessage(ctx, p, received) - }() - -} - func (bsnet *impl) DialPeer(ctx context.Context, p peer.ID) error { return bsnet.network.DialPeer(ctx, p) } @@ -92,3 +69,26 @@ func (bsnet *impl) SendRequest( func (bsnet *impl) SetDelegate(r Receiver) { bsnet.receiver = r } + +// handleNewStream receives a new stream from the network. 
+func (bsnet *impl) handleNewStream(s inet.Stream) { + + if bsnet.receiver == nil { + return + } + + go func() { + defer s.Close() + + received, err := bsmsg.FromNet(s) + if err != nil { + go bsnet.receiver.ReceiveError(err) + return + } + + p := s.Conn().RemotePeer() + ctx := context.Background() + bsnet.receiver.ReceiveMessage(ctx, p, received) + }() + +} From 860d633e2cbca7d7ada8c5969f8e2790a4611a4d Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 23 Dec 2014 07:07:58 -0500 Subject: [PATCH 0263/1035] feat(bitswap/network) expose peerstore This commit was moved from ipfs/go-bitswap@3468d94b41661f1391536d9ae9b87eeb01247b9f --- bitswap/network/interface.go | 2 ++ bitswap/network/ipfs_impl.go | 4 ++++ bitswap/testnet/network.go | 12 +++++++++--- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 94ceadbff..fc9a7ddaa 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -26,6 +26,8 @@ type BitSwapNetwork interface { peer.ID, bsmsg.BitSwapMessage) (incoming bsmsg.BitSwapMessage, err error) + Peerstore() peer.Peerstore + // SetDelegate registers the Reciver to handle messages received from the // network. SetDelegate(Receiver) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index e1b316627..ea52ad8d7 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -70,6 +70,10 @@ func (bsnet *impl) SetDelegate(r Receiver) { bsnet.receiver = r } +func (bsnet *impl) Peerstore() peer.Peerstore { + return bsnet.Peerstore() +} + // handleNewStream receives a new stream from the network. 
func (bsnet *impl) handleNewStream(s inet.Stream) { diff --git a/bitswap/testnet/network.go b/bitswap/testnet/network.go index 9e17b67f4..aa9b879fc 100644 --- a/bitswap/testnet/network.go +++ b/bitswap/testnet/network.go @@ -47,8 +47,9 @@ type network struct { func (n *network) Adapter(p peer.ID) bsnet.BitSwapNetwork { client := &networkClient{ - local: p, - network: n, + local: p, + network: n, + peerstore: peer.NewPeerstore(), } n.clients[p] = client return client @@ -148,7 +149,8 @@ func (n *network) SendRequest( type networkClient struct { local peer.ID bsnet.Receiver - network Network + network Network + peerstore peer.Peerstore } func (nc *networkClient) SendMessage( @@ -176,3 +178,7 @@ func (nc *networkClient) DialPeer(ctx context.Context, p peer.ID) error { func (nc *networkClient) SetDelegate(r bsnet.Receiver) { nc.Receiver = r } + +func (nc *networkClient) Peerstore() peer.Peerstore { + return nc.peerstore +} From 900475afe028b55951768cee1e5e42d222d289bc Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 23 Dec 2014 07:08:53 -0500 Subject: [PATCH 0264/1035] style(bitswap) rename to network This commit was moved from ipfs/go-bitswap@90324639eaa8e402ec9665a76616f0a1224476d3 --- bitswap/bitswap.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 699380ca1..f94838fb2 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -64,7 +64,7 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, routin notifications: notif, engine: decision.NewEngine(ctx, bstore), routing: routing, - sender: network, + network: network, wantlist: wantlist.NewThreadSafe(), batchRequests: make(chan []u.Key, sizeBatchRequestChan), } @@ -78,8 +78,8 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, routin // bitswap instances implement the bitswap protocol. 
type bitswap struct { - // sender delivers messages on behalf of the session - sender bsnet.BitSwapNetwork + // network delivers messages on behalf of the session + network bsnet.BitSwapNetwork // blockstore is the local database // NB: ensure threadsafety @@ -341,11 +341,11 @@ func (bs *bitswap) ReceiveError(err error) { // sent func (bs *bitswap) send(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) error { log.Event(ctx, "DialPeer", p) - err := bs.sender.DialPeer(ctx, p) + err := bs.network.DialPeer(ctx, p) if err != nil { return errors.Wrap(err) } - if err := bs.sender.SendMessage(ctx, p, m); err != nil { + if err := bs.network.SendMessage(ctx, p, m); err != nil { return errors.Wrap(err) } return bs.engine.MessageSent(p, m) From a8f727f7acc4641a13dd6fd2c401d5e0295f8ed1 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 23 Dec 2014 07:15:52 -0500 Subject: [PATCH 0265/1035] fix(bitswap) always add addresses This commit was moved from ipfs/go-bitswap@f76fe2adb4d417ea85ab3209479ab636f9b3aa04 --- bitswap/bitswap.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index f94838fb2..20db60a00 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -180,6 +180,7 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.PeerInf for peerToQuery := range peers { log.Event(ctx, "PeerToQuery", peerToQuery.ID) wg.Add(1) + bs.network.Peerstore().AddAddresses(peerToQuery.ID, peerToQuery.Addrs) go func(p peer.ID) { defer wg.Done() if err := bs.send(ctx, p, message); err != nil { @@ -212,8 +213,8 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wantli defer wg.Done() child, _ := context.WithTimeout(ctx, providerRequestTimeout) providers := bs.routing.FindProvidersAsync(child, k, maxProvidersPerRequest) - for prov := range providers { + bs.network.Peerstore().AddAddresses(prov.ID, prov.Addrs) if set.TryAdd(prov.ID) { //Do once per peer bs.send(ctx, prov.ID, 
message) } @@ -265,7 +266,6 @@ func (bs *bitswap) clientWorker(parent context.Context) { // newer bitswap strategies. child, _ := context.WithTimeout(ctx, providerRequestTimeout) providers := bs.routing.FindProvidersAsync(child, ks[0], maxProvidersPerRequest) - err := bs.sendWantListTo(ctx, providers) if err != nil { log.Errorf("error sending wantlist: %s", err) From 2cc1e65e31acb377828fb7b300109edb0b34eeef Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 23 Dec 2014 08:16:05 -0500 Subject: [PATCH 0266/1035] refactor(bitswap) bitswap.Network now abstracts ipfs.Network + ipfs.Routing @jbenet @whyrusleeping the next commit will change bitswap.Network.FindProviders to only deal with IDs This commit was moved from ipfs/go-bitswap@4ab8ad567c0fcb92684a6cc2eb822695df1208fa --- bitswap/bitswap.go | 12 ++++------ bitswap/bitswap_test.go | 39 ++++++++++++++------------------- bitswap/network/interface.go | 2 ++ bitswap/network/ipfs_impl.go | 14 +++++++++++- bitswap/testnet/network.go | 22 ++++++++++++++++--- bitswap/testnet/network_test.go | 5 +++-- bitswap/testutils.go | 12 ++++------ 7 files changed, 61 insertions(+), 45 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 20db60a00..58cdb54a5 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -46,7 +46,7 @@ var ( // BitSwapNetwork. This function registers the returned instance as the network // delegate. // Runs until context is cancelled. 
-func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, routing bsnet.Routing, +func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, nice bool) exchange.Interface { ctx, cancelFunc := context.WithCancel(parent) @@ -63,7 +63,6 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, routin cancelFunc: cancelFunc, notifications: notif, engine: decision.NewEngine(ctx, bstore), - routing: routing, network: network, wantlist: wantlist.NewThreadSafe(), batchRequests: make(chan []u.Key, sizeBatchRequestChan), @@ -85,9 +84,6 @@ type bitswap struct { // NB: ensure threadsafety blockstore blockstore.Blockstore - // routing interface for communication - routing bsnet.Routing - notifications notifications.PubSub // Requests for a set of related blocks @@ -165,7 +161,7 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { } bs.wantlist.Remove(blk.Key()) bs.notifications.Publish(blk) - return bs.routing.Provide(ctx, blk.Key()) + return bs.network.Provide(ctx, blk.Key()) } func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.PeerInfo) error { @@ -212,7 +208,7 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wantli go func(k u.Key) { defer wg.Done() child, _ := context.WithTimeout(ctx, providerRequestTimeout) - providers := bs.routing.FindProvidersAsync(child, k, maxProvidersPerRequest) + providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest) for prov := range providers { bs.network.Peerstore().AddAddresses(prov.ID, prov.Addrs) if set.TryAdd(prov.ID) { //Do once per peer @@ -265,7 +261,7 @@ func (bs *bitswap) clientWorker(parent context.Context) { // it. Later, this assumption may not hold as true if we implement // newer bitswap strategies. 
child, _ := context.WithTimeout(ctx, providerRequestTimeout) - providers := bs.routing.FindProvidersAsync(child, ks[0], maxProvidersPerRequest) + providers := bs.network.FindProvidersAsync(child, ks[0], maxProvidersPerRequest) err := bs.sendWantListTo(ctx, providers) if err != nil { log.Errorf("error sending wantlist: %s", err) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index e0f2740e0..6da4aaeff 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -24,9 +24,8 @@ const kNetworkDelay = 0 * time.Millisecond func TestClose(t *testing.T) { // TODO t.Skip("TODO Bitswap's Close implementation is a WIP") - vnet := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rout := mockrouting.NewServer() - sesgen := NewSessionGenerator(vnet, rout) + vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + sesgen := NewSessionGenerator(vnet) defer sesgen.Close() bgen := blocksutil.NewBlockGenerator() @@ -39,9 +38,8 @@ func TestClose(t *testing.T) { func TestGetBlockTimeout(t *testing.T) { - net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rs := mockrouting.NewServer() - g := NewSessionGenerator(net, rs) + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + g := NewSessionGenerator(net) defer g.Close() self := g.Next() @@ -55,11 +53,11 @@ func TestGetBlockTimeout(t *testing.T) { } } -func TestProviderForKeyButNetworkCannotFind(t *testing.T) { +func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this - net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) rs := mockrouting.NewServer() - g := NewSessionGenerator(net, rs) + net := tn.VirtualNetwork(rs, delay.Fixed(kNetworkDelay)) + g := NewSessionGenerator(net) defer g.Close() block := blocks.NewBlock([]byte("block")) @@ -81,10 +79,9 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { - net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rs := 
mockrouting.NewServer() + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) block := blocks.NewBlock([]byte("block")) - g := NewSessionGenerator(net, rs) + g := NewSessionGenerator(net) defer g.Close() hasBlock := g.Next() @@ -136,9 +133,8 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { if testing.Short() { t.SkipNow() } - net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rs := mockrouting.NewServer() - sg := NewSessionGenerator(net, rs) + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + sg := NewSessionGenerator(net) defer sg.Close() bg := blocksutil.NewBlockGenerator() @@ -152,10 +148,9 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { var blkeys []u.Key first := instances[0] for _, b := range blocks { - first.Blockstore().Put(b) + first.Blockstore().Put(b) // TODO remove. don't need to do this. bitswap owns block blkeys = append(blkeys, b.Key()) first.Exchange.HasBlock(context.Background(), b) - rs.Client(peer.PeerInfo{ID: first.Peer}).Provide(context.Background(), b.Key()) } t.Log("Distribute!") @@ -202,9 +197,8 @@ func TestSendToWantingPeer(t *testing.T) { t.SkipNow() } - net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rs := mockrouting.NewServer() - sg := NewSessionGenerator(net, rs) + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + sg := NewSessionGenerator(net) defer sg.Close() bg := blocksutil.NewBlockGenerator() @@ -248,9 +242,8 @@ func TestSendToWantingPeer(t *testing.T) { } func TestBasicBitswap(t *testing.T) { - net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) - rs := mockrouting.NewServer() - sg := NewSessionGenerator(net, rs) + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + sg := NewSessionGenerator(net) bg := blocksutil.NewBlockGenerator() t.Log("Test a few nodes trying to get one file with a lot of blocks") diff --git a/bitswap/network/interface.go 
b/bitswap/network/interface.go index fc9a7ddaa..3bf5eb0f6 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -31,6 +31,8 @@ type BitSwapNetwork interface { // SetDelegate registers the Reciver to handle messages received from the // network. SetDelegate(Receiver) + + Routing } // Implement Receiver to receive messages from the BitSwapNetwork diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index ea52ad8d7..4258579eb 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -13,9 +13,10 @@ var log = util.Logger("bitswap_network") // NewFromIpfsNetwork returns a BitSwapNetwork supported by underlying IPFS // Dialer & Service -func NewFromIpfsNetwork(n inet.Network) BitSwapNetwork { +func NewFromIpfsNetwork(n inet.Network, r Routing) BitSwapNetwork { bitswapNetwork := impl{ network: n, + routing: r, } n.SetHandler(inet.ProtocolBitswap, bitswapNetwork.handleNewStream) return &bitswapNetwork @@ -25,6 +26,7 @@ func NewFromIpfsNetwork(n inet.Network) BitSwapNetwork { // NetMessage objects, into the bitswap network interface. type impl struct { network inet.Network + routing Routing // inbound messages from the network are forwarded to the receiver receiver Receiver @@ -74,6 +76,16 @@ func (bsnet *impl) Peerstore() peer.Peerstore { return bsnet.Peerstore() } +// FindProvidersAsync returns a channel of providers for the given key +func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int) <-chan peer.PeerInfo { // TODO change to return ID + return bsnet.routing.FindProvidersAsync(ctx, k, max) +} + +// Provide provides the key to the network +func (bsnet *impl) Provide(ctx context.Context, k util.Key) error { + return bsnet.routing.Provide(ctx, k) +} + // handleNewStream receives a new stream from the network. 
func (bsnet *impl) handleNewStream(s inet.Stream) { diff --git a/bitswap/testnet/network.go b/bitswap/testnet/network.go index aa9b879fc..08c30a7d4 100644 --- a/bitswap/testnet/network.go +++ b/bitswap/testnet/network.go @@ -5,10 +5,12 @@ import ( "fmt" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + "github.com/jbenet/go-ipfs/routing/mock" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/peer" + "github.com/jbenet/go-ipfs/util" delay "github.com/jbenet/go-ipfs/util/delay" ) @@ -33,16 +35,18 @@ type Network interface { // network impl -func VirtualNetwork(d delay.D) Network { +func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { return &network{ clients: make(map[peer.ID]bsnet.Receiver), delay: d, + routingserver: rs, } } type network struct { - clients map[peer.ID]bsnet.Receiver - delay delay.D + clients map[peer.ID]bsnet.Receiver + routingserver mockrouting.Server + delay delay.D } func (n *network) Adapter(p peer.ID) bsnet.BitSwapNetwork { @@ -50,6 +54,7 @@ func (n *network) Adapter(p peer.ID) bsnet.BitSwapNetwork { local: p, network: n, peerstore: peer.NewPeerstore(), + routing: n.routingserver.Client(peer.PeerInfo{ID: p}), } n.clients[p] = client return client @@ -151,6 +156,7 @@ type networkClient struct { bsnet.Receiver network Network peerstore peer.Peerstore + routing bsnet.Routing } func (nc *networkClient) SendMessage( @@ -167,6 +173,16 @@ func (nc *networkClient) SendRequest( return nc.network.SendRequest(ctx, nc.local, to, message) } +// FindProvidersAsync returns a channel of providers for the given key +func (nc *networkClient) FindProvidersAsync(ctx context.Context, k util.Key, max int) <-chan peer.PeerInfo { // TODO change to return ID + return nc.routing.FindProvidersAsync(ctx, k, max) +} + +// Provide provides the key to the network +func (nc *networkClient) Provide(ctx context.Context, k 
util.Key) error { + return nc.routing.Provide(ctx, k) +} + func (nc *networkClient) DialPeer(ctx context.Context, p peer.ID) error { // no need to do anything because dialing isn't a thing in this test net. if !nc.network.HasPeer(p) { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index d47cb71e7..0728f63d6 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -11,10 +11,11 @@ import ( bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/peer" delay "github.com/jbenet/go-ipfs/util/delay" + mockrouting "github.com/jbenet/go-ipfs/routing/mock" ) func TestSendRequestToCooperativePeer(t *testing.T) { - net := VirtualNetwork(delay.Fixed(0)) + net := VirtualNetwork(mockrouting.NewServer(),delay.Fixed(0)) idOfRecipient := peer.ID("recipient") @@ -65,7 +66,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) { } func TestSendMessageAsyncButWaitForResponse(t *testing.T) { - net := VirtualNetwork(delay.Fixed(0)) + net := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) idOfResponder := peer.ID("responder") waiter := net.Adapter(peer.ID("waiter")) responder := net.Adapter(idOfResponder) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 09ac1c363..70c1bd7a5 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,18 +10,16 @@ import ( exchange "github.com/jbenet/go-ipfs/exchange" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" peer "github.com/jbenet/go-ipfs/peer" - mockrouting "github.com/jbenet/go-ipfs/routing/mock" datastore2 "github.com/jbenet/go-ipfs/util/datastore2" delay "github.com/jbenet/go-ipfs/util/delay" ) func NewSessionGenerator( - net tn.Network, rs mockrouting.Server) SessionGenerator { + net tn.Network) SessionGenerator { ctx, cancel := context.WithCancel(context.TODO()) return SessionGenerator{ ps: peer.NewPeerstore(), net: net, - rs: rs, seq: 0, ctx: ctx, // TODO take ctx as param to Next, Instances cancel: cancel, 
@@ -31,7 +29,6 @@ func NewSessionGenerator( type SessionGenerator struct { seq int net tn.Network - rs mockrouting.Server ps peer.Peerstore ctx context.Context cancel context.CancelFunc @@ -44,7 +41,7 @@ func (g *SessionGenerator) Close() error { func (g *SessionGenerator) Next() Instance { g.seq++ - return session(g.ctx, g.net, g.rs, g.ps, peer.ID(g.seq)) + return session(g.ctx, g.net, g.ps, peer.ID(g.seq)) } func (g *SessionGenerator) Instances(n int) []Instance { @@ -77,10 +74,9 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // NB: It's easy make mistakes by providing the same peer ID to two different // sessions. To safeguard, use the SessionGenerator to generate sessions. It's // just a much better idea. -func session(ctx context.Context, net tn.Network, rs mockrouting.Server, ps peer.Peerstore, p peer.ID) Instance { +func session(ctx context.Context, net tn.Network, ps peer.Peerstore, p peer.ID) Instance { adapter := net.Adapter(p) - htc := rs.Client(peer.PeerInfo{ID: p}) bsdelay := delay.Fixed(0) const kWriteCacheElems = 100 @@ -92,7 +88,7 @@ func session(ctx context.Context, net tn.Network, rs mockrouting.Server, ps peer const alwaysSendToPeer = true - bs := New(ctx, p, adapter, htc, bstore, alwaysSendToPeer) + bs := New(ctx, p, adapter, bstore, alwaysSendToPeer) return Instance{ Peer: p, From 6fd3638913b404c99fd1d9649ba28b0e936b209d Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 23 Dec 2014 08:33:36 -0500 Subject: [PATCH 0267/1035] refactor(bitswap) change PeerInfo to ID in bitswap package @jbenet @whyrusleeping This commit replaces peer.PeerInfo with peer.ID in the bitswap package This commit was moved from ipfs/go-bitswap@aabe0a29352791ed285e0e2def61704fc5d8101b --- bitswap/bitswap.go | 12 +++++------- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 22 +++++++++++++++++----- bitswap/testnet/network.go | 29 ++++++++++++++++++++++++----- 4 files changed, 47 insertions(+), 18 deletions(-) diff 
--git a/bitswap/bitswap.go b/bitswap/bitswap.go index 58cdb54a5..58c7a3584 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -164,7 +164,7 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { return bs.network.Provide(ctx, blk.Key()) } -func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.PeerInfo) error { +func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.ID) error { if peers == nil { panic("Cant send wantlist to nil peerchan") } @@ -174,16 +174,15 @@ func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.PeerInf } wg := sync.WaitGroup{} for peerToQuery := range peers { - log.Event(ctx, "PeerToQuery", peerToQuery.ID) + log.Event(ctx, "PeerToQuery", peerToQuery) wg.Add(1) - bs.network.Peerstore().AddAddresses(peerToQuery.ID, peerToQuery.Addrs) go func(p peer.ID) { defer wg.Done() if err := bs.send(ctx, p, message); err != nil { log.Error(err) return } - }(peerToQuery.ID) + }(peerToQuery) } wg.Wait() return nil @@ -210,9 +209,8 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wantli child, _ := context.WithTimeout(ctx, providerRequestTimeout) providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest) for prov := range providers { - bs.network.Peerstore().AddAddresses(prov.ID, prov.Addrs) - if set.TryAdd(prov.ID) { //Do once per peer - bs.send(ctx, prov.ID, message) + if set.TryAdd(prov) { //Do once per peer + bs.send(ctx, prov, message) } } }(e.Key) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 3bf5eb0f6..08e65cf10 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -46,7 +46,7 @@ type Receiver interface { type Routing interface { // FindProvidersAsync returns a channel of providers for the given key - FindProvidersAsync(context.Context, u.Key, int) <-chan peer.PeerInfo + FindProvidersAsync(context.Context, u.Key, int) <-chan peer.ID // Provide provides the key to 
the network Provide(context.Context, u.Key) error diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 4258579eb..6205e9c29 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -2,10 +2,10 @@ package network import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" inet "github.com/jbenet/go-ipfs/net" peer "github.com/jbenet/go-ipfs/peer" + routing "github.com/jbenet/go-ipfs/routing" util "github.com/jbenet/go-ipfs/util" ) @@ -13,7 +13,7 @@ var log = util.Logger("bitswap_network") // NewFromIpfsNetwork returns a BitSwapNetwork supported by underlying IPFS // Dialer & Service -func NewFromIpfsNetwork(n inet.Network, r Routing) BitSwapNetwork { +func NewFromIpfsNetwork(n inet.Network, r routing.IpfsRouting) BitSwapNetwork { bitswapNetwork := impl{ network: n, routing: r, @@ -26,7 +26,7 @@ func NewFromIpfsNetwork(n inet.Network, r Routing) BitSwapNetwork { // NetMessage objects, into the bitswap network interface. 
type impl struct { network inet.Network - routing Routing + routing routing.IpfsRouting // inbound messages from the network are forwarded to the receiver receiver Receiver @@ -77,8 +77,20 @@ func (bsnet *impl) Peerstore() peer.Peerstore { } // FindProvidersAsync returns a channel of providers for the given key -func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int) <-chan peer.PeerInfo { // TODO change to return ID - return bsnet.routing.FindProvidersAsync(ctx, k, max) +func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int) <-chan peer.ID { + out := make(chan peer.ID) + go func() { + defer close(out) + providers := bsnet.routing.FindProvidersAsync(ctx, k, max) + for info := range providers { + bsnet.network.Peerstore().AddAddresses(info.ID, info.Addrs) + select { + case <-ctx.Done(): + case out <- info.ID: + } + } + }() + return out } // Provide provides the key to the network diff --git a/bitswap/testnet/network.go b/bitswap/testnet/network.go index 08c30a7d4..0461508ea 100644 --- a/bitswap/testnet/network.go +++ b/bitswap/testnet/network.go @@ -5,6 +5,7 @@ import ( "fmt" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + "github.com/jbenet/go-ipfs/routing" "github.com/jbenet/go-ipfs/routing/mock" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" @@ -37,8 +38,8 @@ type Network interface { func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { return &network{ - clients: make(map[peer.ID]bsnet.Receiver), - delay: d, + clients: make(map[peer.ID]bsnet.Receiver), + delay: d, routingserver: rs, } } @@ -156,7 +157,7 @@ type networkClient struct { bsnet.Receiver network Network peerstore peer.Peerstore - routing bsnet.Routing + routing routing.IpfsRouting } func (nc *networkClient) SendMessage( @@ -174,8 +175,26 @@ func (nc *networkClient) SendRequest( } // FindProvidersAsync returns a channel of providers for the given key -func (nc *networkClient) 
FindProvidersAsync(ctx context.Context, k util.Key, max int) <-chan peer.PeerInfo { // TODO change to return ID - return nc.routing.FindProvidersAsync(ctx, k, max) +func (nc *networkClient) FindProvidersAsync(ctx context.Context, k util.Key, max int) <-chan peer.ID { + + // NB: this function duplicates the PeerInfo -> ID transformation in the + // bitswap network adapter. Not to worry. This network client will be + // deprecated once the ipfsnet.Mock is added. The code below is only + // temporary. + + out := make(chan peer.ID) + go func() { + defer close(out) + providers := nc.routing.FindProvidersAsync(ctx, k, max) + for info := range providers { + nc.peerstore.AddAddresses(info.ID, info.Addrs) + select { + case <-ctx.Done(): + case out <- info.ID: + } + } + }() + return out } // Provide provides the key to the network From aff6490fd768ff2df12e9d3dd174bc155c2723ad Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 23 Dec 2014 08:44:25 -0500 Subject: [PATCH 0268/1035] chore(bitswap) remove Peerstore() methods from bitswap.Network interface This commit was moved from ipfs/go-bitswap@39138d13b6e34daf01b7d2d159482a4974b7385a --- bitswap/network/interface.go | 2 -- bitswap/network/ipfs_impl.go | 4 ---- bitswap/testnet/network.go | 17 +++++------------ 3 files changed, 5 insertions(+), 18 deletions(-) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 08e65cf10..1bc14ca88 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -26,8 +26,6 @@ type BitSwapNetwork interface { peer.ID, bsmsg.BitSwapMessage) (incoming bsmsg.BitSwapMessage, err error) - Peerstore() peer.Peerstore - // SetDelegate registers the Reciver to handle messages received from the // network. 
SetDelegate(Receiver) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 6205e9c29..5388c8e6d 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -72,10 +72,6 @@ func (bsnet *impl) SetDelegate(r Receiver) { bsnet.receiver = r } -func (bsnet *impl) Peerstore() peer.Peerstore { - return bsnet.Peerstore() -} - // FindProvidersAsync returns a channel of providers for the given key func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int) <-chan peer.ID { out := make(chan peer.ID) diff --git a/bitswap/testnet/network.go b/bitswap/testnet/network.go index 0461508ea..3201ad5c4 100644 --- a/bitswap/testnet/network.go +++ b/bitswap/testnet/network.go @@ -52,10 +52,9 @@ type network struct { func (n *network) Adapter(p peer.ID) bsnet.BitSwapNetwork { client := &networkClient{ - local: p, - network: n, - peerstore: peer.NewPeerstore(), - routing: n.routingserver.Client(peer.PeerInfo{ID: p}), + local: p, + network: n, + routing: n.routingserver.Client(peer.PeerInfo{ID: p}), } n.clients[p] = client return client @@ -155,9 +154,8 @@ func (n *network) SendRequest( type networkClient struct { local peer.ID bsnet.Receiver - network Network - peerstore peer.Peerstore - routing routing.IpfsRouting + network Network + routing routing.IpfsRouting } func (nc *networkClient) SendMessage( @@ -187,7 +185,6 @@ func (nc *networkClient) FindProvidersAsync(ctx context.Context, k util.Key, max defer close(out) providers := nc.routing.FindProvidersAsync(ctx, k, max) for info := range providers { - nc.peerstore.AddAddresses(info.ID, info.Addrs) select { case <-ctx.Done(): case out <- info.ID: @@ -213,7 +210,3 @@ func (nc *networkClient) DialPeer(ctx context.Context, p peer.ID) error { func (nc *networkClient) SetDelegate(r bsnet.Receiver) { nc.Receiver = r } - -func (nc *networkClient) Peerstore() peer.Peerstore { - return nc.peerstore -} From f040c5597fe90b4a259d7e07847e4cee094ce182 Mon Sep 17 00:00:00 2001 From: Brian 
Tiger Chow Date: Tue, 23 Dec 2014 15:05:35 -0500 Subject: [PATCH 0269/1035] misc docs and fmting This commit was moved from ipfs/go-bitswap@ca182aed2c39305b4a0a557d13606464dccc0a2b --- bitswap/bitswap.go | 4 +++- bitswap/testnet/network_test.go | 4 ++-- bitswap/testutils.go | 1 + 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 58c7a3584..4ff23aee2 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -28,7 +28,9 @@ import ( var log = eventlog.Logger("bitswap") const ( - // Number of providers to request for sending a wantlist to + // maxProvidersPerRequest specifies the maximum number of providers desired + // from the network. This value is specified because the network streams + // results. // TODO: if a 'non-nice' strategy is implemented, consider increasing this value maxProvidersPerRequest = 3 providerRequestTimeout = time.Second * 10 diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 0728f63d6..1418497f0 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -10,12 +10,12 @@ import ( bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/peer" - delay "github.com/jbenet/go-ipfs/util/delay" mockrouting "github.com/jbenet/go-ipfs/routing/mock" + delay "github.com/jbenet/go-ipfs/util/delay" ) func TestSendRequestToCooperativePeer(t *testing.T) { - net := VirtualNetwork(mockrouting.NewServer(),delay.Fixed(0)) + net := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) idOfRecipient := peer.ID("recipient") diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 70c1bd7a5..1ff520512 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -26,6 +26,7 @@ func NewSessionGenerator( } } +// TODO move this SessionGenerator to the core package and export it as the core generator type SessionGenerator struct { seq int net 
tn.Network From a667b7c429827fe5358608f2a8492c0057ba36a9 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 23 Dec 2014 15:08:05 -0500 Subject: [PATCH 0270/1035] fix(bitswap) remove peerstore This commit was moved from ipfs/go-bitswap@4c6a60126f9f1392806ff2ef2e47027f14ee3aaf --- bitswap/testutils.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 1ff520512..c75dc61db 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -18,7 +18,6 @@ func NewSessionGenerator( net tn.Network) SessionGenerator { ctx, cancel := context.WithCancel(context.TODO()) return SessionGenerator{ - ps: peer.NewPeerstore(), net: net, seq: 0, ctx: ctx, // TODO take ctx as param to Next, Instances @@ -30,7 +29,6 @@ func NewSessionGenerator( type SessionGenerator struct { seq int net tn.Network - ps peer.Peerstore ctx context.Context cancel context.CancelFunc } @@ -42,7 +40,7 @@ func (g *SessionGenerator) Close() error { func (g *SessionGenerator) Next() Instance { g.seq++ - return session(g.ctx, g.net, g.ps, peer.ID(g.seq)) + return session(g.ctx, g.net, peer.ID(g.seq)) } func (g *SessionGenerator) Instances(n int) []Instance { @@ -75,7 +73,7 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // NB: It's easy make mistakes by providing the same peer ID to two different // sessions. To safeguard, use the SessionGenerator to generate sessions. It's // just a much better idea. 
-func session(ctx context.Context, net tn.Network, ps peer.Peerstore, p peer.ID) Instance { +func session(ctx context.Context, net tn.Network, p peer.ID) Instance { adapter := net.Adapter(p) From 051d8f5405c23203c4aa2dd2ed323f7d61a10ae1 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 23 Dec 2014 15:10:45 -0500 Subject: [PATCH 0271/1035] use testutil peer in sess This commit was moved from ipfs/go-bitswap@941474791e89b7e441839dbcc91b16e9e625d36c --- bitswap/testutils.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index c75dc61db..f636eddd6 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -12,6 +12,7 @@ import ( peer "github.com/jbenet/go-ipfs/peer" datastore2 "github.com/jbenet/go-ipfs/util/datastore2" delay "github.com/jbenet/go-ipfs/util/delay" + testutil "github.com/jbenet/go-ipfs/util/testutil" ) func NewSessionGenerator( @@ -40,7 +41,11 @@ func (g *SessionGenerator) Close() error { func (g *SessionGenerator) Next() Instance { g.seq++ - return session(g.ctx, g.net, peer.ID(g.seq)) + p, err := testutil.RandPeer() + if err != nil { + panic("FIXME") // TODO change signature + } + return session(g.ctx, g.net, p) } func (g *SessionGenerator) Instances(n int) []Instance { @@ -73,9 +78,9 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // NB: It's easy make mistakes by providing the same peer ID to two different // sessions. To safeguard, use the SessionGenerator to generate sessions. It's // just a much better idea. 
-func session(ctx context.Context, net tn.Network, p peer.ID) Instance { +func session(ctx context.Context, net tn.Network, p testutil.Peer) Instance { - adapter := net.Adapter(p) + adapter := net.Adapter(p.ID()) bsdelay := delay.Fixed(0) const kWriteCacheElems = 100 @@ -87,10 +92,10 @@ func session(ctx context.Context, net tn.Network, p peer.ID) Instance { const alwaysSendToPeer = true - bs := New(ctx, p, adapter, bstore, alwaysSendToPeer) + bs := New(ctx, p.ID(), adapter, bstore, alwaysSendToPeer) return Instance{ - Peer: p, + Peer: p.ID(), Exchange: bs, blockstore: bstore, blockstoreDelay: bsdelay, From 0e8680c548dc36bcf54f7d3d1c1f6910b7b54ba4 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 23 Dec 2014 18:08:16 -0500 Subject: [PATCH 0272/1035] refactor(bitswap/testnet) slim down interface so it'll be easier to create another implementation using the new mocknet This commit was moved from ipfs/go-bitswap@20acf8b7408bb5b3f14048c91ab42ad2580c5bca --- bitswap/testnet/network.go | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/bitswap/testnet/network.go b/bitswap/testnet/network.go index 3201ad5c4..f45202630 100644 --- a/bitswap/testnet/network.go +++ b/bitswap/testnet/network.go @@ -5,13 +5,12 @@ import ( "fmt" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - "github.com/jbenet/go-ipfs/routing" - "github.com/jbenet/go-ipfs/routing/mock" - bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/peer" - "github.com/jbenet/go-ipfs/util" + routing "github.com/jbenet/go-ipfs/routing" + mockrouting "github.com/jbenet/go-ipfs/routing/mock" + util "github.com/jbenet/go-ipfs/util" delay "github.com/jbenet/go-ipfs/util/delay" ) @@ -19,19 +18,6 @@ type Network interface { Adapter(peer.ID) bsnet.BitSwapNetwork HasPeer(peer.ID) bool - - SendMessage( - ctx context.Context, - from peer.ID, - to 
peer.ID, - message bsmsg.BitSwapMessage) error - - SendRequest( - ctx context.Context, - from peer.ID, - to peer.ID, - message bsmsg.BitSwapMessage) ( - incoming bsmsg.BitSwapMessage, err error) } // network impl @@ -154,7 +140,7 @@ func (n *network) SendRequest( type networkClient struct { local peer.ID bsnet.Receiver - network Network + network *network routing routing.IpfsRouting } From f035ef5dcafaa97b961a7b22893e5b5a17e55322 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 23 Dec 2014 18:18:32 -0500 Subject: [PATCH 0273/1035] pass peer into testnet adapter method This commit was moved from ipfs/go-bitswap@4edd768c700d91fb7b5b635faa466f7757238825 --- bitswap/testnet/network.go | 11 ++++++----- bitswap/testnet/network_test.go | 18 +++++++++--------- bitswap/testutils.go | 2 +- 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/bitswap/testnet/network.go b/bitswap/testnet/network.go index f45202630..26566bf7e 100644 --- a/bitswap/testnet/network.go +++ b/bitswap/testnet/network.go @@ -12,10 +12,11 @@ import ( mockrouting "github.com/jbenet/go-ipfs/routing/mock" util "github.com/jbenet/go-ipfs/util" delay "github.com/jbenet/go-ipfs/util/delay" + testutil "github.com/jbenet/go-ipfs/util/testutil" ) type Network interface { - Adapter(peer.ID) bsnet.BitSwapNetwork + Adapter(testutil.Peer) bsnet.BitSwapNetwork HasPeer(peer.ID) bool } @@ -36,13 +37,13 @@ type network struct { delay delay.D } -func (n *network) Adapter(p peer.ID) bsnet.BitSwapNetwork { +func (n *network) Adapter(p testutil.Peer) bsnet.BitSwapNetwork { client := &networkClient{ - local: p, + local: p.ID(), network: n, - routing: n.routingserver.Client(peer.PeerInfo{ID: p}), + routing: n.routingserver.Client(peer.PeerInfo{ID: p.ID()}), } - n.clients[p] = client + n.clients[p.ID()] = client return client } diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 1418497f0..08f4ff500 100644 --- a/bitswap/testnet/network_test.go +++ 
b/bitswap/testnet/network_test.go @@ -5,24 +5,24 @@ import ( "testing" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - blocks "github.com/jbenet/go-ipfs/blocks" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/peer" mockrouting "github.com/jbenet/go-ipfs/routing/mock" delay "github.com/jbenet/go-ipfs/util/delay" + testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestSendRequestToCooperativePeer(t *testing.T) { net := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) - idOfRecipient := peer.ID("recipient") + recipientPeer := testutil.RandPeerOrFatal(t) t.Log("Get two network adapters") - initiator := net.Adapter(peer.ID("initiator")) - recipient := net.Adapter(idOfRecipient) + initiator := net.Adapter(testutil.RandPeerOrFatal(t)) + recipient := net.Adapter(recipientPeer) expectedStr := "response from recipient" recipient.SetDelegate(lambda(func( @@ -46,7 +46,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) { message := bsmsg.New() message.AddBlock(blocks.NewBlock([]byte("data"))) response, err := initiator.SendRequest( - context.Background(), idOfRecipient, message) + context.Background(), recipientPeer.ID(), message) if err != nil { t.Fatal(err) } @@ -67,9 +67,9 @@ func TestSendRequestToCooperativePeer(t *testing.T) { func TestSendMessageAsyncButWaitForResponse(t *testing.T) { net := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) - idOfResponder := peer.ID("responder") - waiter := net.Adapter(peer.ID("waiter")) - responder := net.Adapter(idOfResponder) + responderPeer := testutil.RandPeerOrFatal(t) + waiter := net.Adapter(testutil.RandPeerOrFatal(t)) + responder := net.Adapter(responderPeer) var wg sync.WaitGroup @@ -114,7 +114,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { messageSentAsync := bsmsg.New() messageSentAsync.AddBlock(blocks.NewBlock([]byte("data"))) 
errSending := waiter.SendMessage( - context.Background(), idOfResponder, messageSentAsync) + context.Background(), responderPeer.ID(), messageSentAsync) if errSending != nil { t.Fatal(errSending) } diff --git a/bitswap/testutils.go b/bitswap/testutils.go index f636eddd6..728c2ba3b 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -80,7 +80,7 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // just a much better idea. func session(ctx context.Context, net tn.Network, p testutil.Peer) Instance { - adapter := net.Adapter(p.ID()) + adapter := net.Adapter(p) bsdelay := delay.Fixed(0) const kWriteCacheElems = 100 From 9f9705673806fb65037cf8f0453494481e847ac7 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 18:10:56 -0800 Subject: [PATCH 0274/1035] refactor(bitswap/testnet) extract interface in prep for mockpeernet version License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@d425daf3e97a831e75bdf0be4ca7eb23469fd74e --- bitswap/testnet/interface.go | 13 +++++++++++++ bitswap/testnet/network.go | 8 -------- 2 files changed, 13 insertions(+), 8 deletions(-) create mode 100644 bitswap/testnet/interface.go diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go new file mode 100644 index 000000000..c194d74cb --- /dev/null +++ b/bitswap/testnet/interface.go @@ -0,0 +1,13 @@ +package bitswap + +import ( + bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" + peer "github.com/jbenet/go-ipfs/peer" + "github.com/jbenet/go-ipfs/util/testutil" +) + +type Network interface { + Adapter(testutil.Peer) bsnet.BitSwapNetwork + + HasPeer(peer.ID) bool +} diff --git a/bitswap/testnet/network.go b/bitswap/testnet/network.go index 26566bf7e..0bcffbe51 100644 --- a/bitswap/testnet/network.go +++ b/bitswap/testnet/network.go @@ -15,14 +15,6 @@ import ( testutil "github.com/jbenet/go-ipfs/util/testutil" ) -type Network interface { - Adapter(testutil.Peer) bsnet.BitSwapNetwork 
- - HasPeer(peer.ID) bool -} - -// network impl - func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { return &network{ clients: make(map[peer.ID]bsnet.Receiver), From 368096b7986604dee7b410eda222e37c60d58607 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 18:11:59 -0800 Subject: [PATCH 0275/1035] refactor(bitswap/testnet) rename to virtual License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@acd845798c50937d3284c692e26d1f058f644415 --- bitswap/testnet/{network.go => virtual.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename bitswap/testnet/{network.go => virtual.go} (100%) diff --git a/bitswap/testnet/network.go b/bitswap/testnet/virtual.go similarity index 100% rename from bitswap/testnet/network.go rename to bitswap/testnet/virtual.go From 222d6633a71bccb54e95ab99b29bbab26baffa25 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 16 Dec 2014 19:10:15 -0800 Subject: [PATCH 0276/1035] feat(bitswap/testnet) impl a version of bitswap testnet that uses mockpeernet under the hood License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@847cbf02ae163238bdf01bbc46a7bb52646f0c4f --- bitswap/testnet/peernet.go | 55 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 bitswap/testnet/peernet.go diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go new file mode 100644 index 000000000..af2e57258 --- /dev/null +++ b/bitswap/testnet/peernet.go @@ -0,0 +1,55 @@ +package bitswap + +import ( + "math" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" + mockpeernet "github.com/jbenet/go-ipfs/net/mock" + peer "github.com/jbenet/go-ipfs/peer" + mockrouting "github.com/jbenet/go-ipfs/routing/mock" + delay "github.com/jbenet/go-ipfs/util/delay" + testutil "github.com/jbenet/go-ipfs/util/testutil" +) + +type 
peernet struct { + mockpeernet.Mocknet + routingserver mockrouting.Server +} + +func StreamNetWithDelay( + ctx context.Context, + rs mockrouting.Server, + d delay.D) (Network, error) { + + net := mockpeernet.New(ctx) + net.SetLinkDefaults(mockpeernet.LinkOptions{ + Latency: d.Get(), + Bandwidth: math.MaxInt32, // TODO inject + }) + return &peernet{net, rs}, nil +} + +func (pn *peernet) Adapter(p testutil.Peer) bsnet.BitSwapNetwork { + peers := pn.Mocknet.Peers() + client, err := pn.Mocknet.AddPeer(p.PrivateKey(), p.Address()) + if err != nil { + panic(err.Error()) + } + for _, other := range peers { + pn.Mocknet.LinkPeers(p.ID(), other) + } + routing := pn.routingserver.Client(peer.PeerInfo{ID: p.ID()}) + return bsnet.NewFromIpfsNetwork(client, routing) +} + +func (pn *peernet) HasPeer(p peer.ID) bool { + for _, member := range pn.Mocknet.Peers() { + if p == member { + return true + } + } + return false +} + +var _ Network = &peernet{} From a8c47833b5966273a362ecb3aeaf9e7cc197f7bc Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 17 Dec 2014 10:02:19 -0800 Subject: [PATCH 0277/1035] wip with DHT @whyrusleeping @jbenet this is a WIP with the DHT. 
wip License: MIT Signed-off-by: Brian Tiger Chow Conflicts: epictest/addcat_test.go exchange/bitswap/testnet/peernet.go exchange/bitswap/testutils.go routing/mock/centralized_server.go routing/mock/centralized_test.go routing/mock/interface.go fix(routing/mock) fill in function definition This commit was moved from ipfs/go-bitswap@c6684e18435b022fcbad1a23b481bdeb0ce503c4 --- bitswap/bitswap_test.go | 4 ++-- bitswap/testnet/peernet.go | 17 +++-------------- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 10 +++++----- 4 files changed, 11 insertions(+), 22 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 6da4aaeff..4ef0838a5 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -11,10 +11,10 @@ import ( blocks "github.com/jbenet/go-ipfs/blocks" blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" - peer "github.com/jbenet/go-ipfs/peer" mockrouting "github.com/jbenet/go-ipfs/routing/mock" u "github.com/jbenet/go-ipfs/util" delay "github.com/jbenet/go-ipfs/util/delay" + "github.com/jbenet/go-ipfs/util/testutil" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work @@ -61,7 +61,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this defer g.Close() block := blocks.NewBlock([]byte("block")) - pinfo := peer.PeerInfo{ID: peer.ID("testing")} + pinfo := testutil.RandPeerOrFatal(t) rs.Client(pinfo).Provide(context.Background(), block.Key()) // but not on network solo := g.Next() diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index af2e57258..ef4f3d503 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -1,14 +1,12 @@ package bitswap import ( - "math" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" mockpeernet "github.com/jbenet/go-ipfs/net/mock" peer "github.com/jbenet/go-ipfs/peer" mockrouting "github.com/jbenet/go-ipfs/routing/mock" - delay "github.com/jbenet/go-ipfs/util/delay" testutil "github.com/jbenet/go-ipfs/util/testutil" ) @@ -17,16 +15,7 @@ type peernet struct { routingserver mockrouting.Server } -func StreamNetWithDelay( - ctx context.Context, - rs mockrouting.Server, - d delay.D) (Network, error) { - - net := mockpeernet.New(ctx) - net.SetLinkDefaults(mockpeernet.LinkOptions{ - Latency: d.Get(), - Bandwidth: math.MaxInt32, // TODO inject - }) +func StreamNet(ctx context.Context, net mockpeernet.Mocknet, rs mockrouting.Server) (Network, error) { return &peernet{net, rs}, nil } @@ -39,7 +28,7 @@ func (pn *peernet) Adapter(p testutil.Peer) bsnet.BitSwapNetwork { for _, other := range peers { pn.Mocknet.LinkPeers(p.ID(), other) } - routing := pn.routingserver.Client(peer.PeerInfo{ID: p.ID()}) + routing := pn.routingserver.ClientWithDatastore(context.TODO(), p, ds.NewMapDatastore()) return bsnet.NewFromIpfsNetwork(client, routing) } diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 0bcffbe51..5811db3bb 100644 --- 
a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -33,7 +33,7 @@ func (n *network) Adapter(p testutil.Peer) bsnet.BitSwapNetwork { client := &networkClient{ local: p.ID(), network: n, - routing: n.routingserver.Client(peer.PeerInfo{ID: p.ID()}), + routing: n.routingserver.Client(p), } n.clients[p.ID()] = client return client diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 728c2ba3b..9ad3cf312 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -79,15 +79,15 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // sessions. To safeguard, use the SessionGenerator to generate sessions. It's // just a much better idea. func session(ctx context.Context, net tn.Network, p testutil.Peer) Instance { + bsdelay := delay.Fixed(0) + const kWriteCacheElems = 100 adapter := net.Adapter(p) + dstore := ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay)) - bsdelay := delay.Fixed(0) - const kWriteCacheElems = 100 - bstore, err := blockstore.WriteCached(blockstore.NewBlockstore(ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay))), kWriteCacheElems) + bstore, err := blockstore.WriteCached(blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)), kWriteCacheElems) if err != nil { - // FIXME perhaps change signature and return error. - panic(err.Error()) + panic(err.Error()) // FIXME perhaps change signature and return error. } const alwaysSendToPeer = true From 07f7a2f5641c7f437fd535ecb655d20938c5835c Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 24 Dec 2014 09:26:53 -0500 Subject: [PATCH 0278/1035] don't link when creating network client. 
rely on caller This commit was moved from ipfs/go-bitswap@d7ff47d82b7dfd46a93914c01bc022b95468afc4 --- bitswap/testnet/peernet.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index ef4f3d503..4db254560 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -20,14 +20,10 @@ func StreamNet(ctx context.Context, net mockpeernet.Mocknet, rs mockrouting.Serv } func (pn *peernet) Adapter(p testutil.Peer) bsnet.BitSwapNetwork { - peers := pn.Mocknet.Peers() client, err := pn.Mocknet.AddPeer(p.PrivateKey(), p.Address()) if err != nil { panic(err.Error()) } - for _, other := range peers { - pn.Mocknet.LinkPeers(p.ID(), other) - } routing := pn.routingserver.ClientWithDatastore(context.TODO(), p, ds.NewMapDatastore()) return bsnet.NewFromIpfsNetwork(client, routing) } From 446f8a9d7aa7dfe40dea59c1844c76382d2fed7b Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 24 Dec 2014 09:53:18 -0500 Subject: [PATCH 0279/1035] style(testutil) rename testutil.Peer -> testutil.Identity cc @jbenet This commit was moved from ipfs/go-bitswap@15615ccd42ce3c66cfe1acc4eaa3846c37c2dd1c --- bitswap/bitswap_test.go | 2 +- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 8 ++++---- bitswap/testnet/peernet.go | 2 +- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 4 ++-- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 4ef0838a5..af6cb138c 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -61,7 +61,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this defer g.Close() block := blocks.NewBlock([]byte("block")) - pinfo := testutil.RandPeerOrFatal(t) + pinfo := testutil.RandIdentityOrFatal(t) rs.Client(pinfo).Provide(context.Background(), block.Key()) // but not on network solo := g.Next() diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go 
index c194d74cb..029ea704e 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -7,7 +7,7 @@ import ( ) type Network interface { - Adapter(testutil.Peer) bsnet.BitSwapNetwork + Adapter(testutil.Identity) bsnet.BitSwapNetwork HasPeer(peer.ID) bool } diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 08f4ff500..6f6275896 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -17,11 +17,11 @@ import ( func TestSendRequestToCooperativePeer(t *testing.T) { net := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) - recipientPeer := testutil.RandPeerOrFatal(t) + recipientPeer := testutil.RandIdentityOrFatal(t) t.Log("Get two network adapters") - initiator := net.Adapter(testutil.RandPeerOrFatal(t)) + initiator := net.Adapter(testutil.RandIdentityOrFatal(t)) recipient := net.Adapter(recipientPeer) expectedStr := "response from recipient" @@ -67,8 +67,8 @@ func TestSendRequestToCooperativePeer(t *testing.T) { func TestSendMessageAsyncButWaitForResponse(t *testing.T) { net := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) - responderPeer := testutil.RandPeerOrFatal(t) - waiter := net.Adapter(testutil.RandPeerOrFatal(t)) + responderPeer := testutil.RandIdentityOrFatal(t) + waiter := net.Adapter(testutil.RandIdentityOrFatal(t)) responder := net.Adapter(responderPeer) var wg sync.WaitGroup diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 4db254560..905d78a6a 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -19,7 +19,7 @@ func StreamNet(ctx context.Context, net mockpeernet.Mocknet, rs mockrouting.Serv return &peernet{net, rs}, nil } -func (pn *peernet) Adapter(p testutil.Peer) bsnet.BitSwapNetwork { +func (pn *peernet) Adapter(p testutil.Identity) bsnet.BitSwapNetwork { client, err := pn.Mocknet.AddPeer(p.PrivateKey(), p.Address()) if err != nil { panic(err.Error()) diff --git a/bitswap/testnet/virtual.go 
b/bitswap/testnet/virtual.go index 5811db3bb..887d29bee 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -29,7 +29,7 @@ type network struct { delay delay.D } -func (n *network) Adapter(p testutil.Peer) bsnet.BitSwapNetwork { +func (n *network) Adapter(p testutil.Identity) bsnet.BitSwapNetwork { client := &networkClient{ local: p.ID(), network: n, diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 9ad3cf312..0d1aa4fec 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -41,7 +41,7 @@ func (g *SessionGenerator) Close() error { func (g *SessionGenerator) Next() Instance { g.seq++ - p, err := testutil.RandPeer() + p, err := testutil.RandIdentity() if err != nil { panic("FIXME") // TODO change signature } @@ -78,7 +78,7 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // NB: It's easy make mistakes by providing the same peer ID to two different // sessions. To safeguard, use the SessionGenerator to generate sessions. It's // just a much better idea. -func session(ctx context.Context, net tn.Network, p testutil.Peer) Instance { +func session(ctx context.Context, net tn.Network, p testutil.Identity) Instance { bsdelay := delay.Fixed(0) const kWriteCacheElems = 100 From 5476b6c2a463b1e41803a72fb45a5433dfd67d6c Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Mon, 29 Dec 2014 05:43:56 -0800 Subject: [PATCH 0280/1035] introducing p2p pkg I think it's time to move a lot of the peer-to-peer networking but-not-ipfs-specific things into its own package: p2p. This could in the future be split off into its own library. The first thing to go is the peer. 
This commit was moved from ipfs/go-bitswap@0636625d7a21d9279bf04e2d7dd14281eb7aa28a --- bitswap/bitswap.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/taskqueue.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 2 +- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 4ff23aee2..fe20a406a 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -17,7 +17,7 @@ import ( bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" notifications "github.com/jbenet/go-ipfs/exchange/bitswap/notifications" wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" - peer "github.com/jbenet/go-ipfs/peer" + peer "github.com/jbenet/go-ipfs/p2p/peer" u "github.com/jbenet/go-ipfs/util" errors "github.com/jbenet/go-ipfs/util/debugerror" "github.com/jbenet/go-ipfs/util/delay" diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index da5ccfe6d..582d96e08 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -7,7 +7,7 @@ import ( bstore "github.com/jbenet/go-ipfs/blocks/blockstore" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" - peer "github.com/jbenet/go-ipfs/peer" + peer "github.com/jbenet/go-ipfs/p2p/peer" u "github.com/jbenet/go-ipfs/util" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 0196863b3..08e729dc8 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -11,7 +11,7 @@ import ( blocks "github.com/jbenet/go-ipfs/blocks" blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" message "github.com/jbenet/go-ipfs/exchange/bitswap/message" - peer 
"github.com/jbenet/go-ipfs/peer" + peer "github.com/jbenet/go-ipfs/p2p/peer" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index f2b824603..273c3e706 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -4,7 +4,7 @@ import ( "time" wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" - peer "github.com/jbenet/go-ipfs/peer" + peer "github.com/jbenet/go-ipfs/p2p/peer" u "github.com/jbenet/go-ipfs/util" ) diff --git a/bitswap/decision/taskqueue.go b/bitswap/decision/taskqueue.go index c86a73371..11af3db35 100644 --- a/bitswap/decision/taskqueue.go +++ b/bitswap/decision/taskqueue.go @@ -4,7 +4,7 @@ import ( "sync" wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" - peer "github.com/jbenet/go-ipfs/peer" + peer "github.com/jbenet/go-ipfs/p2p/peer" u "github.com/jbenet/go-ipfs/util" ) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 1bc14ca88..8598898fa 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -4,7 +4,7 @@ import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" - peer "github.com/jbenet/go-ipfs/peer" + peer "github.com/jbenet/go-ipfs/p2p/peer" u "github.com/jbenet/go-ipfs/util" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 5388c8e6d..73114642f 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -4,7 +4,7 @@ import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" inet "github.com/jbenet/go-ipfs/net" - peer "github.com/jbenet/go-ipfs/peer" + peer "github.com/jbenet/go-ipfs/p2p/peer" routing "github.com/jbenet/go-ipfs/routing" util "github.com/jbenet/go-ipfs/util" ) diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 
029ea704e..4b6f46aaf 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -2,7 +2,7 @@ package bitswap import ( bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" - peer "github.com/jbenet/go-ipfs/peer" + peer "github.com/jbenet/go-ipfs/p2p/peer" "github.com/jbenet/go-ipfs/util/testutil" ) diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 6f6275896..bbf84995c 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -8,7 +8,7 @@ import ( blocks "github.com/jbenet/go-ipfs/blocks" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" - peer "github.com/jbenet/go-ipfs/peer" + peer "github.com/jbenet/go-ipfs/p2p/peer" mockrouting "github.com/jbenet/go-ipfs/routing/mock" delay "github.com/jbenet/go-ipfs/util/delay" testutil "github.com/jbenet/go-ipfs/util/testutil" diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 905d78a6a..e16242ce0 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,7 +5,7 @@ import ( ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" mockpeernet "github.com/jbenet/go-ipfs/net/mock" - peer "github.com/jbenet/go-ipfs/peer" + peer "github.com/jbenet/go-ipfs/p2p/peer" mockrouting "github.com/jbenet/go-ipfs/routing/mock" testutil "github.com/jbenet/go-ipfs/util/testutil" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 887d29bee..9426176a2 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -7,7 +7,7 @@ import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" - peer "github.com/jbenet/go-ipfs/peer" + peer "github.com/jbenet/go-ipfs/p2p/peer" 
routing "github.com/jbenet/go-ipfs/routing" mockrouting "github.com/jbenet/go-ipfs/routing/mock" util "github.com/jbenet/go-ipfs/util" diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 0d1aa4fec..dd96e5f46 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -9,7 +9,7 @@ import ( blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" exchange "github.com/jbenet/go-ipfs/exchange" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" - peer "github.com/jbenet/go-ipfs/peer" + peer "github.com/jbenet/go-ipfs/p2p/peer" datastore2 "github.com/jbenet/go-ipfs/util/datastore2" delay "github.com/jbenet/go-ipfs/util/delay" testutil "github.com/jbenet/go-ipfs/util/testutil" From 5c11f72597564b7838f26e7d4f39ddb62e353be2 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Mon, 29 Dec 2014 05:48:21 -0800 Subject: [PATCH 0281/1035] net -> p2p/net The net package is the next to move. It will be massaged a bit still to fix the Network / "NetworkBackend" conflict. This commit was moved from ipfs/go-bitswap@442eb2c994311d7922156f4f4b78f3d3c84cfe2d --- bitswap/message/message.go | 2 +- bitswap/network/ipfs_impl.go | 2 +- bitswap/testnet/peernet.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 7f7f1d08e..117758d9e 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -6,7 +6,7 @@ import ( blocks "github.com/jbenet/go-ipfs/blocks" pb "github.com/jbenet/go-ipfs/exchange/bitswap/message/internal/pb" wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" - inet "github.com/jbenet/go-ipfs/net" + inet "github.com/jbenet/go-ipfs/p2p/net" u "github.com/jbenet/go-ipfs/util" ggio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 73114642f..7c975acf2 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -3,7 +3,7 @@ 
package network import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" - inet "github.com/jbenet/go-ipfs/net" + inet "github.com/jbenet/go-ipfs/p2p/net" peer "github.com/jbenet/go-ipfs/p2p/peer" routing "github.com/jbenet/go-ipfs/routing" util "github.com/jbenet/go-ipfs/util" diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index e16242ce0..7caa64efd 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -4,7 +4,7 @@ import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" - mockpeernet "github.com/jbenet/go-ipfs/net/mock" + mockpeernet "github.com/jbenet/go-ipfs/p2p/net/mock" peer "github.com/jbenet/go-ipfs/p2p/peer" mockrouting "github.com/jbenet/go-ipfs/routing/mock" testutil "github.com/jbenet/go-ipfs/util/testutil" From f8cd505f4d4bc67b1cf181c06cf5f25bb4438f3b Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Thu, 1 Jan 2015 12:45:39 -0800 Subject: [PATCH 0282/1035] swap net2 -> net This commit was moved from ipfs/go-bitswap@4637c322df39691cb6707d1515b6ba7d8ae3c008 --- bitswap/network/interface.go | 3 +++ bitswap/network/ipfs_impl.go | 20 ++++++++++---------- bitswap/testnet/peernet.go | 2 +- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 8598898fa..7c34a352b 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -5,9 +5,12 @@ import ( bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" peer "github.com/jbenet/go-ipfs/p2p/peer" + protocol "github.com/jbenet/go-ipfs/p2p/protocol" u "github.com/jbenet/go-ipfs/util" ) +var ProtocolBitswap protocol.ID = "/ipfs/bitswap" + // BitSwapNetwork provides network connectivity for 
BitSwap sessions type BitSwapNetwork interface { diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 7c975acf2..4e349dbed 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -3,6 +3,7 @@ package network import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" + host "github.com/jbenet/go-ipfs/p2p/host" inet "github.com/jbenet/go-ipfs/p2p/net" peer "github.com/jbenet/go-ipfs/p2p/peer" routing "github.com/jbenet/go-ipfs/routing" @@ -11,21 +12,20 @@ import ( var log = util.Logger("bitswap_network") -// NewFromIpfsNetwork returns a BitSwapNetwork supported by underlying IPFS -// Dialer & Service -func NewFromIpfsNetwork(n inet.Network, r routing.IpfsRouting) BitSwapNetwork { +// NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host +func NewFromIpfsHost(host host.Host, r routing.IpfsRouting) BitSwapNetwork { bitswapNetwork := impl{ - network: n, + host: host, routing: r, } - n.SetHandler(inet.ProtocolBitswap, bitswapNetwork.handleNewStream) + host.SetStreamHandler(ProtocolBitswap, bitswapNetwork.handleNewStream) return &bitswapNetwork } // impl transforms the ipfs network interface, which sends and receives // NetMessage objects, into the bitswap network interface. 
type impl struct { - network inet.Network + host host.Host routing routing.IpfsRouting // inbound messages from the network are forwarded to the receiver @@ -33,7 +33,7 @@ type impl struct { } func (bsnet *impl) DialPeer(ctx context.Context, p peer.ID) error { - return bsnet.network.DialPeer(ctx, p) + return bsnet.host.Connect(ctx, peer.PeerInfo{ID: p}) } func (bsnet *impl) SendMessage( @@ -41,7 +41,7 @@ func (bsnet *impl) SendMessage( p peer.ID, outgoing bsmsg.BitSwapMessage) error { - s, err := bsnet.network.NewStream(inet.ProtocolBitswap, p) + s, err := bsnet.host.NewStream(ProtocolBitswap, p) if err != nil { return err } @@ -55,7 +55,7 @@ func (bsnet *impl) SendRequest( p peer.ID, outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) { - s, err := bsnet.network.NewStream(inet.ProtocolBitswap, p) + s, err := bsnet.host.NewStream(ProtocolBitswap, p) if err != nil { return nil, err } @@ -79,7 +79,7 @@ func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int) defer close(out) providers := bsnet.routing.FindProvidersAsync(ctx, k, max) for info := range providers { - bsnet.network.Peerstore().AddAddresses(info.ID, info.Addrs) + bsnet.host.Peerstore().AddAddresses(info.ID, info.Addrs) select { case <-ctx.Done(): case out <- info.ID: diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 7caa64efd..1d1d22408 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -25,7 +25,7 @@ func (pn *peernet) Adapter(p testutil.Identity) bsnet.BitSwapNetwork { panic(err.Error()) } routing := pn.routingserver.ClientWithDatastore(context.TODO(), p, ds.NewMapDatastore()) - return bsnet.NewFromIpfsNetwork(client, routing) + return bsnet.NewFromIpfsHost(client, routing) } func (pn *peernet) HasPeer(p peer.ID) bool { From 3106515328494cd2bd95735d35ff259fe699f8a7 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Sat, 3 Jan 2015 03:01:17 -0800 Subject: [PATCH 0283/1035] bitswap: add self peer.ID This commit was moved 
from ipfs/go-bitswap@83d122006131614a8bd6182afaaeb032ebeaae43 --- bitswap/bitswap.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index fe20a406a..cea618970 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -61,6 +61,7 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, }() bs := &bitswap{ + self: p, blockstore: bstore, cancelFunc: cancelFunc, notifications: notif, @@ -79,6 +80,9 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, // bitswap instances implement the bitswap protocol. type bitswap struct { + // the ID of the peer to act on behalf of + self peer.ID + // network delivers messages on behalf of the session network bsnet.BitSwapNetwork From 123f13412dcca10cee1921dbda131aa09be78cdd Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Sat, 3 Jan 2015 03:01:45 -0800 Subject: [PATCH 0284/1035] bitswap: send wantlist code reuse + debug logs This commit was moved from ipfs/go-bitswap@6a6dc56a2942e9ea886d32fca7e56cd691514cf5 --- bitswap/bitswap.go | 85 +++++++++++++++++++++++++++++++++------------- 1 file changed, 62 insertions(+), 23 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index cea618970..79e5a576c 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -3,6 +3,7 @@ package bitswap import ( + "fmt" "math" "sync" "time" @@ -170,58 +171,96 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { return bs.network.Provide(ctx, blk.Key()) } -func (bs *bitswap) sendWantListTo(ctx context.Context, peers <-chan peer.ID) error { +func (bs *bitswap) sendWantlistMsgToPeer(ctx context.Context, m bsmsg.BitSwapMessage, p peer.ID) error { + logd := fmt.Sprintf("%s bitswap.sendWantlistMsgToPeer(%d, %s)", bs.self, len(m.Wantlist()), p) + + log.Debugf("%s sending wantlist", logd) + if err := bs.send(ctx, p, m); err != nil { + log.Errorf("%s send wantlist error: %s", logd, err) + return err + } + log.Debugf("%s send 
wantlist success", logd) + return nil +} + +func (bs *bitswap) sendWantlistMsgToPeers(ctx context.Context, m bsmsg.BitSwapMessage, peers <-chan peer.ID) error { if peers == nil { panic("Cant send wantlist to nil peerchan") } - message := bsmsg.New() - for _, wanted := range bs.wantlist.Entries() { - message.AddEntry(wanted.Key, wanted.Priority) - } + + logd := fmt.Sprintf("%s bitswap.sendWantlistMsgTo(%d)", bs.self, len(m.Wantlist())) + log.Debugf("%s begin", logd) + defer log.Debugf("%s end", logd) + + set := pset.New() wg := sync.WaitGroup{} for peerToQuery := range peers { log.Event(ctx, "PeerToQuery", peerToQuery) + logd := fmt.Sprintf("%sto(%s)", logd, peerToQuery) + + if !set.TryAdd(peerToQuery) { //Do once per peer + log.Debugf("%s skipped (already sent)", logd) + continue + } + wg.Add(1) go func(p peer.ID) { defer wg.Done() - if err := bs.send(ctx, p, message); err != nil { - log.Error(err) - return - } + bs.sendWantlistMsgToPeer(ctx, m, p) }(peerToQuery) } wg.Wait() return nil } -func (bs *bitswap) sendWantlistToProviders(ctx context.Context, wantlist *wantlist.ThreadSafe) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - +func (bs *bitswap) sendWantlistToPeers(ctx context.Context, peers <-chan peer.ID) error { message := bsmsg.New() message.SetFull(true) - for _, e := range bs.wantlist.Entries() { - message.AddEntry(e.Key, e.Priority) + for _, wanted := range bs.wantlist.Entries() { + message.AddEntry(wanted.Key, wanted.Priority) } + return bs.sendWantlistMsgToPeers(ctx, message, peers) +} - set := pset.New() +func (bs *bitswap) sendWantlistToProviders(ctx context.Context) { + logd := fmt.Sprintf("%s bitswap.sendWantlistToProviders", bs.self) + log.Debugf("%s begin", logd) + defer log.Debugf("%s end", logd) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // prepare a channel to hand off to sendWantlistToPeers + sendToPeers := make(chan peer.ID) // Get providers for all entries in wantlist (could take a while) wg := 
sync.WaitGroup{} - for _, e := range wantlist.Entries() { + for _, e := range bs.wantlist.Entries() { wg.Add(1) go func(k u.Key) { defer wg.Done() + + logd := fmt.Sprintf("%s(entry: %s)", logd, k) + log.Debugf("%s asking dht for providers", logd) + child, _ := context.WithTimeout(ctx, providerRequestTimeout) providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest) for prov := range providers { - if set.TryAdd(prov) { //Do once per peer - bs.send(ctx, prov, message) - } + log.Debugf("%s dht returned provider %s. send wantlist", logd, prov) + sendToPeers <- prov } }(e.Key) } - wg.Wait() + + go func() { + wg.Wait() // make sure all our children do finish. + close(sendToPeers) + }() + + err := bs.sendWantlistToPeers(ctx, sendToPeers) + if err != nil { + log.Errorf("%s sendWantlistToPeers error: %s", logd, err) + } } func (bs *bitswap) taskWorker(ctx context.Context) { @@ -247,7 +286,7 @@ func (bs *bitswap) clientWorker(parent context.Context) { select { case <-broadcastSignal: // Resend unfulfilled wantlist keys - bs.sendWantlistToProviders(ctx, bs.wantlist) + bs.sendWantlistToProviders(ctx) broadcastSignal = time.After(rebroadcastDelay.Get()) case ks := <-bs.batchRequests: if len(ks) == 0 { @@ -266,7 +305,7 @@ func (bs *bitswap) clientWorker(parent context.Context) { // newer bitswap strategies. 
child, _ := context.WithTimeout(ctx, providerRequestTimeout) providers := bs.network.FindProvidersAsync(child, ks[0], maxProvidersPerRequest) - err := bs.sendWantListTo(ctx, providers) + err := bs.sendWantlistToPeers(ctx, providers) if err != nil { log.Errorf("error sending wantlist: %s", err) } From 8d4b6557a634847ae78bea96b8938f24a69c5103 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Sat, 3 Jan 2015 06:14:16 -0800 Subject: [PATCH 0285/1035] bitswap engine: signal in own func This commit was moved from ipfs/go-bitswap@a13903c2a5e3a478976ee54ba5a7e4a4ab6eb1f3 --- bitswap/decision/engine.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 582d96e08..80a6e2fab 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -143,13 +143,10 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { newWorkExists := false defer func() { if newWorkExists { - // Signal task generation to restart (if stopped!) - select { - case e.workSignal <- struct{}{}: - default: - } + e.signalNewWork() } }() + e.lock.Lock() defer e.lock.Unlock() @@ -222,3 +219,11 @@ func (e *Engine) findOrCreate(p peer.ID) *ledger { } return l } + +func (e *Engine) signalNewWork() { + // Signal task generation to restart (if stopped!) 
+ select { + case e.workSignal <- struct{}{}: + default: + } +} From 373229580c03bfab5ca9e8319d55a0d7d1ba11b1 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Sat, 3 Jan 2015 06:15:50 -0800 Subject: [PATCH 0286/1035] bitswap debug logging This commit was moved from ipfs/go-bitswap@cdefdb39911043a935811a03c997881348e855e0 --- bitswap/bitswap.go | 33 ++++++++++++++++----------------- bitswap/decision/engine.go | 11 +++++++++-- bitswap/network/ipfs_impl.go | 1 + 3 files changed, 26 insertions(+), 19 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 79e5a576c..4ba099860 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -3,7 +3,6 @@ package bitswap import ( - "fmt" "math" "sync" "time" @@ -172,14 +171,14 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { } func (bs *bitswap) sendWantlistMsgToPeer(ctx context.Context, m bsmsg.BitSwapMessage, p peer.ID) error { - logd := fmt.Sprintf("%s bitswap.sendWantlistMsgToPeer(%d, %s)", bs.self, len(m.Wantlist()), p) + log := log.Prefix("bitswap(%s).bitswap.sendWantlistMsgToPeer(%d, %s)", bs.self, len(m.Wantlist()), p) - log.Debugf("%s sending wantlist", logd) + log.Debug("sending wantlist") if err := bs.send(ctx, p, m); err != nil { - log.Errorf("%s send wantlist error: %s", logd, err) + log.Errorf("send wantlist error: %s", err) return err } - log.Debugf("%s send wantlist success", logd) + log.Debugf("send wantlist success") return nil } @@ -188,20 +187,20 @@ func (bs *bitswap) sendWantlistMsgToPeers(ctx context.Context, m bsmsg.BitSwapMe panic("Cant send wantlist to nil peerchan") } - logd := fmt.Sprintf("%s bitswap.sendWantlistMsgTo(%d)", bs.self, len(m.Wantlist())) - log.Debugf("%s begin", logd) - defer log.Debugf("%s end", logd) + log := log.Prefix("bitswap(%s).sendWantlistMsgToPeers(%d)", bs.self, len(m.Wantlist())) + log.Debugf("begin") + defer log.Debugf("end") set := pset.New() wg := sync.WaitGroup{} for peerToQuery := range peers { log.Event(ctx, 
"PeerToQuery", peerToQuery) - logd := fmt.Sprintf("%sto(%s)", logd, peerToQuery) if !set.TryAdd(peerToQuery) { //Do once per peer - log.Debugf("%s skipped (already sent)", logd) + log.Debugf("%s skipped (already sent)", peerToQuery) continue } + log.Debugf("%s sending", peerToQuery) wg.Add(1) go func(p peer.ID) { @@ -223,9 +222,9 @@ func (bs *bitswap) sendWantlistToPeers(ctx context.Context, peers <-chan peer.ID } func (bs *bitswap) sendWantlistToProviders(ctx context.Context) { - logd := fmt.Sprintf("%s bitswap.sendWantlistToProviders", bs.self) - log.Debugf("%s begin", logd) - defer log.Debugf("%s end", logd) + log := log.Prefix("bitswap(%s).sendWantlistToProviders ", bs.self) + log.Debugf("begin") + defer log.Debugf("end") ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -240,13 +239,13 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context) { go func(k u.Key) { defer wg.Done() - logd := fmt.Sprintf("%s(entry: %s)", logd, k) - log.Debugf("%s asking dht for providers", logd) + log := log.Prefix("(entry: %s) ", k) + log.Debug("asking dht for providers") child, _ := context.WithTimeout(ctx, providerRequestTimeout) providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest) for prov := range providers { - log.Debugf("%s dht returned provider %s. send wantlist", logd, prov) + log.Debugf("dht returned provider %s. 
send wantlist", prov) sendToPeers <- prov } }(e.Key) @@ -259,7 +258,7 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context) { err := bs.sendWantlistToPeers(ctx, sendToPeers) if err != nil { - log.Errorf("%s sendWantlistToPeers error: %s", logd, err) + log.Errorf("sendWantlistToPeers error: %s", err) } } diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 80a6e2fab..cd3ebac31 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -8,7 +8,7 @@ import ( bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" peer "github.com/jbenet/go-ipfs/p2p/peer" - u "github.com/jbenet/go-ipfs/util" + eventlog "github.com/jbenet/go-ipfs/util/eventlog" ) // TODO consider taking responsibility for other types of requests. For @@ -41,7 +41,7 @@ import ( // whatever it sees fit to produce desired outcomes (get wanted keys // quickly, maintain good relationships with peers, etc). -var log = u.Logger("engine") +var log = eventlog.Logger("engine") const ( sizeOutboxChan = 4 @@ -140,6 +140,10 @@ func (e *Engine) Peers() []peer.ID { // MessageReceived performs book-keeping. Returns error if passed invalid // arguments. 
func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { + log := log.Prefix("Engine.MessageReceived(%s)", p) + log.Debugf("enter") + defer log.Debugf("exit") + newWorkExists := false defer func() { if newWorkExists { @@ -156,9 +160,11 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { } for _, entry := range m.Wantlist() { if entry.Cancel { + log.Debug("cancel", entry.Key) l.CancelWant(entry.Key) e.peerRequestQueue.Remove(entry.Key, p) } else { + log.Debug("wants", entry.Key, entry.Priority) l.Wants(entry.Key, entry.Priority) if exists, err := e.bs.Has(entry.Key); err == nil && exists { newWorkExists = true @@ -169,6 +175,7 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { for _, block := range m.Blocks() { // FIXME extract blocks.NumBytes(block) or block.NumBytes() method + log.Debug("got block %s %d bytes", block.Key(), len(block.Data)) l.ReceivedBytes(len(block.Data)) for _, l := range e.ledgerMap { if l.WantListContains(block.Key()) { diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 4e349dbed..c2a87ce0a 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -55,6 +55,7 @@ func (bsnet *impl) SendRequest( p peer.ID, outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) { + log.Debugf("bsnet SendRequest to %s", p) s, err := bsnet.host.NewStream(ProtocolBitswap, p) if err != nil { return nil, err From 97fa90880ade02b8c55ce7c34454414afa2eff56 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Sat, 3 Jan 2015 06:16:09 -0800 Subject: [PATCH 0287/1035] bitswap net: always close This commit was moved from ipfs/go-bitswap@3f773ebd65ba2c7b022a34ff83b83581c617bff7 --- bitswap/network/ipfs_impl.go | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index c2a87ce0a..2f3fe950b 100644 --- a/bitswap/network/ipfs_impl.go +++ 
b/bitswap/network/ipfs_impl.go @@ -97,23 +97,20 @@ func (bsnet *impl) Provide(ctx context.Context, k util.Key) error { // handleNewStream receives a new stream from the network. func (bsnet *impl) handleNewStream(s inet.Stream) { + defer s.Close() if bsnet.receiver == nil { return } - go func() { - defer s.Close() - - received, err := bsmsg.FromNet(s) - if err != nil { - go bsnet.receiver.ReceiveError(err) - return - } - - p := s.Conn().RemotePeer() - ctx := context.Background() - bsnet.receiver.ReceiveMessage(ctx, p, received) - }() + received, err := bsmsg.FromNet(s) + if err != nil { + go bsnet.receiver.ReceiveError(err) + return + } + p := s.Conn().RemotePeer() + ctx := context.Background() + log.Debugf("bsnet handleNewStream from %s", s.Conn().RemotePeer()) + bsnet.receiver.ReceiveMessage(ctx, p, received) } From ed186a698c444b4b1d1a38fb488c19bd66ed02d7 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Sat, 3 Jan 2015 08:54:36 -0800 Subject: [PATCH 0288/1035] bitswap and dht: lots of debugging logs This commit was moved from ipfs/go-bitswap@fa45a7dbe7c9ad3cd5662b5e3c4ea81fc8f2c486 --- bitswap/bitswap.go | 7 +++++++ bitswap/decision/engine.go | 10 +++++++++- bitswap/decision/taskqueue.go | 5 +++++ bitswap/network/ipfs_impl.go | 35 ++++++++++++++++++++++++++++++----- 4 files changed, 51 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 4ba099860..bdc17ff96 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -108,6 +108,7 @@ type bitswap struct { // GetBlock attempts to retrieve a particular block from peers within the // deadline enforced by the context. func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, error) { + log := log.Prefix("bitswap(%s).GetBlock(%s)", bs.self, k) // Any async work initiated by this function must end when this function // returns. To ensure this, derive a new context. 
Note that it is okay to @@ -120,10 +121,12 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err ctx = eventlog.ContextWithLoggable(ctx, eventlog.Uuid("GetBlockRequest")) log.Event(ctx, "GetBlockRequestBegin", &k) + log.Debugf("GetBlockRequestBegin") defer func() { cancelFunc() log.Event(ctx, "GetBlockRequestEnd", &k) + log.Debugf("GetBlockRequestEnd") }() promise, err := bs.GetBlocks(ctx, []u.Key{k}) @@ -263,12 +266,16 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context) { } func (bs *bitswap) taskWorker(ctx context.Context) { + log := log.Prefix("bitswap(%s).taskWorker", bs.self) for { select { case <-ctx.Done(): + log.Debugf("exiting") return case envelope := <-bs.engine.Outbox(): + log.Debugf("message to %s sending...", envelope.Peer) bs.send(ctx, envelope.Peer, envelope.Message) + log.Debugf("message to %s sent", envelope.Peer) } } } diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index cd3ebac31..b2e20bf8e 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -91,6 +91,7 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { } func (e *Engine) taskWorker(ctx context.Context) { + log := log.Prefix("bitswap.Engine.taskWorker") for { nextTask := e.peerRequestQueue.Pop() if nextTask == nil { @@ -98,11 +99,16 @@ func (e *Engine) taskWorker(ctx context.Context) { // Wait until there are! select { case <-ctx.Done(): + log.Debugf("exiting: %s", ctx.Err()) return case <-e.workSignal: + log.Debugf("woken up") } continue } + log := log.Prefix("%s", nextTask) + log.Debugf("processing") + block, err := e.bs.Get(nextTask.Entry.Key) if err != nil { log.Warning("engine: task exists to send block, but block is not in blockstore") @@ -113,10 +119,12 @@ func (e *Engine) taskWorker(ctx context.Context) { m := bsmsg.New() m.AddBlock(block) // TODO: maybe add keys from our wantlist? 
+ log.Debugf("sending...") select { case <-ctx.Done(): return case e.outbox <- Envelope{Peer: nextTask.Target, Message: m}: + log.Debugf("sent") } } } @@ -140,7 +148,7 @@ func (e *Engine) Peers() []peer.ID { // MessageReceived performs book-keeping. Returns error if passed invalid // arguments. func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { - log := log.Prefix("Engine.MessageReceived(%s)", p) + log := log.Prefix("bitswap.Engine.MessageReceived(%s)", p) log.Debugf("enter") defer log.Debugf("exit") diff --git a/bitswap/decision/taskqueue.go b/bitswap/decision/taskqueue.go index 11af3db35..659e287d0 100644 --- a/bitswap/decision/taskqueue.go +++ b/bitswap/decision/taskqueue.go @@ -1,6 +1,7 @@ package decision import ( + "fmt" "sync" wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" @@ -30,6 +31,10 @@ type task struct { Trash bool } +func (t *task) String() string { + return fmt.Sprintf("", t.Target, t.Entry.Key, t.Trash) +} + // Push currently adds a new task to the end of the list func (tl *taskQueue) Push(entry wantlist.Entry, to peer.ID) { tl.lock.Lock() diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 2f3fe950b..0950ed0b8 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -2,15 +2,17 @@ package network import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" host "github.com/jbenet/go-ipfs/p2p/host" inet "github.com/jbenet/go-ipfs/p2p/net" peer "github.com/jbenet/go-ipfs/p2p/peer" routing "github.com/jbenet/go-ipfs/routing" util "github.com/jbenet/go-ipfs/util" + eventlog "github.com/jbenet/go-ipfs/util/eventlog" ) -var log = util.Logger("bitswap_network") +var log = eventlog.Logger("bitswap_network") // NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host func NewFromIpfsHost(host host.Host, r routing.IpfsRouting) BitSwapNetwork { @@ -41,13 
+43,23 @@ func (bsnet *impl) SendMessage( p peer.ID, outgoing bsmsg.BitSwapMessage) error { + log := log.Prefix("bitswap net SendMessage to %s", p) + + log.Debug("opening stream") s, err := bsnet.host.NewStream(ProtocolBitswap, p) if err != nil { return err } defer s.Close() - return outgoing.ToNet(s) + log.Debug("sending") + if err := outgoing.ToNet(s); err != nil { + log.Errorf("error: %s", err) + return err + } + + log.Debug("sent") + return err } func (bsnet *impl) SendRequest( @@ -55,18 +67,30 @@ func (bsnet *impl) SendRequest( p peer.ID, outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) { - log.Debugf("bsnet SendRequest to %s", p) + log := log.Prefix("bitswap net SendRequest to %s", p) + + log.Debug("opening stream") s, err := bsnet.host.NewStream(ProtocolBitswap, p) if err != nil { return nil, err } defer s.Close() + log.Debug("sending") if err := outgoing.ToNet(s); err != nil { + log.Errorf("error: %s", err) return nil, err } - return bsmsg.FromNet(s) + log.Debug("sent, now receiveing") + incoming, err := bsmsg.FromNet(s) + if err != nil { + log.Errorf("error: %s", err) + return incoming, err + } + + log.Debug("received") + return incoming, nil } func (bsnet *impl) SetDelegate(r Receiver) { @@ -106,11 +130,12 @@ func (bsnet *impl) handleNewStream(s inet.Stream) { received, err := bsmsg.FromNet(s) if err != nil { go bsnet.receiver.ReceiveError(err) + log.Errorf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err) return } p := s.Conn().RemotePeer() ctx := context.Background() - log.Debugf("bsnet handleNewStream from %s", s.Conn().RemotePeer()) + log.Debugf("bitswap net handleNewStream from %s", s.Conn().RemotePeer()) bsnet.receiver.ReceiveMessage(ctx, p, received) } From 9d1e4e07b49365295c9c5d95c10215431e70211e Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sat, 3 Jan 2015 17:15:05 -0500 Subject: [PATCH 0289/1035] fix(bitswap/network): return when context is done @jbenet @whyrusleeping This bug (missing return) could 
tie up the client worker and cause operations to come to a halt. This commit was moved from ipfs/go-bitswap@d01e7e1922fb01bc104a3e1fac5ad9ca8dd695e1 --- bitswap/network/ipfs_impl.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 0950ed0b8..841688162 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -107,6 +107,7 @@ func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int) bsnet.host.Peerstore().AddAddresses(info.ID, info.Addrs) select { case <-ctx.Done(): + return case out <- info.ID: } } From abef1329a1a4a7845a1dbc281eddc676a6c97a38 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Sun, 4 Jan 2015 13:56:38 -0800 Subject: [PATCH 0290/1035] bitswap: remove DialPeer from interface Bitswap doesn't usually care about dialing. the underlying network adapter can make sure of that. This commit was moved from ipfs/go-bitswap@e4cdc05a1eab284ff168db3fca01a9dbe92da51d --- bitswap/bitswap.go | 5 ----- bitswap/network/interface.go | 3 --- bitswap/network/ipfs_impl.go | 16 ++++++++++++---- bitswap/testnet/virtual.go | 9 --------- 4 files changed, 12 insertions(+), 21 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index bdc17ff96..a883e4b03 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -385,11 +385,6 @@ func (bs *bitswap) ReceiveError(err error) { // send strives to ensure that accounting is always performed when a message is // sent func (bs *bitswap) send(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) error { - log.Event(ctx, "DialPeer", p) - err := bs.network.DialPeer(ctx, p) - if err != nil { - return errors.Wrap(err) - } if err := bs.network.SendMessage(ctx, p, m); err != nil { return errors.Wrap(err) } diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 7c34a352b..18bb1df83 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -14,9 +14,6 @@ var 
ProtocolBitswap protocol.ID = "/ipfs/bitswap" // BitSwapNetwork provides network connectivity for BitSwap sessions type BitSwapNetwork interface { - // DialPeer ensures there is a connection to peer. - DialPeer(context.Context, peer.ID) error - // SendMessage sends a BitSwap message to a peer. SendMessage( context.Context, diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 841688162..ea98cc87f 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -34,10 +34,6 @@ type impl struct { receiver Receiver } -func (bsnet *impl) DialPeer(ctx context.Context, p peer.ID) error { - return bsnet.host.Connect(ctx, peer.PeerInfo{ID: p}) -} - func (bsnet *impl) SendMessage( ctx context.Context, p peer.ID, @@ -45,6 +41,12 @@ func (bsnet *impl) SendMessage( log := log.Prefix("bitswap net SendMessage to %s", p) + // ensure we're connected + //TODO(jbenet) move this into host.NewStream? + if err := bsnet.host.Connect(ctx, peer.PeerInfo{ID: p}); err != nil { + return err + } + log.Debug("opening stream") s, err := bsnet.host.NewStream(ProtocolBitswap, p) if err != nil { @@ -69,6 +71,12 @@ func (bsnet *impl) SendRequest( log := log.Prefix("bitswap net SendRequest to %s", p) + // ensure we're connected + //TODO(jbenet) move this into host.NewStream? 
+ if err := bsnet.host.Connect(ctx, peer.PeerInfo{ID: p}); err != nil { + return nil, err + } + log.Debug("opening stream") s, err := bsnet.host.NewStream(ProtocolBitswap, p) if err != nil { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 9426176a2..639bb00d3 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -2,7 +2,6 @@ package bitswap import ( "errors" - "fmt" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" @@ -178,14 +177,6 @@ func (nc *networkClient) Provide(ctx context.Context, k util.Key) error { return nc.routing.Provide(ctx, k) } -func (nc *networkClient) DialPeer(ctx context.Context, p peer.ID) error { - // no need to do anything because dialing isn't a thing in this test net. - if !nc.network.HasPeer(p) { - return fmt.Errorf("Peer not in network: %s", p) - } - return nil -} - func (nc *networkClient) SetDelegate(r bsnet.Receiver) { nc.Receiver = r } From b2e5acb285b442b3322060c6de27db7a4d29bfd6 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Sun, 4 Jan 2015 14:06:33 -0800 Subject: [PATCH 0291/1035] bitswap: log superfluous messages This commit was moved from ipfs/go-bitswap@c83c43a2a1ccc19524f242a43705579cdded8b76 --- bitswap/decision/engine.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index b2e20bf8e..e4b2ab832 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -149,9 +149,13 @@ func (e *Engine) Peers() []peer.ID { // arguments. func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { log := log.Prefix("bitswap.Engine.MessageReceived(%s)", p) - log.Debugf("enter") + log.Debugf("enter. 
%d entries %d blocks", len(m.Wantlist()), len(m.Blocks())) defer log.Debugf("exit") + if len(m.Wantlist()) == 0 && len(m.Blocks()) == 0 { + log.Info("superfluous message") + } + newWorkExists := false defer func() { if newWorkExists { @@ -166,6 +170,7 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { if m.Full() { l.wantList = wl.New() } + for _, entry := range m.Wantlist() { if entry.Cancel { log.Debug("cancel", entry.Key) From a494116cc0d32b79d8df9e1a5778e729f481f448 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Mon, 5 Jan 2015 05:21:05 -0800 Subject: [PATCH 0292/1035] p2p/test: bogus key pair for faster tests This commit was moved from ipfs/go-bitswap@09a1db4c220e6fc4ca7b6fb2bc8097a04113a2a3 --- bitswap/bitswap_test.go | 18 +++++++++--------- bitswap/testutils.go | 6 ++++-- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index af6cb138c..64d5ead52 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -11,10 +11,10 @@ import ( blocks "github.com/jbenet/go-ipfs/blocks" blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" + p2ptestutil "github.com/jbenet/go-ipfs/p2p/test/util" mockrouting "github.com/jbenet/go-ipfs/routing/mock" u "github.com/jbenet/go-ipfs/util" delay "github.com/jbenet/go-ipfs/util/delay" - "github.com/jbenet/go-ipfs/util/testutil" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work @@ -25,7 +25,7 @@ func TestClose(t *testing.T) { // TODO t.Skip("TODO Bitswap's Close implementation is a WIP") vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sesgen := NewSessionGenerator(vnet) + sesgen := NewTestSessionGenerator(vnet) defer sesgen.Close() bgen := blocksutil.NewBlockGenerator() @@ -39,7 +39,7 @@ func TestClose(t *testing.T) { func TestGetBlockTimeout(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - g := NewSessionGenerator(net) + g := NewTestSessionGenerator(net) defer g.Close() self := g.Next() @@ -57,11 +57,11 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this rs := mockrouting.NewServer() net := tn.VirtualNetwork(rs, delay.Fixed(kNetworkDelay)) - g := NewSessionGenerator(net) + g := NewTestSessionGenerator(net) defer g.Close() block := blocks.NewBlock([]byte("block")) - pinfo := testutil.RandIdentityOrFatal(t) + pinfo := p2ptestutil.RandTestBogusIdentityOrFatal(t) rs.Client(pinfo).Provide(context.Background(), block.Key()) // but not on network solo := g.Next() @@ -81,7 +81,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) block := blocks.NewBlock([]byte("block")) - g := NewSessionGenerator(net) + g := NewTestSessionGenerator(net) defer g.Close() hasBlock := g.Next() @@ -134,7 +134,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { t.SkipNow() } net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := NewSessionGenerator(net) + sg := NewTestSessionGenerator(net) defer sg.Close() bg := blocksutil.NewBlockGenerator() @@ -198,7 +198,7 @@ func TestSendToWantingPeer(t *testing.T) { } net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := NewSessionGenerator(net) + sg := NewTestSessionGenerator(net) defer sg.Close() bg := 
blocksutil.NewBlockGenerator() @@ -243,7 +243,7 @@ func TestSendToWantingPeer(t *testing.T) { func TestBasicBitswap(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := NewSessionGenerator(net) + sg := NewTestSessionGenerator(net) bg := blocksutil.NewBlockGenerator() t.Log("Test a few nodes trying to get one file with a lot of blocks") diff --git a/bitswap/testutils.go b/bitswap/testutils.go index dd96e5f46..95019f297 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,12 +10,14 @@ import ( exchange "github.com/jbenet/go-ipfs/exchange" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" peer "github.com/jbenet/go-ipfs/p2p/peer" + p2ptestutil "github.com/jbenet/go-ipfs/p2p/test/util" datastore2 "github.com/jbenet/go-ipfs/util/datastore2" delay "github.com/jbenet/go-ipfs/util/delay" testutil "github.com/jbenet/go-ipfs/util/testutil" ) -func NewSessionGenerator( +// WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
+func NewTestSessionGenerator( net tn.Network) SessionGenerator { ctx, cancel := context.WithCancel(context.TODO()) return SessionGenerator{ @@ -41,7 +43,7 @@ func (g *SessionGenerator) Close() error { func (g *SessionGenerator) Next() Instance { g.seq++ - p, err := testutil.RandIdentity() + p, err := p2ptestutil.RandTestBogusIdentity() if err != nil { panic("FIXME") // TODO change signature } From 7b79961d266166e918eb3075b38678ad6b7d6fb3 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 11 Jan 2015 08:03:46 +0000 Subject: [PATCH 0293/1035] early out if no entries in wantlist This commit was moved from ipfs/go-bitswap@5da9c5e70bb512af302c2ed1d3042f6febe1a39b --- bitswap/bitswap.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index a883e4b03..f0063a9d9 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -225,6 +225,12 @@ func (bs *bitswap) sendWantlistToPeers(ctx context.Context, peers <-chan peer.ID } func (bs *bitswap) sendWantlistToProviders(ctx context.Context) { + entries := bs.wantlist.Entries() + if len(entries) == 0 { + log.Debug("No entries in wantlist, skipping send routine.") + return + } + log := log.Prefix("bitswap(%s).sendWantlistToProviders ", bs.self) log.Debugf("begin") defer log.Debugf("end") @@ -237,7 +243,7 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context) { // Get providers for all entries in wantlist (could take a while) wg := sync.WaitGroup{} - for _, e := range bs.wantlist.Entries() { + for _, e := range entries { wg.Add(1) go func(k u.Key) { defer wg.Done() From 0251c9b8e17372c48f2023bb3c19a0f2bf2d209a Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 15 Jan 2015 04:17:17 +0000 Subject: [PATCH 0294/1035] starting to move important events over to EventBegin/Done This commit was moved from ipfs/go-bitswap@a95d86b07d527bd9d371278bec5a3f5e12085022 --- bitswap/bitswap.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/bitswap/bitswap.go b/bitswap/bitswap.go index f0063a9d9..0ccf0cffa 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -120,12 +120,12 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err ctx, cancelFunc := context.WithCancel(parent) ctx = eventlog.ContextWithLoggable(ctx, eventlog.Uuid("GetBlockRequest")) - log.Event(ctx, "GetBlockRequestBegin", &k) + e := log.EventBegin(ctx, "GetBlockRequest", &k) log.Debugf("GetBlockRequestBegin") defer func() { cancelFunc() - log.Event(ctx, "GetBlockRequestEnd", &k) + e.Done() log.Debugf("GetBlockRequestEnd") }() From de91a3742b12a243af5daa3d78020748e66446c6 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 15 Jan 2015 04:45:34 +0000 Subject: [PATCH 0295/1035] rewrite as single line defer logs This commit was moved from ipfs/go-bitswap@6e8403d7eac049a0d7664f470fa63c86253e62f3 --- bitswap/bitswap.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 0ccf0cffa..25025bb8e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -120,12 +120,11 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err ctx, cancelFunc := context.WithCancel(parent) ctx = eventlog.ContextWithLoggable(ctx, eventlog.Uuid("GetBlockRequest")) - e := log.EventBegin(ctx, "GetBlockRequest", &k) + defer log.EventBegin(ctx, "GetBlockRequest", &k).Done() log.Debugf("GetBlockRequestBegin") defer func() { cancelFunc() - e.Done() log.Debugf("GetBlockRequestEnd") }() From 14f7e1a663538218091200b3f466dfb0296255cc Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Fri, 16 Jan 2015 02:13:00 -0800 Subject: [PATCH 0296/1035] addr-explosion mitigated adding mitigated adding our own addresses where received from peers see #573 This commit was moved from ipfs/go-bitswap@74c3cfc10a6ff3eb7f3c4facd7f1128f08ea1f73 --- bitswap/network/ipfs_impl.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bitswap/network/ipfs_impl.go 
b/bitswap/network/ipfs_impl.go index ea98cc87f..4415cf8cf 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -112,7 +112,9 @@ func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int) defer close(out) providers := bsnet.routing.FindProvidersAsync(ctx, k, max) for info := range providers { - bsnet.host.Peerstore().AddAddresses(info.ID, info.Addrs) + if info.ID != bsnet.host.ID() { // dont add addrs for ourselves. + bsnet.host.Peerstore().AddAddresses(info.ID, info.Addrs) + } select { case <-ctx.Done(): return From aaad1a253e02e40259b489e20f94e1e1368d93d3 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 18 Jan 2015 12:31:12 -0800 Subject: [PATCH 0297/1035] move generic packages to thirdparty (see thirdparty/README.md) This commit was moved from ipfs/go-bitswap@3165eb7d535452046fed3441c229f88054c2d733 --- bitswap/bitswap.go | 4 ++-- bitswap/bitswap_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/network/ipfs_impl.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 25025bb8e..770f4fd7f 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -18,10 +18,10 @@ import ( notifications "github.com/jbenet/go-ipfs/exchange/bitswap/notifications" wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" peer "github.com/jbenet/go-ipfs/p2p/peer" + "github.com/jbenet/go-ipfs/thirdparty/delay" + eventlog "github.com/jbenet/go-ipfs/thirdparty/eventlog" u "github.com/jbenet/go-ipfs/util" errors "github.com/jbenet/go-ipfs/util/debugerror" - "github.com/jbenet/go-ipfs/util/delay" - eventlog "github.com/jbenet/go-ipfs/util/eventlog" pset "github.com/jbenet/go-ipfs/util/peerset" // TODO move this to peerstore ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 64d5ead52..13bb3304f 100644 --- a/bitswap/bitswap_test.go +++ 
b/bitswap/bitswap_test.go @@ -13,8 +13,8 @@ import ( tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" p2ptestutil "github.com/jbenet/go-ipfs/p2p/test/util" mockrouting "github.com/jbenet/go-ipfs/routing/mock" + delay "github.com/jbenet/go-ipfs/thirdparty/delay" u "github.com/jbenet/go-ipfs/util" - delay "github.com/jbenet/go-ipfs/util/delay" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index e4b2ab832..f766f5ddf 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -8,7 +8,7 @@ import ( bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" peer "github.com/jbenet/go-ipfs/p2p/peer" - eventlog "github.com/jbenet/go-ipfs/util/eventlog" + eventlog "github.com/jbenet/go-ipfs/thirdparty/eventlog" ) // TODO consider taking responsibility for other types of requests. For diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 4415cf8cf..1bc47603a 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,8 +8,8 @@ import ( inet "github.com/jbenet/go-ipfs/p2p/net" peer "github.com/jbenet/go-ipfs/p2p/peer" routing "github.com/jbenet/go-ipfs/routing" + eventlog "github.com/jbenet/go-ipfs/thirdparty/eventlog" util "github.com/jbenet/go-ipfs/util" - eventlog "github.com/jbenet/go-ipfs/util/eventlog" ) var log = eventlog.Logger("bitswap_network") diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index bbf84995c..e80fccba5 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -10,7 +10,7 @@ import ( bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/p2p/peer" mockrouting "github.com/jbenet/go-ipfs/routing/mock" - delay "github.com/jbenet/go-ipfs/util/delay" + delay "github.com/jbenet/go-ipfs/thirdparty/delay" testutil 
"github.com/jbenet/go-ipfs/util/testutil" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 639bb00d3..7ee082cfd 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,8 +9,8 @@ import ( peer "github.com/jbenet/go-ipfs/p2p/peer" routing "github.com/jbenet/go-ipfs/routing" mockrouting "github.com/jbenet/go-ipfs/routing/mock" + delay "github.com/jbenet/go-ipfs/thirdparty/delay" util "github.com/jbenet/go-ipfs/util" - delay "github.com/jbenet/go-ipfs/util/delay" testutil "github.com/jbenet/go-ipfs/util/testutil" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 95019f297..5a6b59b3a 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -11,8 +11,8 @@ import ( tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" peer "github.com/jbenet/go-ipfs/p2p/peer" p2ptestutil "github.com/jbenet/go-ipfs/p2p/test/util" + delay "github.com/jbenet/go-ipfs/thirdparty/delay" datastore2 "github.com/jbenet/go-ipfs/util/datastore2" - delay "github.com/jbenet/go-ipfs/util/delay" testutil "github.com/jbenet/go-ipfs/util/testutil" ) From 6377e54a926a1cdeddc453b3d3e892ceb1f052e7 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 4 Jan 2015 17:45:37 -0500 Subject: [PATCH 0298/1035] doc This commit was moved from ipfs/go-bitswap@63458193452754a7eb33593d0f889110afc3ba12 --- bitswap/bitswap.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 770f4fd7f..fe6b8d7c4 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -295,8 +295,7 @@ func (bs *bitswap) clientWorker(parent context.Context) { for { select { - case <-broadcastSignal: - // Resend unfulfilled wantlist keys + case <-broadcastSignal: // resend unfulfilled wantlist keys bs.sendWantlistToProviders(ctx) broadcastSignal = time.After(rebroadcastDelay.Get()) case ks := <-bs.batchRequests: From ebb80ebf5b8d8cee5d273ce0de483792b5735f82 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: 
Sun, 4 Jan 2015 17:58:01 -0500 Subject: [PATCH 0299/1035] fix(bitswap/engine): get priority from wantlist This commit was moved from ipfs/go-bitswap@545938aa0220813862945fdbf50d152938c84468 --- bitswap/decision/engine.go | 5 ++--- bitswap/decision/ledger.go | 2 +- bitswap/wantlist/wantlist.go | 8 ++++---- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index f766f5ddf..cb1fc4add 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -187,13 +187,12 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { } for _, block := range m.Blocks() { - // FIXME extract blocks.NumBytes(block) or block.NumBytes() method log.Debug("got block %s %d bytes", block.Key(), len(block.Data)) l.ReceivedBytes(len(block.Data)) for _, l := range e.ledgerMap { - if l.WantListContains(block.Key()) { + if entry, ok := l.WantListContains(block.Key()); ok { newWorkExists = true - e.peerRequestQueue.Push(wl.Entry{block.Key(), 1}, l.Partner) + e.peerRequestQueue.Push(entry, l.Partner) } } } diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 273c3e706..8e1eb83ee 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -77,7 +77,7 @@ func (l *ledger) CancelWant(k u.Key) { l.wantList.Remove(k) } -func (l *ledger) WantListContains(k u.Key) bool { +func (l *ledger) WantListContains(k u.Key) (wl.Entry, bool) { return l.wantList.Contains(k) } diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index aa58ee155..14d729d99 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -55,7 +55,7 @@ func (w *ThreadSafe) Remove(k u.Key) { w.Wantlist.Remove(k) } -func (w *ThreadSafe) Contains(k u.Key) bool { +func (w *ThreadSafe) Contains(k u.Key) (Entry, bool) { // TODO rm defer for perf w.lk.RLock() defer w.lk.RUnlock() @@ -88,9 +88,9 @@ func (w *Wantlist) Remove(k u.Key) { delete(w.set, k) } -func (w *Wantlist) 
Contains(k u.Key) bool { - _, ok := w.set[k] - return ok +func (w *Wantlist) Contains(k u.Key) (Entry, bool) { + e, ok := w.set[k] + return e, ok } func (w *Wantlist) Entries() []Entry { From e6fdf5a5763599dbd0b8f86a85c9654f3bf69610 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Dec 2014 22:47:34 -0500 Subject: [PATCH 0300/1035] feat: add time to taskQueue License: MIT Signed-off-by: Brian Tiger Chow Conflicts: exchange/bitswap/decision/taskqueue.go This commit was moved from ipfs/go-bitswap@bbad81f4d913506fce699495f51f77e8d1c169e9 --- bitswap/decision/taskqueue.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/bitswap/decision/taskqueue.go b/bitswap/decision/taskqueue.go index 659e287d0..e2087a472 100644 --- a/bitswap/decision/taskqueue.go +++ b/bitswap/decision/taskqueue.go @@ -3,6 +3,7 @@ package decision import ( "fmt" "sync" + "time" wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" peer "github.com/jbenet/go-ipfs/p2p/peer" @@ -28,7 +29,9 @@ func newTaskQueue() *taskQueue { type task struct { Entry wantlist.Entry Target peer.ID - Trash bool + Trash bool // TODO make private + + created time.Time } func (t *task) String() string { @@ -46,8 +49,9 @@ func (tl *taskQueue) Push(entry wantlist.Entry, to peer.ID) { return } task := &task{ - Entry: entry, - Target: to, + Entry: entry, + Target: to, + created: time.Now(), } tl.tasks = append(tl.tasks, task) tl.taskmap[taskKey(to, entry.Key)] = task From f4ba743bd3285b92bc023e9a0723d993668c4009 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Dec 2014 23:34:48 -0500 Subject: [PATCH 0301/1035] tests: add bench License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@6f8835c55fa6299b2f4ec09b665df988d5ce93fb --- bitswap/decision/bench_test.go | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 bitswap/decision/bench_test.go diff --git a/bitswap/decision/bench_test.go 
b/bitswap/decision/bench_test.go new file mode 100644 index 000000000..4fa6336b9 --- /dev/null +++ b/bitswap/decision/bench_test.go @@ -0,0 +1,25 @@ +package decision + +import ( + "math" + "testing" + + "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" + "github.com/jbenet/go-ipfs/p2p/peer" + "github.com/jbenet/go-ipfs/util" + "github.com/jbenet/go-ipfs/util/testutil" +) + +// FWIW: At the time of this commit, including a timestamp in task increases +// time cost of Push by 3%. +func BenchmarkTaskQueuePush(b *testing.B) { + q := newTaskQueue() + peers := []peer.ID{ + testutil.RandPeerIDFatal(b), + testutil.RandPeerIDFatal(b), + testutil.RandPeerIDFatal(b), + } + for i := 0; i < b.N; i++ { + q.Push(wantlist.Entry{Key: util.Key(i), Priority: math.MaxInt32}, peers[i%len(peers)]) + } +} From 416b47acf8e0845061b81d2550ed0da8629ec54a Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Thu, 18 Dec 2014 23:07:39 -0500 Subject: [PATCH 0302/1035] feat(PQ) refactor: peerRequestQueue it's a mistake to make one queue to fit all. Go's lack of algebraic types turns a generalized queue into a monstrosity of type checking/casting. Better to have individual queues for individual purposes. Conflicts: exchange/bitswap/decision/bench_test.go exchange/bitswap/decision/tasks/task_queue.go fix(bitswap.decision.PRQ): if peers match, always return result of pri comparison fix(bitswap.decision.Engine): push to the queue before notifying TOCTOU bug 1. client notifies 2. worker checks (finds nil) 3. worker sleeps 3. 
client pushes (worker missed the update) test(PQ): improve documentation and add test test(bitswap.decision.Engine): handling received messages License: MIT Signed-off-by: Brian Tiger Chow This commit was moved from ipfs/go-bitswap@3b397e8e0df35a85cdf7b66b8a1ce4d7a4df51bc --- bitswap/decision/bench_test.go | 3 +- bitswap/decision/engine.go | 8 +- bitswap/decision/engine_test.go | 117 +++++++++++++++-- bitswap/decision/peer_request_queue.go | 134 ++++++++++++++++++++ bitswap/decision/peer_request_queue_test.go | 56 ++++++++ bitswap/decision/pq/container.go | 105 +++++++++++++++ bitswap/decision/pq/container_test.go | 85 +++++++++++++ bitswap/decision/taskqueue.go | 93 -------------- 8 files changed, 494 insertions(+), 107 deletions(-) create mode 100644 bitswap/decision/peer_request_queue.go create mode 100644 bitswap/decision/peer_request_queue_test.go create mode 100644 bitswap/decision/pq/container.go create mode 100644 bitswap/decision/pq/container_test.go delete mode 100644 bitswap/decision/taskqueue.go diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 4fa6336b9..a79c32b05 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -13,12 +13,13 @@ import ( // FWIW: At the time of this commit, including a timestamp in task increases // time cost of Push by 3%. func BenchmarkTaskQueuePush(b *testing.B) { - q := newTaskQueue() + q := newPRQ() peers := []peer.ID{ testutil.RandPeerIDFatal(b), testutil.RandPeerIDFatal(b), testutil.RandPeerIDFatal(b), } + b.ResetTimer() for i := 0; i < b.N; i++ { q.Push(wantlist.Entry{Key: util.Key(i), Priority: math.MaxInt32}, peers[i%len(peers)]) } diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index cb1fc4add..ea0491c2c 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -59,7 +59,7 @@ type Engine struct { // peerRequestQueue is a priority queue of requests received from peers. 
// Requests are popped from the queue, packaged up, and placed in the // outbox. - peerRequestQueue *taskQueue + peerRequestQueue peerRequestQueue // FIXME it's a bit odd for the client and the worker to both share memory // (both modify the peerRequestQueue) and also to communicate over the @@ -82,7 +82,7 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { e := &Engine{ ledgerMap: make(map[peer.ID]*ledger), bs: bs, - peerRequestQueue: newTaskQueue(), + peerRequestQueue: newPRQ(), outbox: make(chan Envelope, sizeOutboxChan), workSignal: make(chan struct{}), } @@ -180,8 +180,8 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { log.Debug("wants", entry.Key, entry.Priority) l.Wants(entry.Key, entry.Priority) if exists, err := e.bs.Has(entry.Key); err == nil && exists { - newWorkExists = true e.peerRequestQueue.Push(entry.Entry, p) + newWorkExists = true } } } @@ -191,8 +191,8 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { l.ReceivedBytes(len(block.Data)) for _, l := range e.ledgerMap { if entry, ok := l.WantListContains(block.Key()); ok { - newWorkExists = true e.peerRequestQueue.Push(entry, l.Partner) + newWorkExists = true } } } diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 08e729dc8..b2583a020 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -1,17 +1,19 @@ package decision import ( + "math" "strings" + "sync" "testing" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" - sync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" - + dssync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" blocks "github.com/jbenet/go-ipfs/blocks" blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" message 
"github.com/jbenet/go-ipfs/exchange/bitswap/message" peer "github.com/jbenet/go-ipfs/p2p/peer" + testutil "github.com/jbenet/go-ipfs/util/testutil" ) type peerAndEngine struct { @@ -19,18 +21,20 @@ type peerAndEngine struct { Engine *Engine } -func newPeerAndLedgermanager(idStr string) peerAndEngine { +func newEngine(ctx context.Context, idStr string) peerAndEngine { return peerAndEngine{ Peer: peer.ID(idStr), //Strategy: New(true), - Engine: NewEngine(context.TODO(), - blockstore.NewBlockstore(sync.MutexWrap(ds.NewMapDatastore()))), + Engine: NewEngine(ctx, + blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))), } } func TestConsistentAccounting(t *testing.T) { - sender := newPeerAndLedgermanager("Ernie") - receiver := newPeerAndLedgermanager("Bert") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sender := newEngine(ctx, "Ernie") + receiver := newEngine(ctx, "Bert") // Send messages from Ernie to Bert for i := 0; i < 1000; i++ { @@ -62,8 +66,10 @@ func TestConsistentAccounting(t *testing.T) { func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { - sanfrancisco := newPeerAndLedgermanager("sf") - seattle := newPeerAndLedgermanager("sea") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sanfrancisco := newEngine(ctx, "sf") + seattle := newEngine(ctx, "sea") m := message.New() @@ -91,3 +97,96 @@ func peerIsPartner(p peer.ID, e *Engine) bool { } return false } + +func TestOutboxClosedWhenEngineClosed(t *testing.T) { + t.SkipNow() // TODO implement *Engine.Close + e := NewEngine(context.Background(), blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))) + var wg sync.WaitGroup + wg.Add(1) + go func() { + for _ = range e.Outbox() { + } + wg.Done() + }() + // e.Close() + wg.Wait() + if _, ok := <-e.Outbox(); ok { + t.Fatal("channel should be closed") + } +} + +func TestPartnerWantsThenCancels(t *testing.T) { + alphabet := strings.Split("abcdefghijklmnopqrstuvwxyz", "") + 
vowels := strings.Split("aeiou", "") + + type testCase [][]string + testcases := []testCase{ + testCase{ + alphabet, vowels, + }, + testCase{ + alphabet, stringsComplement(alphabet, vowels), + }, + } + + for _, testcase := range testcases { + set := testcase[0] + cancels := testcase[1] + keeps := stringsComplement(set, cancels) + + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + e := NewEngine(context.Background(), bs) + partner := testutil.RandPeerIDFatal(t) + for _, letter := range set { + block := blocks.NewBlock([]byte(letter)) + bs.Put(block) + } + partnerWants(e, set, partner) + partnerCancels(e, cancels, partner) + assertPoppedInOrder(t, e, keeps) + } + +} + +func partnerWants(e *Engine, keys []string, partner peer.ID) { + add := message.New() + for i, letter := range keys { + block := blocks.NewBlock([]byte(letter)) + add.AddEntry(block.Key(), math.MaxInt32-i) + } + e.MessageReceived(partner, add) +} + +func partnerCancels(e *Engine, keys []string, partner peer.ID) { + cancels := message.New() + for _, k := range keys { + block := blocks.NewBlock([]byte(k)) + cancels.Cancel(block.Key()) + } + e.MessageReceived(partner, cancels) +} + +func assertPoppedInOrder(t *testing.T, e *Engine, keys []string) { + for _, k := range keys { + envelope := <-e.Outbox() + received := envelope.Message.Blocks()[0] + expected := blocks.NewBlock([]byte(k)) + if received.Key() != expected.Key() { + t.Fatal("received", string(received.Data), "expected", string(expected.Data)) + } + } +} + +func stringsComplement(set, subset []string) []string { + m := make(map[string]struct{}) + for _, letter := range subset { + m[letter] = struct{}{} + } + var complement []string + for _, letter := range set { + if _, exists := m[letter]; !exists { + complement = append(complement, letter) + } + } + return complement +} diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go new file mode 100644 index 000000000..030f9bdab --- /dev/null 
+++ b/bitswap/decision/peer_request_queue.go @@ -0,0 +1,134 @@ +package decision + +import ( + "sync" + "time" + + pq "github.com/jbenet/go-ipfs/exchange/bitswap/decision/pq" + wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" + peer "github.com/jbenet/go-ipfs/p2p/peer" + u "github.com/jbenet/go-ipfs/util" +) + +type peerRequestQueue interface { + // Pop returns the next peerRequestTask. Returns nil if the peerRequestQueue is empty. + Pop() *peerRequestTask + Push(entry wantlist.Entry, to peer.ID) + Remove(k u.Key, p peer.ID) + // NB: cannot expose simply expose taskQueue.Len because trashed elements + // may exist. These trashed elements should not contribute to the count. +} + +func newPRQ() peerRequestQueue { + return &prq{ + taskMap: make(map[string]*peerRequestTask), + taskQueue: pq.New(wrapCmp(V1)), + } +} + +var _ peerRequestQueue = &prq{} + +// TODO: at some point, the strategy needs to plug in here +// to help decide how to sort tasks (on add) and how to select +// tasks (on getnext). For now, we are assuming a dumb/nice strategy. +type prq struct { + lock sync.Mutex + taskQueue pq.PQ + taskMap map[string]*peerRequestTask +} + +// Push currently adds a new peerRequestTask to the end of the list +func (tl *prq) Push(entry wantlist.Entry, to peer.ID) { + tl.lock.Lock() + defer tl.lock.Unlock() + if task, ok := tl.taskMap[taskKey(to, entry.Key)]; ok { + task.Entry.Priority = entry.Priority + tl.taskQueue.Update(task.index) + return + } + task := &peerRequestTask{ + Entry: entry, + Target: to, + created: time.Now(), + } + tl.taskQueue.Push(task) + tl.taskMap[task.Key()] = task +} + +// Pop 'pops' the next task to be performed. Returns nil if no task exists. 
+func (tl *prq) Pop() *peerRequestTask { + tl.lock.Lock() + defer tl.lock.Unlock() + var out *peerRequestTask + for tl.taskQueue.Len() > 0 { + out = tl.taskQueue.Pop().(*peerRequestTask) + delete(tl.taskMap, out.Key()) + if out.trash { + continue // discarding tasks that have been removed + } + break // and return |out| + } + return out +} + +// Remove removes a task from the queue +func (tl *prq) Remove(k u.Key, p peer.ID) { + tl.lock.Lock() + t, ok := tl.taskMap[taskKey(p, k)] + if ok { + // remove the task "lazily" + // simply mark it as trash, so it'll be dropped when popped off the + // queue. + t.trash = true + } + tl.lock.Unlock() +} + +type peerRequestTask struct { + Entry wantlist.Entry + Target peer.ID // required + + // trash in a book-keeping field + trash bool + // created marks the time that the task was added to the queue + created time.Time + index int // book-keeping field used by the pq container +} + +// Key uniquely identifies a task. +func (t *peerRequestTask) Key() string { + return taskKey(t.Target, t.Entry.Key) +} + +func (t *peerRequestTask) Index() int { + return t.index +} + +func (t *peerRequestTask) SetIndex(i int) { + t.index = i +} + +// taskKey returns a key that uniquely identifies a task. +func taskKey(p peer.ID, k u.Key) string { + return string(p.String() + k.String()) +} + +// FIFO is a basic task comparator that returns tasks in the order created. +var FIFO = func(a, b *peerRequestTask) bool { + return a.created.Before(b.created) +} + +// V1 respects the target peer's wantlist priority. For tasks involving +// different peers, the oldest task is prioritized. 
+var V1 = func(a, b *peerRequestTask) bool { + if a.Target == b.Target { + return a.Entry.Priority > b.Entry.Priority + } + return FIFO(a, b) +} + +func wrapCmp(f func(a, b *peerRequestTask) bool) func(a, b pq.Elem) bool { + return func(a, b pq.Elem) bool { + return f(a.(*peerRequestTask), b.(*peerRequestTask)) + } +} diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go new file mode 100644 index 000000000..fa6102d67 --- /dev/null +++ b/bitswap/decision/peer_request_queue_test.go @@ -0,0 +1,56 @@ +package decision + +import ( + "math" + "math/rand" + "sort" + "strings" + "testing" + + "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" + "github.com/jbenet/go-ipfs/util" + "github.com/jbenet/go-ipfs/util/testutil" +) + +func TestPushPop(t *testing.T) { + prq := newPRQ() + partner := testutil.RandPeerIDFatal(t) + alphabet := strings.Split("abcdefghijklmnopqrstuvwxyz", "") + vowels := strings.Split("aeiou", "") + consonants := func() []string { + var out []string + for _, letter := range alphabet { + skip := false + for _, vowel := range vowels { + if letter == vowel { + skip = true + } + } + if !skip { + out = append(out, letter) + } + } + return out + }() + sort.Strings(alphabet) + sort.Strings(vowels) + sort.Strings(consonants) + + // add a bunch of blocks. cancel some. drain the queue. 
the queue should only have the kept entries + + for _, index := range rand.Perm(len(alphabet)) { // add blocks for all letters + letter := alphabet[index] + t.Log(partner.String()) + prq.Push(wantlist.Entry{Key: util.Key(letter), Priority: math.MaxInt32 - index}, partner) + } + for _, consonant := range consonants { + prq.Remove(util.Key(consonant), partner) + } + + for _, expected := range vowels { + received := prq.Pop().Entry.Key + if received != util.Key(expected) { + t.Fatal("received", string(received), "expected", string(expected)) + } + } +} diff --git a/bitswap/decision/pq/container.go b/bitswap/decision/pq/container.go new file mode 100644 index 000000000..9f20c31c7 --- /dev/null +++ b/bitswap/decision/pq/container.go @@ -0,0 +1,105 @@ +package pq + +import "container/heap" + +// PQ is a basic priority queue. +type PQ interface { + // Push adds the ele + Push(Elem) + // Pop returns the highest priority Elem in PQ. + Pop() Elem + // Len returns the number of elements in the PQ. + Len() int + // Update `fixes` the PQ. + Update(index int) + + // TODO explain why this interface should not be extended + // It does not support Remove. This is because... +} + +// Elem describes elements that can be added to the PQ. Clients must implement +// this interface. +type Elem interface { + // SetIndex stores the int index. + SetIndex(int) + // Index returns the last given by SetIndex(int). + Index() int +} + +// ElemComparator returns true if pri(a) > pri(b) +type ElemComparator func(a, b Elem) bool + +// New creates a PQ with a client-supplied comparator. +func New(cmp ElemComparator) PQ { + q := &wrapper{heapinterface{ + elems: make([]Elem, 0), + cmp: cmp, + }} + heap.Init(&q.heapinterface) + return q +} + +// wrapper exists because we cannot re-define Push. 
We want to expose +// Push(Elem) but heap.Interface requires Push(interface{}) +type wrapper struct { + heapinterface +} + +var _ PQ = &wrapper{} + +func (w *wrapper) Push(e Elem) { + heap.Push(&w.heapinterface, e) +} + +func (w *wrapper) Pop() Elem { + return heap.Pop(&w.heapinterface).(Elem) +} + +func (w *wrapper) Update(index int) { + heap.Fix(&w.heapinterface, index) +} + +// heapinterface handles dirty low-level details of managing the priority queue. +type heapinterface struct { + elems []Elem + cmp ElemComparator +} + +var _ heap.Interface = &heapinterface{} + +// public interface + +func (q *heapinterface) Len() int { + return len(q.elems) +} + +// Less delegates the decision to the comparator +func (q *heapinterface) Less(i, j int) bool { + return q.cmp(q.elems[i], q.elems[j]) +} + +// Swap swaps the elements with indexes i and j. +func (q *heapinterface) Swap(i, j int) { + q.elems[i], q.elems[j] = q.elems[j], q.elems[i] + q.elems[i].SetIndex(i) + q.elems[j].SetIndex(j) +} + +// Note that Push and Pop in this interface are for package heap's +// implementation to call. To add and remove things from the heap, wrap with +// the pq struct to call heap.Push and heap.Pop. + +func (q *heapinterface) Push(x interface{}) { // where to put the elem? + t := x.(Elem) + t.SetIndex(len(q.elems)) + q.elems = append(q.elems, t) +} + +func (q *heapinterface) Pop() interface{} { + old := q.elems + n := len(old) + elem := old[n-1] // remove the last + elem.SetIndex(-1) // for safety // FIXME why? 
+ q.elems = old[0 : n-1] // shrink + return elem +} diff --git a/bitswap/decision/pq/container_test.go b/bitswap/decision/pq/container_test.go new file mode 100644 index 000000000..d96c677cb --- /dev/null +++ b/bitswap/decision/pq/container_test.go @@ -0,0 +1,85 @@ +package pq + +import ( + "sort" + "testing" +) + +type TestElem struct { + Key string + Priority int + index int +} + +func (e *TestElem) Index() int { + return e.index +} + +func (e *TestElem) SetIndex(i int) { + e.index = i +} + +var PriorityComparator = func(i, j Elem) bool { + return i.(*TestElem).Priority > j.(*TestElem).Priority +} + +func TestQueuesReturnTypeIsSameAsParameterToPush(t *testing.T) { + q := New(PriorityComparator) + expectedKey := "foo" + elem := &TestElem{Key: expectedKey} + q.Push(elem) + switch v := q.Pop().(type) { + case *TestElem: + if v.Key != expectedKey { + t.Fatal("the key doesn't match the pushed value") + } + default: + t.Fatal("the queue is not casting values appropriately") + } +} + +func TestCorrectnessOfPop(t *testing.T) { + q := New(PriorityComparator) + tasks := []TestElem{ + TestElem{Key: "a", Priority: 9}, + TestElem{Key: "b", Priority: 4}, + TestElem{Key: "c", Priority: 3}, + TestElem{Key: "d", Priority: 0}, + TestElem{Key: "e", Priority: 6}, + } + for _, e := range tasks { + q.Push(&e) + } + var priorities []int + for q.Len() > 0 { + i := q.Pop().(*TestElem).Priority + t.Log("popped %v", i) + priorities = append(priorities, i) + } + if !sort.IntsAreSorted(priorities) { + t.Fatal("the values were not returned in sorted order") + } +} + +func TestUpdate(t *testing.T) { + t.Log(` + Add 3 elements. + Update the highest priority element to have the lowest priority and fix the queue. 
+ It should come out last.`) + q := New(PriorityComparator) + lowest := &TestElem{Key: "originallyLowest", Priority: 1} + middle := &TestElem{Key: "originallyMiddle", Priority: 2} + highest := &TestElem{Key: "toBeUpdated", Priority: 3} + q.Push(middle) + q.Push(highest) + q.Push(lowest) + if q.Pop().(*TestElem).Key != highest.Key { + t.Fatal("popped element doesn't have the highest priority") + } + q.Push(highest) // re-add the popped element + highest.Priority = 0 // update the PQ + q.Update(highest.Index()) // fix the PQ + if q.Pop().(*TestElem).Key != middle.Key { + t.Fatal("middle element should now have the highest priority") + } +} diff --git a/bitswap/decision/taskqueue.go b/bitswap/decision/taskqueue.go deleted file mode 100644 index e2087a472..000000000 --- a/bitswap/decision/taskqueue.go +++ /dev/null @@ -1,93 +0,0 @@ -package decision - -import ( - "fmt" - "sync" - "time" - - wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" - peer "github.com/jbenet/go-ipfs/p2p/peer" - u "github.com/jbenet/go-ipfs/util" -) - -// TODO: at some point, the strategy needs to plug in here -// to help decide how to sort tasks (on add) and how to select -// tasks (on getnext). For now, we are assuming a dumb/nice strategy. 
-type taskQueue struct { - // TODO: make this into a priority queue - lock sync.Mutex - tasks []*task - taskmap map[string]*task -} - -func newTaskQueue() *taskQueue { - return &taskQueue{ - taskmap: make(map[string]*task), - } -} - -type task struct { - Entry wantlist.Entry - Target peer.ID - Trash bool // TODO make private - - created time.Time -} - -func (t *task) String() string { - return fmt.Sprintf("", t.Target, t.Entry.Key, t.Trash) -} - -// Push currently adds a new task to the end of the list -func (tl *taskQueue) Push(entry wantlist.Entry, to peer.ID) { - tl.lock.Lock() - defer tl.lock.Unlock() - if task, ok := tl.taskmap[taskKey(to, entry.Key)]; ok { - // TODO: when priority queue is implemented, - // rearrange this task - task.Entry.Priority = entry.Priority - return - } - task := &task{ - Entry: entry, - Target: to, - created: time.Now(), - } - tl.tasks = append(tl.tasks, task) - tl.taskmap[taskKey(to, entry.Key)] = task -} - -// Pop 'pops' the next task to be performed. Returns nil no task exists. -func (tl *taskQueue) Pop() *task { - tl.lock.Lock() - defer tl.lock.Unlock() - var out *task - for len(tl.tasks) > 0 { - // TODO: instead of zero, use exponential distribution - // it will help reduce the chance of receiving - // the same block from multiple peers - out = tl.tasks[0] - tl.tasks = tl.tasks[1:] - delete(tl.taskmap, taskKey(out.Target, out.Entry.Key)) - if out.Trash { - continue // discarding tasks that have been removed - } - break // and return |out| - } - return out -} - -// Remove lazily removes a task from the queue -func (tl *taskQueue) Remove(k u.Key, p peer.ID) { - tl.lock.Lock() - t, ok := tl.taskmap[taskKey(p, k)] - if ok { - t.Trash = true - } - tl.lock.Unlock() -} - -// taskKey returns a key that uniquely identifies a task. 
-func taskKey(p peer.ID, k u.Key) string { - return string(p) + string(k) -} From c4dcb576a19251c674131fd26b02b29caface155 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 18 Jan 2015 14:12:55 -0800 Subject: [PATCH 0303/1035] move PQ to thirdparty This commit was moved from ipfs/go-bitswap@a70a16c9db1cff1be5577f2c86dec0030c165206 --- bitswap/decision/peer_request_queue.go | 2 +- bitswap/decision/pq/container.go | 105 ------------------------- bitswap/decision/pq/container_test.go | 85 -------------------- 3 files changed, 1 insertion(+), 191 deletions(-) delete mode 100644 bitswap/decision/pq/container.go delete mode 100644 bitswap/decision/pq/container_test.go diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 030f9bdab..8b9b1c2f2 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -4,9 +4,9 @@ import ( "sync" "time" - pq "github.com/jbenet/go-ipfs/exchange/bitswap/decision/pq" wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" peer "github.com/jbenet/go-ipfs/p2p/peer" + pq "github.com/jbenet/go-ipfs/thirdparty/pq" u "github.com/jbenet/go-ipfs/util" ) diff --git a/bitswap/decision/pq/container.go b/bitswap/decision/pq/container.go deleted file mode 100644 index 9f20c31c7..000000000 --- a/bitswap/decision/pq/container.go +++ /dev/null @@ -1,105 +0,0 @@ -package pq - -import "container/heap" - -// PQ is a basic priority queue. -type PQ interface { - // Push adds the ele - Push(Elem) - // Pop returns the highest priority Elem in PQ. - Pop() Elem - // Len returns the number of elements in the PQ. - Len() int - // Update `fixes` the PQ. - Update(index int) - - // TODO explain why this interface should not be extended - // It does not support Remove. This is because... -} - -// Elem describes elements that can be added to the PQ. Clients must implement -// this interface. -type Elem interface { - // SetIndex stores the int index. 
- SetIndex(int) - // Index returns the last given by SetIndex(int). - Index() int -} - -// ElemComparator returns true if pri(a) > pri(b) -type ElemComparator func(a, b Elem) bool - -// New creates a PQ with a client-supplied comparator. -func New(cmp ElemComparator) PQ { - q := &wrapper{heapinterface{ - elems: make([]Elem, 0), - cmp: cmp, - }} - heap.Init(&q.heapinterface) - return q -} - -// wrapper exists because we cannot re-define Push. We want to expose -// Push(Elem) but heap.Interface requires Push(interface{}) -type wrapper struct { - heapinterface -} - -var _ PQ = &wrapper{} - -func (w *wrapper) Push(e Elem) { - heap.Push(&w.heapinterface, e) -} - -func (w *wrapper) Pop() Elem { - return heap.Pop(&w.heapinterface).(Elem) -} - -func (w *wrapper) Update(index int) { - heap.Fix(&w.heapinterface, index) -} - -// heapinterface handles dirty low-level details of managing the priority queue. -type heapinterface struct { - elems []Elem - cmp ElemComparator -} - -var _ heap.Interface = &heapinterface{} - -// public interface - -func (q *heapinterface) Len() int { - return len(q.elems) -} - -// Less delegates the decision to the comparator -func (q *heapinterface) Less(i, j int) bool { - return q.cmp(q.elems[i], q.elems[j]) -} - -// Swap swaps the elements with indexes i and j. -func (q *heapinterface) Swap(i, j int) { - q.elems[i], q.elems[j] = q.elems[j], q.elems[i] - q.elems[i].SetIndex(i) - q.elems[j].SetIndex(j) -} - -// Note that Push and Pop in this interface are for package heap's -// implementation to call. To add and remove things from the heap, wrap with -// the pq struct to call heap.Push and heap.Pop. - -func (q *heapinterface) Push(x interface{}) { // where to put the elem? - t := x.(Elem) - t.SetIndex(len(q.elems)) - q.elems = append(q.elems, t) -} - -func (q *heapinterface) Pop() interface{} { - old := q.elems - n := len(old) - elem := old[n-1] // remove the last - elem.SetIndex(-1) // for safety // FIXME why? 
- q.elems = old[0 : n-1] // shrink - return elem -} diff --git a/bitswap/decision/pq/container_test.go b/bitswap/decision/pq/container_test.go deleted file mode 100644 index d96c677cb..000000000 --- a/bitswap/decision/pq/container_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package pq - -import ( - "sort" - "testing" -) - -type TestElem struct { - Key string - Priority int - index int -} - -func (e *TestElem) Index() int { - return e.index -} - -func (e *TestElem) SetIndex(i int) { - e.index = i -} - -var PriorityComparator = func(i, j Elem) bool { - return i.(*TestElem).Priority > j.(*TestElem).Priority -} - -func TestQueuesReturnTypeIsSameAsParameterToPush(t *testing.T) { - q := New(PriorityComparator) - expectedKey := "foo" - elem := &TestElem{Key: expectedKey} - q.Push(elem) - switch v := q.Pop().(type) { - case *TestElem: - if v.Key != expectedKey { - t.Fatal("the key doesn't match the pushed value") - } - default: - t.Fatal("the queue is not casting values appropriately") - } -} - -func TestCorrectnessOfPop(t *testing.T) { - q := New(PriorityComparator) - tasks := []TestElem{ - TestElem{Key: "a", Priority: 9}, - TestElem{Key: "b", Priority: 4}, - TestElem{Key: "c", Priority: 3}, - TestElem{Key: "d", Priority: 0}, - TestElem{Key: "e", Priority: 6}, - } - for _, e := range tasks { - q.Push(&e) - } - var priorities []int - for q.Len() > 0 { - i := q.Pop().(*TestElem).Priority - t.Log("popped %v", i) - priorities = append(priorities, i) - } - if !sort.IntsAreSorted(priorities) { - t.Fatal("the values were not returned in sorted order") - } -} - -func TestUpdate(t *testing.T) { - t.Log(` - Add 3 elements. - Update the highest priority element to have the lowest priority and fix the queue. 
- It should come out last.`) - q := New(PriorityComparator) - lowest := &TestElem{Key: "originallyLowest", Priority: 1} - middle := &TestElem{Key: "originallyMiddle", Priority: 2} - highest := &TestElem{Key: "toBeUpdated", Priority: 3} - q.Push(middle) - q.Push(highest) - q.Push(lowest) - if q.Pop().(*TestElem).Key != highest.Key { - t.Fatal("popped element doesn't have the highest priority") - } - q.Push(highest) // re-add the popped element - highest.Priority = 0 // update the PQ - q.Update(highest.Index()) // fix the PQ - if q.Pop().(*TestElem).Key != middle.Key { - t.Fatal("middle element should now have the highest priority") - } -} From bfe05f1820690584edbf2a34b68a1ddb6264a902 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 18 Jan 2015 23:37:04 -0800 Subject: [PATCH 0304/1035] fix(bitswap.decision.Engine) enqueue only the freshest messages Before, the engine worker would pop a task and block on send to the bitswap worker even if the bitswap worker wasn't to receive. Since the task could have been invalidated during this blocking send, a small number of stale (already acquired) blocks would be send to partners. Now, tasks are only popped off of the queue when bitswap is ready to send them over the wire. This is accomplished by removing the outboxChanBuffer and implementing a two-phase communication sequence. 
This commit was moved from ipfs/go-bitswap@e82011a8e5b72029785e1e860a404cb6f937a206 --- bitswap/bitswap.go | 11 ++++-- bitswap/decision/engine.go | 66 +++++++++++++++++++-------------- bitswap/decision/engine_test.go | 53 +++++++++++++++++--------- 3 files changed, 81 insertions(+), 49 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index fe6b8d7c4..f27f0cc36 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -277,10 +277,13 @@ func (bs *bitswap) taskWorker(ctx context.Context) { case <-ctx.Done(): log.Debugf("exiting") return - case envelope := <-bs.engine.Outbox(): - log.Debugf("message to %s sending...", envelope.Peer) - bs.send(ctx, envelope.Peer, envelope.Message) - log.Debugf("message to %s sent", envelope.Peer) + case nextEnvelope := <-bs.engine.Outbox(): + select { + case <-ctx.Done(): + return + case envelope := <-nextEnvelope: + bs.send(ctx, envelope.Peer, envelope.Message) + } } } } diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index ea0491c2c..b84732e82 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -44,7 +44,8 @@ import ( var log = eventlog.Logger("engine") const ( - sizeOutboxChan = 4 + // outboxChanBuffer must be 0 to prevent stale messages from being sent + outboxChanBuffer = 0 ) // Envelope contains a message for a Peer @@ -68,8 +69,9 @@ type Engine struct { // that case, no lock would be required. workSignal chan struct{} - // outbox contains outgoing messages to peers - outbox chan Envelope + // outbox contains outgoing messages to peers. 
This is owned by the + // taskWorker goroutine + outbox chan (<-chan Envelope) bs bstore.Blockstore @@ -83,7 +85,7 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { ledgerMap: make(map[peer.ID]*ledger), bs: bs, peerRequestQueue: newPRQ(), - outbox: make(chan Envelope, sizeOutboxChan), + outbox: make(chan (<-chan Envelope), outboxChanBuffer), workSignal: make(chan struct{}), } go e.taskWorker(ctx) @@ -91,45 +93,55 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { } func (e *Engine) taskWorker(ctx context.Context) { - log := log.Prefix("bitswap.Engine.taskWorker") + defer close(e.outbox) // because taskWorker uses the channel exclusively + for { + oneTimeUse := make(chan Envelope, 1) // buffer to prevent blocking + select { + case <-ctx.Done(): + return + case e.outbox <- oneTimeUse: + } + // receiver is ready for an outoing envelope. let's prepare one. first, + // we must acquire a task from the PQ... + envelope, err := e.nextEnvelope(ctx) + if err != nil { + close(oneTimeUse) + return // ctx cancelled + } + oneTimeUse <- *envelope // buffered. won't block + close(oneTimeUse) + } +} + +// nextEnvelope runs in the taskWorker goroutine. Returns an error if the +// context is cancelled before the next Envelope can be created. +func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { for { nextTask := e.peerRequestQueue.Pop() - if nextTask == nil { - // No tasks in the list? - // Wait until there are! + for nextTask == nil { select { case <-ctx.Done(): - log.Debugf("exiting: %s", ctx.Err()) - return + return nil, ctx.Err() case <-e.workSignal: - log.Debugf("woken up") + nextTask = e.peerRequestQueue.Pop() } - continue } - log := log.Prefix("%s", nextTask) - log.Debugf("processing") + + // with a task in hand, we're ready to prepare the envelope... 
block, err := e.bs.Get(nextTask.Entry.Key) if err != nil { - log.Warning("engine: task exists to send block, but block is not in blockstore") continue } - // construct message here so we can make decisions about any additional - // information we may want to include at this time. - m := bsmsg.New() + + m := bsmsg.New() // TODO: maybe add keys from our wantlist? m.AddBlock(block) - // TODO: maybe add keys from our wantlist? - log.Debugf("sending...") - select { - case <-ctx.Done(): - return - case e.outbox <- Envelope{Peer: nextTask.Target, Message: m}: - log.Debugf("sent") - } + return &Envelope{Peer: nextTask.Target, Message: m}, nil } } -func (e *Engine) Outbox() <-chan Envelope { +// Outbox returns a channel of one-time use Envelope channels. +func (e *Engine) Outbox() <-chan (<-chan Envelope) { return e.outbox } diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index b2583a020..8e5ab672c 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -1,6 +1,8 @@ package decision import ( + "errors" + "fmt" "math" "strings" "sync" @@ -104,7 +106,8 @@ func TestOutboxClosedWhenEngineClosed(t *testing.T) { var wg sync.WaitGroup wg.Add(1) go func() { - for _ = range e.Outbox() { + for nextEnvelope := range e.Outbox() { + <-nextEnvelope } wg.Done() }() @@ -116,6 +119,10 @@ func TestOutboxClosedWhenEngineClosed(t *testing.T) { } func TestPartnerWantsThenCancels(t *testing.T) { + numRounds := 10 + if testing.Short() { + numRounds = 1 + } alphabet := strings.Split("abcdefghijklmnopqrstuvwxyz", "") vowels := strings.Split("aeiou", "") @@ -129,23 +136,31 @@ func TestPartnerWantsThenCancels(t *testing.T) { }, } - for _, testcase := range testcases { - set := testcase[0] - cancels := testcase[1] - keeps := stringsComplement(set, cancels) - - bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - e := NewEngine(context.Background(), bs) - partner := testutil.RandPeerIDFatal(t) - for _, letter := range set 
{ - block := blocks.NewBlock([]byte(letter)) - bs.Put(block) + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + for _, letter := range alphabet { + block := blocks.NewBlock([]byte(letter)) + if err := bs.Put(block); err != nil { + t.Fatal(err) } - partnerWants(e, set, partner) - partnerCancels(e, cancels, partner) - assertPoppedInOrder(t, e, keeps) } + for i := 0; i < numRounds; i++ { + for _, testcase := range testcases { + set := testcase[0] + cancels := testcase[1] + keeps := stringsComplement(set, cancels) + + e := NewEngine(context.Background(), bs) + partner := testutil.RandPeerIDFatal(t) + + partnerWants(e, set, partner) + partnerCancels(e, cancels, partner) + if err := checkHandledInOrder(t, e, keeps); err != nil { + t.Logf("run #%d of %d", i, numRounds) + t.Fatal(err) + } + } + } } func partnerWants(e *Engine, keys []string, partner peer.ID) { @@ -166,15 +181,17 @@ func partnerCancels(e *Engine, keys []string, partner peer.ID) { e.MessageReceived(partner, cancels) } -func assertPoppedInOrder(t *testing.T, e *Engine, keys []string) { +func checkHandledInOrder(t *testing.T, e *Engine, keys []string) error { for _, k := range keys { - envelope := <-e.Outbox() + next := <-e.Outbox() + envelope := <-next received := envelope.Message.Blocks()[0] expected := blocks.NewBlock([]byte(k)) if received.Key() != expected.Key() { - t.Fatal("received", string(received.Data), "expected", string(expected.Data)) + return errors.New(fmt.Sprintln("received", string(received.Data), "expected", string(expected.Data))) } } + return nil } func stringsComplement(set, subset []string) []string { From 22456d7a369f1e8540fdc0f6748f117133ad2230 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Mon, 19 Jan 2015 02:35:09 -0800 Subject: [PATCH 0305/1035] fix: return pointer @whyrusleeping This commit was moved from ipfs/go-bitswap@02c7adcf9017e44f9c5b21e5c2b6b1faec983ecf --- bitswap/bitswap.go | 5 ++++- bitswap/decision/engine.go | 10 +++++----- 2 files changed, 
9 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index f27f0cc36..dfa72ff2f 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -281,7 +281,10 @@ func (bs *bitswap) taskWorker(ctx context.Context) { select { case <-ctx.Done(): return - case envelope := <-nextEnvelope: + case envelope, ok := <-nextEnvelope: + if !ok { + continue + } bs.send(ctx, envelope.Peer, envelope.Message) } } diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index b84732e82..05687b312 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -71,7 +71,7 @@ type Engine struct { // outbox contains outgoing messages to peers. This is owned by the // taskWorker goroutine - outbox chan (<-chan Envelope) + outbox chan (<-chan *Envelope) bs bstore.Blockstore @@ -85,7 +85,7 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { ledgerMap: make(map[peer.ID]*ledger), bs: bs, peerRequestQueue: newPRQ(), - outbox: make(chan (<-chan Envelope), outboxChanBuffer), + outbox: make(chan (<-chan *Envelope), outboxChanBuffer), workSignal: make(chan struct{}), } go e.taskWorker(ctx) @@ -95,7 +95,7 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { func (e *Engine) taskWorker(ctx context.Context) { defer close(e.outbox) // because taskWorker uses the channel exclusively for { - oneTimeUse := make(chan Envelope, 1) // buffer to prevent blocking + oneTimeUse := make(chan *Envelope, 1) // buffer to prevent blocking select { case <-ctx.Done(): return @@ -108,7 +108,7 @@ func (e *Engine) taskWorker(ctx context.Context) { close(oneTimeUse) return // ctx cancelled } - oneTimeUse <- *envelope // buffered. won't block + oneTimeUse <- envelope // buffered. won't block close(oneTimeUse) } } @@ -141,7 +141,7 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { } // Outbox returns a channel of one-time use Envelope channels. 
-func (e *Engine) Outbox() <-chan (<-chan Envelope) { +func (e *Engine) Outbox() <-chan (<-chan *Envelope) { return e.outbox } From 3f93bf5d096ff8aa10733993d650fc1720b7c494 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 18 Jan 2015 23:40:23 -0800 Subject: [PATCH 0306/1035] fix(bitswap): release the lock last The area above the lock was getting big. Moving this up to avoid mistakes down the road. This commit was moved from ipfs/go-bitswap@4db3e96da62889dcfe07ea45e790fd7b09f480f9 --- bitswap/decision/engine.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 05687b312..99c66d0ba 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -160,6 +160,9 @@ func (e *Engine) Peers() []peer.ID { // MessageReceived performs book-keeping. Returns error if passed invalid // arguments. func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { + e.lock.Lock() + defer e.lock.Unlock() + log := log.Prefix("bitswap.Engine.MessageReceived(%s)", p) log.Debugf("enter. 
%d entries %d blocks", len(m.Wantlist()), len(m.Blocks())) defer log.Debugf("exit") @@ -175,9 +178,6 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { } }() - e.lock.Lock() - defer e.lock.Unlock() - l := e.findOrCreate(p) if m.Full() { l.wantList = wl.New() From fc4a5b057425b9fa20ca2bd3277982402335a83d Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 18 Jan 2015 20:30:23 -0800 Subject: [PATCH 0307/1035] chore(bitswap): rm debug log (covered by eventlog) This commit was moved from ipfs/go-bitswap@40993c17f8df5328190f5e99bdd7454a456bb0dd --- bitswap/bitswap.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index dfa72ff2f..b80b13f98 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -108,7 +108,6 @@ type bitswap struct { // GetBlock attempts to retrieve a particular block from peers within the // deadline enforced by the context. func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, error) { - log := log.Prefix("bitswap(%s).GetBlock(%s)", bs.self, k) // Any async work initiated by this function must end when this function // returns. To ensure this, derive a new context. 
Note that it is okay to @@ -121,11 +120,9 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err ctx = eventlog.ContextWithLoggable(ctx, eventlog.Uuid("GetBlockRequest")) defer log.EventBegin(ctx, "GetBlockRequest", &k).Done() - log.Debugf("GetBlockRequestBegin") defer func() { cancelFunc() - log.Debugf("GetBlockRequestEnd") }() promise, err := bs.GetBlocks(ctx, []u.Key{k}) From 0a3556d05821ce51bd5848546ffe9a9e1ba73b0c Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 18 Jan 2015 20:30:50 -0800 Subject: [PATCH 0308/1035] chore(bitswap): rm todo This commit was moved from ipfs/go-bitswap@d5085c4c8103f0d7dd81207f8c7328d9bfe2568d --- bitswap/bitswap.go | 1 - 1 file changed, 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b80b13f98..d313713c1 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -147,7 +147,6 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err // resources, provide a context with a reasonably short deadline (ie. not one // that lasts throughout the lifetime of the server) func (bs *bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks.Block, error) { - // TODO log the request promise := bs.notifications.Subscribe(ctx, keys...) 
select { From 2dcbece941399c8b9b734446aa4672aa523df987 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 18 Jan 2015 20:34:06 -0800 Subject: [PATCH 0309/1035] rm logging statements and inline `send` This commit was moved from ipfs/go-bitswap@fe90ed4a141a9df211d49abd46ef4061792c5579 --- bitswap/bitswap.go | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d313713c1..8019fab6e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -168,18 +168,6 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { return bs.network.Provide(ctx, blk.Key()) } -func (bs *bitswap) sendWantlistMsgToPeer(ctx context.Context, m bsmsg.BitSwapMessage, p peer.ID) error { - log := log.Prefix("bitswap(%s).bitswap.sendWantlistMsgToPeer(%d, %s)", bs.self, len(m.Wantlist()), p) - - log.Debug("sending wantlist") - if err := bs.send(ctx, p, m); err != nil { - log.Errorf("send wantlist error: %s", err) - return err - } - log.Debugf("send wantlist success") - return nil -} - func (bs *bitswap) sendWantlistMsgToPeers(ctx context.Context, m bsmsg.BitSwapMessage, peers <-chan peer.ID) error { if peers == nil { panic("Cant send wantlist to nil peerchan") @@ -203,7 +191,9 @@ func (bs *bitswap) sendWantlistMsgToPeers(ctx context.Context, m bsmsg.BitSwapMe wg.Add(1) go func(p peer.ID) { defer wg.Done() - bs.sendWantlistMsgToPeer(ctx, m, p) + if err := bs.send(ctx, p, m); err != nil { + log.Error(err) // TODO remove if too verbose + } }(peerToQuery) } wg.Wait() From 78d28fc7f4ca9f3a209871a13fea1a62acc4051c Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 18 Jan 2015 20:39:09 -0800 Subject: [PATCH 0310/1035] misc(bitswap): shorten comment and rename var This commit was moved from ipfs/go-bitswap@6cd6b3778301cccc4b316b3e917a5613b3133fa7 --- bitswap/bitswap.go | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go 
index 8019fab6e..fd90899ec 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -290,23 +290,19 @@ func (bs *bitswap) clientWorker(parent context.Context) { case <-broadcastSignal: // resend unfulfilled wantlist keys bs.sendWantlistToProviders(ctx) broadcastSignal = time.After(rebroadcastDelay.Get()) - case ks := <-bs.batchRequests: - if len(ks) == 0 { + case keys := <-bs.batchRequests: + if len(keys) == 0 { log.Warning("Received batch request for zero blocks") continue } - for i, k := range ks { + for i, k := range keys { bs.wantlist.Add(k, kMaxPriority-i) } - // NB: send want list to providers for the first peer in this list. - // the assumption is made that the providers of the first key in - // the set are likely to have others as well. - // This currently holds true in most every situation, since when - // pinning a file, you store and provide all blocks associated with - // it. Later, this assumption may not hold as true if we implement - // newer bitswap strategies. + // NB: Optimization. Assumes that providers of key[0] are likely to + // be able to provide for all keys. This currently holds true in most + // every situation. Later, this assumption may not hold as true. 
child, _ := context.WithTimeout(ctx, providerRequestTimeout) - providers := bs.network.FindProvidersAsync(child, ks[0], maxProvidersPerRequest) + providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest) err := bs.sendWantlistToPeers(ctx, providers) if err != nil { log.Errorf("error sending wantlist: %s", err) From 6fac219956a42abf6de3b0b082852dda5c7bb6f7 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sat, 24 Jan 2015 00:24:44 -0800 Subject: [PATCH 0311/1035] remove prefix logger This commit was moved from ipfs/go-bitswap@d905de22abc9dc3faa3d86912b118e1f445ea9fd --- bitswap/bitswap.go | 12 ------------ bitswap/decision/engine.go | 4 ---- bitswap/network/ipfs_impl.go | 11 ----------- 3 files changed, 27 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index fd90899ec..f703bf7e1 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -173,10 +173,6 @@ func (bs *bitswap) sendWantlistMsgToPeers(ctx context.Context, m bsmsg.BitSwapMe panic("Cant send wantlist to nil peerchan") } - log := log.Prefix("bitswap(%s).sendWantlistMsgToPeers(%d)", bs.self, len(m.Wantlist())) - log.Debugf("begin") - defer log.Debugf("end") - set := pset.New() wg := sync.WaitGroup{} for peerToQuery := range peers { @@ -216,10 +212,6 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context) { return } - log := log.Prefix("bitswap(%s).sendWantlistToProviders ", bs.self) - log.Debugf("begin") - defer log.Debugf("end") - ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -233,9 +225,6 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context) { go func(k u.Key) { defer wg.Done() - log := log.Prefix("(entry: %s) ", k) - log.Debug("asking dht for providers") - child, _ := context.WithTimeout(ctx, providerRequestTimeout) providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest) for prov := range providers { @@ -257,7 +246,6 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context) { } func (bs 
*bitswap) taskWorker(ctx context.Context) { - log := log.Prefix("bitswap(%s).taskWorker", bs.self) for { select { case <-ctx.Done(): diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 99c66d0ba..0a759ade3 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -163,10 +163,6 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { e.lock.Lock() defer e.lock.Unlock() - log := log.Prefix("bitswap.Engine.MessageReceived(%s)", p) - log.Debugf("enter. %d entries %d blocks", len(m.Wantlist()), len(m.Blocks())) - defer log.Debugf("exit") - if len(m.Wantlist()) == 0 && len(m.Blocks()) == 0 { log.Info("superfluous message") } diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 1bc47603a..652a1f9c6 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -39,28 +39,23 @@ func (bsnet *impl) SendMessage( p peer.ID, outgoing bsmsg.BitSwapMessage) error { - log := log.Prefix("bitswap net SendMessage to %s", p) - // ensure we're connected //TODO(jbenet) move this into host.NewStream? if err := bsnet.host.Connect(ctx, peer.PeerInfo{ID: p}); err != nil { return err } - log.Debug("opening stream") s, err := bsnet.host.NewStream(ProtocolBitswap, p) if err != nil { return err } defer s.Close() - log.Debug("sending") if err := outgoing.ToNet(s); err != nil { log.Errorf("error: %s", err) return err } - log.Debug("sent") return err } @@ -69,35 +64,29 @@ func (bsnet *impl) SendRequest( p peer.ID, outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) { - log := log.Prefix("bitswap net SendRequest to %s", p) - // ensure we're connected //TODO(jbenet) move this into host.NewStream? 
if err := bsnet.host.Connect(ctx, peer.PeerInfo{ID: p}); err != nil { return nil, err } - log.Debug("opening stream") s, err := bsnet.host.NewStream(ProtocolBitswap, p) if err != nil { return nil, err } defer s.Close() - log.Debug("sending") if err := outgoing.ToNet(s); err != nil { log.Errorf("error: %s", err) return nil, err } - log.Debug("sent, now receiveing") incoming, err := bsmsg.FromNet(s) if err != nil { log.Errorf("error: %s", err) return incoming, err } - log.Debug("received") return incoming, nil } From e2187f67b17fb0b3ea13582d393a3d6f58b56a15 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Sat, 24 Jan 2015 09:12:27 -0800 Subject: [PATCH 0312/1035] bitswap: respond to peers connecting + disconnecting With these notifications, bitswap can reclaim all resources for any outstanding work for a peer. cc @briantigerchow @whyrusleeping This commit was moved from ipfs/go-bitswap@a67942307715aa31c5f27f4d50f3c2eb6a6dd898 --- bitswap/bitswap.go | 18 ++++++++++++++++++ bitswap/network/interface.go | 4 ++++ bitswap/network/ipfs_impl.go | 20 ++++++++++++++++++++ bitswap/testnet/network_test.go | 7 +++++++ 4 files changed, 49 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index f703bf7e1..262b2fd5f 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -339,6 +339,24 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg return "", nil } +// Connected/Disconnected warns bitswap about peer connections +func (bs *bitswap) PeerConnected(p peer.ID) { + // TODO: add to clientWorker?? + + peers := make(chan peer.ID) + err := bs.sendWantlistToPeers(context.TODO(), peers) + if err != nil { + log.Errorf("error sending wantlist: %s", err) + } + peers <- p + close(peers) +} + +// Connected/Disconnected warns bitswap about peer connections +func (bs *bitswap) PeerDisconnected(peer.ID) { + // TODO: release resources. 
+} + func (bs *bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { if len(bkeys) < 1 { return diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 18bb1df83..857201152 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -40,6 +40,10 @@ type Receiver interface { destination peer.ID, outgoing bsmsg.BitSwapMessage) ReceiveError(error) + + // Connected/Disconnected warns bitswap about peer connections + PeerConnected(peer.ID) + PeerDisconnected(peer.ID) } type Routing interface { diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 652a1f9c6..f54e181d1 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -21,6 +21,9 @@ func NewFromIpfsHost(host host.Host, r routing.IpfsRouting) BitSwapNetwork { routing: r, } host.SetStreamHandler(ProtocolBitswap, bitswapNetwork.handleNewStream) + host.Network().Notify((*netNotifiee)(&bitswapNetwork)) + // TODO: StopNotify. + return &bitswapNetwork } @@ -139,3 +142,20 @@ func (bsnet *impl) handleNewStream(s inet.Stream) { log.Debugf("bitswap net handleNewStream from %s", s.Conn().RemotePeer()) bsnet.receiver.ReceiveMessage(ctx, p, received) } + +type netNotifiee impl + +func (nn *netNotifiee) impl() *impl { + return (*impl)(nn) +} + +func (nn *netNotifiee) Connected(n inet.Network, v inet.Conn) { + nn.impl().receiver.PeerConnected(v.RemotePeer()) +} + +func (nn *netNotifiee) Disconnected(n inet.Network, v inet.Conn) { + nn.impl().receiver.PeerDisconnected(v.RemotePeer()) +} + +func (nn *netNotifiee) OpenedStream(n inet.Network, v inet.Stream) {} +func (nn *netNotifiee) ClosedStream(n inet.Network, v inet.Stream) {} diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index e80fccba5..268f93607 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -146,3 +146,10 @@ func (lam *lambdaImpl) ReceiveMessage(ctx context.Context, func (lam *lambdaImpl) ReceiveError(err 
error) { // TODO log error } + +func (lam *lambdaImpl) PeerConnected(p peer.ID) { + // TODO +} +func (lam *lambdaImpl) PeerDisconnected(peer.ID) { + // TODO +} From b997a9f40fc53091a2e80713abe64dacf587d362 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sat, 24 Jan 2015 11:46:23 -0800 Subject: [PATCH 0313/1035] revert bitswap network notification @jbenet @whyrusleeping This commit was moved from ipfs/go-bitswap@0c9f60a755e7644207eb133e84ba68d8c0b3d0f4 --- bitswap/bitswap.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 262b2fd5f..81da2e61b 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -342,14 +342,6 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg // Connected/Disconnected warns bitswap about peer connections func (bs *bitswap) PeerConnected(p peer.ID) { // TODO: add to clientWorker?? - - peers := make(chan peer.ID) - err := bs.sendWantlistToPeers(context.TODO(), peers) - if err != nil { - log.Errorf("error sending wantlist: %s", err) - } - peers <- p - close(peers) } // Connected/Disconnected warns bitswap about peer connections From 47dbe7f7caa9b57634d665b3197289e76cedecff Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sat, 24 Jan 2015 11:33:16 -0800 Subject: [PATCH 0314/1035] fix(bitswap): handling of network notification This commit was moved from ipfs/go-bitswap@044f3b385ad100960ad6f0fa108d6a1876d93c99 --- bitswap/bitswap.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 81da2e61b..b698146ba 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -342,6 +342,13 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg // Connected/Disconnected warns bitswap about peer connections func (bs *bitswap) PeerConnected(p peer.ID) { // TODO: add to clientWorker?? 
+ peers := make(chan peer.ID, 1) + peers <- p + close(peers) + err := bs.sendWantlistToPeers(context.TODO(), peers) + if err != nil { + log.Errorf("error sending wantlist: %s", err) + } } // Connected/Disconnected warns bitswap about peer connections From b5d7af8df0b25926bf95625b46adeb5c07738357 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Wed, 28 Jan 2015 22:49:45 -0800 Subject: [PATCH 0315/1035] optimization(bitswap) return connected peers as providers This commit was moved from ipfs/go-bitswap@ecb13824dde3a50aee3286acb92a60cdce7237e6 --- bitswap/network/ipfs_impl.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index f54e181d1..a0f05342f 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -99,7 +99,19 @@ func (bsnet *impl) SetDelegate(r Receiver) { // FindProvidersAsync returns a channel of providers for the given key func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int) <-chan peer.ID { - out := make(chan peer.ID) + + // Since routing queries are expensive, give bitswap the peers to which we + // have open connections. Note that this may cause issues if bitswap starts + // precisely tracking which peers provide certain keys. This optimization + // would be misleading. In the long run, this may not be the most + // appropriate place for this optimization, but it won't cause any harm in + // the short term. 
+ connectedPeers := bsnet.host.Network().Peers() + out := make(chan peer.ID, len(connectedPeers)) // just enough buffer for these connectedPeers + for _, id := range bsnet.host.Network().Peers() { + out <- id + } + go func() { defer close(out) providers := bsnet.routing.FindProvidersAsync(ctx, k, max) From ede6aa6950540306025a5be66f30de528b017c1a Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Wed, 28 Jan 2015 23:55:30 -0800 Subject: [PATCH 0316/1035] epictest: added test for bitswap wo routing This commit was moved from ipfs/go-bitswap@15ecff901361a2f9f9a5796ddfc839380d846609 --- bitswap/bitswap_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 13bb3304f..cff2827ef 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -244,6 +244,7 @@ func TestSendToWantingPeer(t *testing.T) { func TestBasicBitswap(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) sg := NewTestSessionGenerator(net) + defer sg.Close() bg := blocksutil.NewBlockGenerator() t.Log("Test a few nodes trying to get one file with a lot of blocks") From 32a686224f8a197facf33d27e7345fe39977fe9d Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Thu, 29 Jan 2015 00:07:52 -0800 Subject: [PATCH 0317/1035] bitswap: removed dubious error check test. 
This commit was moved from ipfs/go-bitswap@a7584cdf6e6173101ecff0243f8e1994b5fbd4eb --- bitswap/bitswap_test.go | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index cff2827ef..e81e57ba1 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -36,23 +36,6 @@ func TestClose(t *testing.T) { bitswap.Exchange.GetBlock(context.Background(), block.Key()) } -func TestGetBlockTimeout(t *testing.T) { - - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - g := NewTestSessionGenerator(net) - defer g.Close() - - self := g.Next() - - ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) - block := blocks.NewBlock([]byte("block")) - _, err := self.Exchange.GetBlock(ctx, block.Key()) - - if err != context.DeadlineExceeded { - t.Fatal("Expected DeadlineExceeded error") - } -} - func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this rs := mockrouting.NewServer() From 756da9af17738a71f22536536bc02c84ef55ee49 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Thu, 29 Jan 2015 01:16:45 -0800 Subject: [PATCH 0318/1035] bitswap/net: race fix in peers iteration This commit was moved from ipfs/go-bitswap@55d05cbf3b6c8a21daaa10467e6d64d43849a12f --- bitswap/network/ipfs_impl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index a0f05342f..bab465c72 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -108,7 +108,7 @@ func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int) // the short term. 
connectedPeers := bsnet.host.Network().Peers() out := make(chan peer.ID, len(connectedPeers)) // just enough buffer for these connectedPeers - for _, id := range bsnet.host.Network().Peers() { + for _, id := range connectedPeers { out <- id } From 8daae89067248c40d4934203306480e51ee88793 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 20 Jan 2015 02:42:16 -0800 Subject: [PATCH 0319/1035] log(bitswap): clean up This commit was moved from ipfs/go-bitswap@8e6e2db962728b3e884310e96e0ecd75f20adbbb --- bitswap/bitswap.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b698146ba..7387a98bd 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -169,20 +169,14 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { } func (bs *bitswap) sendWantlistMsgToPeers(ctx context.Context, m bsmsg.BitSwapMessage, peers <-chan peer.ID) error { - if peers == nil { - panic("Cant send wantlist to nil peerchan") - } - set := pset.New() wg := sync.WaitGroup{} for peerToQuery := range peers { log.Event(ctx, "PeerToQuery", peerToQuery) if !set.TryAdd(peerToQuery) { //Do once per peer - log.Debugf("%s skipped (already sent)", peerToQuery) continue } - log.Debugf("%s sending", peerToQuery) wg.Add(1) go func(p peer.ID) { @@ -228,7 +222,6 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context) { child, _ := context.WithTimeout(ctx, providerRequestTimeout) providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest) for prov := range providers { - log.Debugf("dht returned provider %s. 
send wantlist", prov) sendToPeers <- prov } }(e.Key) @@ -249,7 +242,6 @@ func (bs *bitswap) taskWorker(ctx context.Context) { for { select { case <-ctx.Done(): - log.Debugf("exiting") return case nextEnvelope := <-bs.engine.Outbox(): select { @@ -304,7 +296,6 @@ func (bs *bitswap) clientWorker(parent context.Context) { // TODO(brian): handle errors func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) ( peer.ID, bsmsg.BitSwapMessage) { - log.Debugf("ReceiveMessage from %s", p) if p == "" { log.Error("Received message from nil peer!") From bcc7d561cadd15233cd26e2029b7af12e00df524 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 20 Jan 2015 02:43:01 -0800 Subject: [PATCH 0320/1035] pass as param This commit was moved from ipfs/go-bitswap@3c044d23ad44612c32f9c483f4d613385fa108af --- bitswap/bitswap.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 7387a98bd..10b7befd8 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -199,12 +199,7 @@ func (bs *bitswap) sendWantlistToPeers(ctx context.Context, peers <-chan peer.ID return bs.sendWantlistMsgToPeers(ctx, message, peers) } -func (bs *bitswap) sendWantlistToProviders(ctx context.Context) { - entries := bs.wantlist.Entries() - if len(entries) == 0 { - log.Debug("No entries in wantlist, skipping send routine.") - return - } +func (bs *bitswap) sendWantlistToProviders(ctx context.Context, entries []wantlist.Entry) { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -268,7 +263,10 @@ func (bs *bitswap) clientWorker(parent context.Context) { for { select { case <-broadcastSignal: // resend unfulfilled wantlist keys - bs.sendWantlistToProviders(ctx) + entries := bs.wantlist.Entries() + if len(entries) > 0 { + bs.sendWantlistToProviders(ctx, entries) + } broadcastSignal = time.After(rebroadcastDelay.Get()) case keys := <-bs.batchRequests: if len(keys) == 0 { From 
0826fc69e2141807b94dc2b3080a7fe692133b2b Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 20 Jan 2015 02:43:21 -0800 Subject: [PATCH 0321/1035] expose O(1) len This commit was moved from ipfs/go-bitswap@fe4f8ad253a921546119c1d8f1aa5fd3e2d06549 --- bitswap/wantlist/wantlist.go | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 14d729d99..ff6f0af1a 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -7,13 +7,14 @@ import ( ) type ThreadSafe struct { - lk sync.RWMutex - Wantlist + lk sync.RWMutex + Wantlist Wantlist } // not threadsafe type Wantlist struct { set map[u.Key]Entry + // TODO provide O(1) len accessor if cost becomes an issue } type Entry struct { @@ -74,6 +75,16 @@ func (w *ThreadSafe) SortedEntries() []Entry { return w.Wantlist.SortedEntries() } +func (w *ThreadSafe) Len() int { + w.lk.RLock() + defer w.lk.RUnlock() + return w.Wantlist.Len() +} + +func (w *Wantlist) Len() int { + return len(w.set) +} + func (w *Wantlist) Add(k u.Key, priority int) { if _, ok := w.set[k]; ok { return From 23c36c8507279877fdae5bdafb64f9ee10db8b67 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 20 Jan 2015 02:43:29 -0800 Subject: [PATCH 0322/1035] periodically print the number of keys in the wantlist (if any) This commit was moved from ipfs/go-bitswap@9f3de14a20c416738edd40cbd63f6ac7fe059aeb --- bitswap/bitswap.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 10b7befd8..d11c22872 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -262,6 +262,11 @@ func (bs *bitswap) clientWorker(parent context.Context) { for { select { + case <-time.Tick(10 * time.Second): + n := bs.wantlist.Len() + if n > 0 { + log.Debugf("%d keys in bitswap wantlist...", n) + } case <-broadcastSignal: // resend unfulfilled wantlist keys entries := bs.wantlist.Entries() if len(entries) > 0 { From 
f8231b215c42211e271e86d2e4d08aa3ba01b92b Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 20 Jan 2015 02:58:01 -0800 Subject: [PATCH 0323/1035] fix inflection This commit was moved from ipfs/go-bitswap@751dad90578244418dbca255e6fe2266405fe088 --- bitswap/bitswap.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d11c22872..5cb40e874 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -8,6 +8,7 @@ import ( "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + inflect "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/inflect" blocks "github.com/jbenet/go-ipfs/blocks" blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" @@ -265,7 +266,7 @@ func (bs *bitswap) clientWorker(parent context.Context) { case <-time.Tick(10 * time.Second): n := bs.wantlist.Len() if n > 0 { - log.Debugf("%d keys in bitswap wantlist...", n) + log.Debug(n, inflect.FromNumber("keys", n), "in bitswap wantlist") } case <-broadcastSignal: // resend unfulfilled wantlist keys entries := bs.wantlist.Entries() From a059d6b59dff4cc3cd8e554baaa41a41aa240efd Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Fri, 30 Jan 2015 20:17:55 -0800 Subject: [PATCH 0324/1035] p2p/net: notify on listens Network now signals when it successfully listens on some address or when an address shuts down. This will be used to establish and close nat port mappings. It could also be used to notify peers of address changes. 
This commit was moved from ipfs/go-bitswap@2027fbe0138ea24d2bdbd4738bca64adcca6fce4 --- bitswap/network/ipfs_impl.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index bab465c72..92743f916 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -2,6 +2,7 @@ package network import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" host "github.com/jbenet/go-ipfs/p2p/host" @@ -171,3 +172,5 @@ func (nn *netNotifiee) Disconnected(n inet.Network, v inet.Conn) { func (nn *netNotifiee) OpenedStream(n inet.Network, v inet.Stream) {} func (nn *netNotifiee) ClosedStream(n inet.Network, v inet.Stream) {} +func (nn *netNotifiee) Listen(n inet.Network, a ma.Multiaddr) {} +func (nn *netNotifiee) ListenClose(n inet.Network, a ma.Multiaddr) {} From 065a63c0ec2b799a1f48445f2034ac891eb77539 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 30 Jan 2015 01:29:12 -0800 Subject: [PATCH 0325/1035] feat(bitswap): synchronous close This commit was moved from ipfs/go-bitswap@b4cd1252508087b6e741815115f556d62845fab8 --- bitswap/bitswap.go | 51 +++++++++++++++++++++++++++++++++-------- bitswap/bitswap_test.go | 2 -- 2 files changed, 41 insertions(+), 12 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 5cb40e874..d3f935cfa 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -9,6 +9,7 @@ import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" inflect "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/inflect" + process "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" blocks "github.com/jbenet/go-ipfs/blocks" blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" @@ -52,28 +53,47 @@ 
var ( func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, nice bool) exchange.Interface { + // important to use provided parent context (since it may include important + // loggable data). It's probably not a good idea to allow bitswap to be + // coupled to the concerns of the IPFS daemon in this way. + // + // FIXME(btc) Now that bitswap manages itself using a process, it probably + // shouldn't accept a context anymore. Clients should probably use Close() + // exclusively. We should probably find another way to share logging data ctx, cancelFunc := context.WithCancel(parent) notif := notifications.New() + px := process.WithTeardown(func() error { + notif.Shutdown() + return nil + }) + go func() { - <-ctx.Done() + <-px.Closing() // process closes first cancelFunc() - notif.Shutdown() + }() + go func() { + <-ctx.Done() // parent cancelled first + px.Close() }() bs := &bitswap{ self: p, blockstore: bstore, - cancelFunc: cancelFunc, notifications: notif, - engine: decision.NewEngine(ctx, bstore), + engine: decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method network: network, wantlist: wantlist.NewThreadSafe(), batchRequests: make(chan []u.Key, sizeBatchRequestChan), + process: px, } network.SetDelegate(bs) - go bs.clientWorker(ctx) - go bs.taskWorker(ctx) + px.Go(func(px process.Process) { + bs.clientWorker(ctx) + }) + px.Go(func(px process.Process) { + bs.taskWorker(ctx) + }) return bs } @@ -102,8 +122,7 @@ type bitswap struct { wantlist *wantlist.ThreadSafe - // cancelFunc signals cancellation to the bitswap event loop - cancelFunc func() + process process.Process } // GetBlock attempts to retrieve a particular block from peers within the @@ -149,6 +168,11 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err // that lasts throughout the lifetime of the server) func (bs *bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks.Block, error) { + 
select { + case <-bs.process.Closing(): + return nil, errors.New("bitswap is closed") + default: + } promise := bs.notifications.Subscribe(ctx, keys...) select { case bs.batchRequests <- keys: @@ -161,6 +185,11 @@ func (bs *bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks. // HasBlock announces the existance of a block to this bitswap service. The // service will potentially notify its peers. func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { + select { + case <-bs.process.Closing(): + return errors.New("bitswap is closed") + default: + } if err := bs.blockstore.Put(blk); err != nil { return err } @@ -235,6 +264,7 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, entries []wantli } func (bs *bitswap) taskWorker(ctx context.Context) { + defer log.Info("bitswap task worker shutting down...") for { select { case <-ctx.Done(): @@ -256,6 +286,8 @@ func (bs *bitswap) taskWorker(ctx context.Context) { // TODO ensure only one active request per key func (bs *bitswap) clientWorker(parent context.Context) { + defer log.Info("bitswap client worker shutting down...") + ctx, cancel := context.WithCancel(parent) broadcastSignal := time.After(rebroadcastDelay.Get()) @@ -384,6 +416,5 @@ func (bs *bitswap) send(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) } func (bs *bitswap) Close() error { - bs.cancelFunc() - return nil // to conform to Closer interface + return bs.process.Close() } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index e81e57ba1..6192773a4 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -22,8 +22,6 @@ import ( const kNetworkDelay = 0 * time.Millisecond func TestClose(t *testing.T) { - // TODO - t.Skip("TODO Bitswap's Close implementation is a WIP") vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) sesgen := NewTestSessionGenerator(vnet) defer sesgen.Close() From aab84238ace9e24add93ca60773b0260870db29f Mon Sep 17 00:00:00 
2001 From: Juan Batiz-Benet Date: Fri, 30 Jan 2015 22:22:47 -0800 Subject: [PATCH 0326/1035] fix(bitswap/network/ipfs) ignore self as provider This commit was moved from ipfs/go-bitswap@cd31cea3bf8ed6693bb43240b09ff89b519d4214 --- bitswap/network/ipfs_impl.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 92743f916..2ea6705d0 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -110,6 +110,9 @@ func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int) connectedPeers := bsnet.host.Network().Peers() out := make(chan peer.ID, len(connectedPeers)) // just enough buffer for these connectedPeers for _, id := range connectedPeers { + if id == bsnet.host.ID() { + continue // ignore self as provider + } out <- id } @@ -117,9 +120,10 @@ func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int) defer close(out) providers := bsnet.routing.FindProvidersAsync(ctx, k, max) for info := range providers { - if info.ID != bsnet.host.ID() { // dont add addrs for ourselves. 
- bsnet.host.Peerstore().AddAddresses(info.ID, info.Addrs) + if info.ID == bsnet.host.ID() { + continue // ignore self as provider } + bsnet.host.Peerstore().AddAddresses(info.ID, info.Addrs) select { case <-ctx.Done(): return From 360a7def3d045eccdd0b1aaa9b44042cf4ff1f29 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 30 Jan 2015 23:06:11 -0800 Subject: [PATCH 0327/1035] log(bitswap) add bitswap loggable This commit was moved from ipfs/go-bitswap@838dc151f1bb58f5bff410e87114ff8dadf3acb7 --- bitswap/bitswap.go | 1 + bitswap/message/message.go | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d3f935cfa..1117d7742 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -409,6 +409,7 @@ func (bs *bitswap) ReceiveError(err error) { // send strives to ensure that accounting is always performed when a message is // sent func (bs *bitswap) send(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) error { + defer log.EventBegin(ctx, "sendMessage", p, m).Done() if err := bs.network.SendMessage(ctx, p, m); err != nil { return errors.Wrap(err) } diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 117758d9e..d02d82740 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -39,6 +39,8 @@ type BitSwapMessage interface { AddBlock(*blocks.Block) Exportable + + Loggable() map[string]interface{} } type Exportable interface { @@ -170,3 +172,9 @@ func (m *impl) ToNet(w io.Writer) error { } return nil } + +func (m *impl) Loggable() map[string]interface{} { + return map[string]interface{}{ + "wantlist": m.wantlist, + } +} From 7b8578feaa0c03303e07cf220fbf1a0ae428223b Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 30 Jan 2015 23:16:59 -0800 Subject: [PATCH 0328/1035] log(bitswap/message) make bsmsg loggable This commit was moved from ipfs/go-bitswap@2cb81b718efca279bcb71fb965516f9e7ff0bf9f --- bitswap/message/message.go | 7 ++++++- 1 file changed, 6 
insertions(+), 1 deletion(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index d02d82740..68748c0d8 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -174,7 +174,12 @@ func (m *impl) ToNet(w io.Writer) error { } func (m *impl) Loggable() map[string]interface{} { + var blocks []string + for _, v := range m.blocks { + blocks = append(blocks, v.Key().Pretty()) + } return map[string]interface{}{ - "wantlist": m.wantlist, + "blocks": blocks, + "wants": m.Wantlist(), } } From c7591a3ef330b4f7a7436bb146aa56e5e3ad91db Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 30 Jan 2015 23:35:47 -0800 Subject: [PATCH 0329/1035] fix(bitswap) rename PeerToQuery to send wantlist log(bitswap) remove ambiguous event This commit was moved from ipfs/go-bitswap@6778e4264df97ebea1ddbb7469cbe9c573e41a44 --- bitswap/bitswap.go | 1 - 1 file changed, 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 1117d7742..985c75012 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -202,7 +202,6 @@ func (bs *bitswap) sendWantlistMsgToPeers(ctx context.Context, m bsmsg.BitSwapMe set := pset.New() wg := sync.WaitGroup{} for peerToQuery := range peers { - log.Event(ctx, "PeerToQuery", peerToQuery) if !set.TryAdd(peerToQuery) { //Do once per peer continue From c47cae1944316a464efbe454dab65e83be868dad Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 30 Jan 2015 23:36:05 -0800 Subject: [PATCH 0330/1035] feat(bitswap) add deliverBlocks Event This commit was moved from ipfs/go-bitswap@8ea3a4b9eea89259bfdc3cd4a8a738a86d18098c --- bitswap/bitswap.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 985c75012..ce37c47ae 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -276,6 +276,7 @@ func (bs *bitswap) taskWorker(ctx context.Context) { if !ok { continue } + log.Event(ctx, "deliverBlocks", envelope.Message, envelope.Peer) bs.send(ctx, envelope.Peer, 
envelope.Message) } } From 5c7a1aa19b795e3bb808f0fdcb5e8fe0ef4c5168 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sat, 31 Jan 2015 01:41:02 -0800 Subject: [PATCH 0331/1035] refactor(bitswap) move workers to bottom of file This commit was moved from ipfs/go-bitswap@37412e93c5297d90295f8d8054147a175ba0c475 --- bitswap/bitswap.go | 134 ++++++++++++++++++++++----------------------- 1 file changed, 67 insertions(+), 67 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index ce37c47ae..b0d7ad4b0 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -262,73 +262,6 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, entries []wantli } } -func (bs *bitswap) taskWorker(ctx context.Context) { - defer log.Info("bitswap task worker shutting down...") - for { - select { - case <-ctx.Done(): - return - case nextEnvelope := <-bs.engine.Outbox(): - select { - case <-ctx.Done(): - return - case envelope, ok := <-nextEnvelope: - if !ok { - continue - } - log.Event(ctx, "deliverBlocks", envelope.Message, envelope.Peer) - bs.send(ctx, envelope.Peer, envelope.Message) - } - } - } -} - -// TODO ensure only one active request per key -func (bs *bitswap) clientWorker(parent context.Context) { - - defer log.Info("bitswap client worker shutting down...") - - ctx, cancel := context.WithCancel(parent) - - broadcastSignal := time.After(rebroadcastDelay.Get()) - defer cancel() - - for { - select { - case <-time.Tick(10 * time.Second): - n := bs.wantlist.Len() - if n > 0 { - log.Debug(n, inflect.FromNumber("keys", n), "in bitswap wantlist") - } - case <-broadcastSignal: // resend unfulfilled wantlist keys - entries := bs.wantlist.Entries() - if len(entries) > 0 { - bs.sendWantlistToProviders(ctx, entries) - } - broadcastSignal = time.After(rebroadcastDelay.Get()) - case keys := <-bs.batchRequests: - if len(keys) == 0 { - log.Warning("Received batch request for zero blocks") - continue - } - for i, k := range keys { - bs.wantlist.Add(k, kMaxPriority-i) 
- } - // NB: Optimization. Assumes that providers of key[0] are likely to - // be able to provide for all keys. This currently holds true in most - // every situation. Later, this assumption may not hold as true. - child, _ := context.WithTimeout(ctx, providerRequestTimeout) - providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest) - err := bs.sendWantlistToPeers(ctx, providers) - if err != nil { - log.Errorf("error sending wantlist: %s", err) - } - case <-parent.Done(): - return - } - } -} - // TODO(brian): handle errors func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) ( peer.ID, bsmsg.BitSwapMessage) { @@ -419,3 +352,70 @@ func (bs *bitswap) send(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) func (bs *bitswap) Close() error { return bs.process.Close() } + +func (bs *bitswap) taskWorker(ctx context.Context) { + defer log.Info("bitswap task worker shutting down...") + for { + select { + case <-ctx.Done(): + return + case nextEnvelope := <-bs.engine.Outbox(): + select { + case <-ctx.Done(): + return + case envelope, ok := <-nextEnvelope: + if !ok { + continue + } + log.Event(ctx, "deliverBlocks", envelope.Message, envelope.Peer) + bs.send(ctx, envelope.Peer, envelope.Message) + } + } + } +} + +// TODO ensure only one active request per key +func (bs *bitswap) clientWorker(parent context.Context) { + + defer log.Info("bitswap client worker shutting down...") + + ctx, cancel := context.WithCancel(parent) + + broadcastSignal := time.After(rebroadcastDelay.Get()) + defer cancel() + + for { + select { + case <-time.Tick(10 * time.Second): + n := bs.wantlist.Len() + if n > 0 { + log.Debug(n, inflect.FromNumber("keys", n), "in bitswap wantlist") + } + case <-broadcastSignal: // resend unfulfilled wantlist keys + entries := bs.wantlist.Entries() + if len(entries) > 0 { + bs.sendWantlistToProviders(ctx, entries) + } + broadcastSignal = time.After(rebroadcastDelay.Get()) + case keys := 
<-bs.batchRequests: + if len(keys) == 0 { + log.Warning("Received batch request for zero blocks") + continue + } + for i, k := range keys { + bs.wantlist.Add(k, kMaxPriority-i) + } + // NB: Optimization. Assumes that providers of key[0] are likely to + // be able to provide for all keys. This currently holds true in most + // every situation. Later, this assumption may not hold as true. + child, _ := context.WithTimeout(ctx, providerRequestTimeout) + providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest) + err := bs.sendWantlistToPeers(ctx, providers) + if err != nil { + log.Errorf("error sending wantlist: %s", err) + } + case <-parent.Done(): + return + } + } +} From d4375eff1e731a83cad9a4e15aedd384875b7237 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sat, 31 Jan 2015 02:08:57 -0800 Subject: [PATCH 0332/1035] log(bitswap) add message when message received This commit was moved from ipfs/go-bitswap@50b768c810934504703eea074e4e6bce086da9bf --- bitswap/bitswap.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b0d7ad4b0..3f5440a5d 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -265,6 +265,7 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, entries []wantli // TODO(brian): handle errors func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) ( peer.ID, bsmsg.BitSwapMessage) { + defer log.EventBegin(ctx, "receiveMessage", p, incoming).Done() if p == "" { log.Error("Received message from nil peer!") From 1f7a56c699b4e0fed8a0965a8186c0bb8c340fad Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Mon, 2 Feb 2015 11:30:00 -0800 Subject: [PATCH 0333/1035] AddrManager: use addr manager with smarter TTLs This addr manager should seriously help with the addrsplosion problem. 
This commit was moved from ipfs/go-bitswap@3bac90de9ee035bcb804b2f9605e39b77c505427 --- bitswap/network/ipfs_impl.go | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 2ea6705d0..22ead701c 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -38,18 +38,24 @@ type impl struct { receiver Receiver } -func (bsnet *impl) SendMessage( - ctx context.Context, - p peer.ID, - outgoing bsmsg.BitSwapMessage) error { +func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (inet.Stream, error) { - // ensure we're connected + // first, make sure we're connected. + // if this fails, we cannot connect to given peer. //TODO(jbenet) move this into host.NewStream? if err := bsnet.host.Connect(ctx, peer.PeerInfo{ID: p}); err != nil { - return err + return nil, err } - s, err := bsnet.host.NewStream(ProtocolBitswap, p) + return bsnet.host.NewStream(ProtocolBitswap, p) +} + +func (bsnet *impl) SendMessage( + ctx context.Context, + p peer.ID, + outgoing bsmsg.BitSwapMessage) error { + + s, err := bsnet.newStreamToPeer(ctx, p) if err != nil { return err } @@ -68,13 +74,7 @@ func (bsnet *impl) SendRequest( p peer.ID, outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) { - // ensure we're connected - //TODO(jbenet) move this into host.NewStream? 
- if err := bsnet.host.Connect(ctx, peer.PeerInfo{ID: p}); err != nil { - return nil, err - } - - s, err := bsnet.host.NewStream(ProtocolBitswap, p) + s, err := bsnet.newStreamToPeer(ctx, p) if err != nil { return nil, err } @@ -123,7 +123,7 @@ func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int) if info.ID == bsnet.host.ID() { continue // ignore self as provider } - bsnet.host.Peerstore().AddAddresses(info.ID, info.Addrs) + bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, peer.TempAddrTTL) select { case <-ctx.Done(): return From b18c9a1b4bbb216f30a6b8df90e159f17ce3e65e Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Tue, 3 Feb 2015 01:06:07 -0800 Subject: [PATCH 0334/1035] logs: removed all log.Errors unhelpful to users Let's save log.Error for things the user can take action on. Moved all our diagnostics to log.Debug. We can ideally reduce them even further. This commit was moved from ipfs/go-bitswap@50ab623e498d1509878fc1919c70fe83bfc53cd8 --- bitswap/bitswap.go | 18 +++++++++--------- bitswap/network/ipfs_impl.go | 8 ++++---- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 3f5440a5d..ed411fe36 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -211,7 +211,7 @@ func (bs *bitswap) sendWantlistMsgToPeers(ctx context.Context, m bsmsg.BitSwapMe go func(p peer.ID) { defer wg.Done() if err := bs.send(ctx, p, m); err != nil { - log.Error(err) // TODO remove if too verbose + log.Debug(err) // TODO remove if too verbose } }(peerToQuery) } @@ -258,7 +258,7 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, entries []wantli err := bs.sendWantlistToPeers(ctx, sendToPeers) if err != nil { - log.Errorf("sendWantlistToPeers error: %s", err) + log.Debugf("sendWantlistToPeers error: %s", err) } } @@ -268,12 +268,12 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg defer log.EventBegin(ctx, "receiveMessage", p, 
incoming).Done() if p == "" { - log.Error("Received message from nil peer!") + log.Debug("Received message from nil peer!") // TODO propagate the error upward return "", nil } if incoming == nil { - log.Error("Got nil bitswap message!") + log.Debug("Got nil bitswap message!") // TODO propagate the error upward return "", nil } @@ -287,7 +287,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg for _, block := range incoming.Blocks() { hasBlockCtx, _ := context.WithTimeout(ctx, hasBlockTimeout) if err := bs.HasBlock(hasBlockCtx, block); err != nil { - log.Error(err) + log.Debug(err) } } var keys []u.Key @@ -308,7 +308,7 @@ func (bs *bitswap) PeerConnected(p peer.ID) { close(peers) err := bs.sendWantlistToPeers(context.TODO(), peers) if err != nil { - log.Errorf("error sending wantlist: %s", err) + log.Debugf("error sending wantlist: %s", err) } } @@ -329,13 +329,13 @@ func (bs *bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { for _, p := range bs.engine.Peers() { err := bs.send(ctx, p, message) if err != nil { - log.Errorf("Error sending message: %s", err) + log.Debugf("Error sending message: %s", err) } } } func (bs *bitswap) ReceiveError(err error) { - log.Errorf("Bitswap ReceiveError: %s", err) + log.Debugf("Bitswap ReceiveError: %s", err) // TODO log the network error // TODO bubble the network error up to the parent context/error logger } @@ -413,7 +413,7 @@ func (bs *bitswap) clientWorker(parent context.Context) { providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest) err := bs.sendWantlistToPeers(ctx, providers) if err != nil { - log.Errorf("error sending wantlist: %s", err) + log.Debugf("error sending wantlist: %s", err) } case <-parent.Done(): return diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 22ead701c..d9458776e 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -62,7 +62,7 @@ func (bsnet *impl) SendMessage( defer 
s.Close() if err := outgoing.ToNet(s); err != nil { - log.Errorf("error: %s", err) + log.Debugf("error: %s", err) return err } @@ -81,13 +81,13 @@ func (bsnet *impl) SendRequest( defer s.Close() if err := outgoing.ToNet(s); err != nil { - log.Errorf("error: %s", err) + log.Debugf("error: %s", err) return nil, err } incoming, err := bsmsg.FromNet(s) if err != nil { - log.Errorf("error: %s", err) + log.Debugf("error: %s", err) return incoming, err } @@ -150,7 +150,7 @@ func (bsnet *impl) handleNewStream(s inet.Stream) { received, err := bsmsg.FromNet(s) if err != nil { go bsnet.receiver.ReceiveError(err) - log.Errorf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err) + log.Debugf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err) return } From 9a669f4c0e66bcd2cc05ce6f5a56484b62107568 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 10 Feb 2015 22:59:10 +0000 Subject: [PATCH 0335/1035] document some packages This commit was moved from ipfs/go-bitswap@50df3982461c4d884c2f830ce4c7d68ce07c45d9 --- bitswap/decision/engine.go | 1 + bitswap/wantlist/wantlist.go | 2 ++ 2 files changed, 3 insertions(+) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 0a759ade3..e0f733929 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -1,3 +1,4 @@ +// package decision implements the decision engine for the bitswap service. package decision import ( diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index ff6f0af1a..450fe3bd3 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -1,3 +1,5 @@ +// package wantlist implements an object for bitswap that contains the keys +// that a given peer wants. 
package wantlist import ( From f1ba3c6c55007299a005880c75fd06078761565d Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 12 Feb 2015 19:53:34 +0000 Subject: [PATCH 0336/1035] fix a panic caused by context cancelling closing a promise channel This commit was moved from ipfs/go-bitswap@5dd7124e0fd2452277e588b1d4e80f0f5baecfef --- bitswap/bitswap.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index ed411fe36..1fcce72d9 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -151,7 +151,15 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err } select { - case block := <-promise: + case block, ok := <-promise: + if !ok { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + return nil, errors.New("promise channel was closed") + } + } return block, nil case <-parent.Done(): return nil, parent.Err() From f97df3238572694169f7e9e1f77a77563075317a Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Fri, 6 Feb 2015 11:24:08 -0700 Subject: [PATCH 0337/1035] misc: suppress logs to Debug (from Info) This commit was moved from ipfs/go-bitswap@5ff4f16bae9769fd12bd437072856031e7963f32 --- bitswap/decision/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index e0f733929..e4e16e3da 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -165,7 +165,7 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { defer e.lock.Unlock() if len(m.Wantlist()) == 0 && len(m.Blocks()) == 0 { - log.Info("superfluous message") + log.Debug("received empty message from", p) } newWorkExists := false From b5de56b7e17fb7bdf5ee133fda4778b63b966cb3 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 19 Feb 2015 00:31:10 +0000 Subject: [PATCH 0338/1035] move blocking calls out of single threaded loops, cancel contexts ASAP This commit was moved from 
ipfs/go-bitswap@55523af9a3acac77485eeb71b26cd6de3cfeb6d4 --- bitswap/bitswap.go | 92 +++++++++++++++++++++++++++----------- bitswap/decision/engine.go | 4 ++ 2 files changed, 71 insertions(+), 25 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 1fcce72d9..ff24e068b 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -84,7 +84,7 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, engine: decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method network: network, wantlist: wantlist.NewThreadSafe(), - batchRequests: make(chan []u.Key, sizeBatchRequestChan), + batchRequests: make(chan *blockRequest, sizeBatchRequestChan), process: px, } network.SetDelegate(bs) @@ -94,6 +94,9 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, px.Go(func(px process.Process) { bs.taskWorker(ctx) }) + px.Go(func(px process.Process) { + bs.rebroadcastWorker(ctx) + }) return bs } @@ -116,7 +119,7 @@ type bitswap struct { // Requests for a set of related blocks // the assumption is made that the same peer is likely to // have more than a single block in the set - batchRequests chan []u.Key + batchRequests chan *blockRequest engine *decision.Engine @@ -125,6 +128,11 @@ type bitswap struct { process process.Process } +type blockRequest struct { + keys []u.Key + ctx context.Context +} + // GetBlock attempts to retrieve a particular block from peers within the // deadline enforced by the context. func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, error) { @@ -175,15 +183,19 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err // resources, provide a context with a reasonably short deadline (ie. 
not one // that lasts throughout the lifetime of the server) func (bs *bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks.Block, error) { - select { case <-bs.process.Closing(): return nil, errors.New("bitswap is closed") default: } promise := bs.notifications.Subscribe(ctx, keys...) + + req := &blockRequest{ + keys: keys, + ctx: ctx, + } select { - case bs.batchRequests <- keys: + case bs.batchRequests <- req: return promise, nil case <-ctx.Done(): return nil, ctx.Err() @@ -321,8 +333,8 @@ func (bs *bitswap) PeerConnected(p peer.ID) { } // Connected/Disconnected warns bitswap about peer connections -func (bs *bitswap) PeerDisconnected(peer.ID) { - // TODO: release resources. +func (bs *bitswap) PeerDisconnected(p peer.ID) { + bs.engine.PeerDisconnected(p) } func (bs *bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { @@ -342,6 +354,24 @@ func (bs *bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { } } +func (bs *bitswap) wantNewBlocks(ctx context.Context, bkeys []u.Key) { + if len(bkeys) < 1 { + return + } + + message := bsmsg.New() + message.SetFull(false) + for i, k := range bkeys { + message.AddEntry(k, kMaxPriority-i) + } + for _, p := range bs.engine.Peers() { + err := bs.send(ctx, p, message) + if err != nil { + log.Debugf("Error sending message: %s", err) + } + } +} + func (bs *bitswap) ReceiveError(err error) { log.Debugf("Bitswap ReceiveError: %s", err) // TODO log the network error @@ -385,13 +415,42 @@ func (bs *bitswap) taskWorker(ctx context.Context) { // TODO ensure only one active request per key func (bs *bitswap) clientWorker(parent context.Context) { - defer log.Info("bitswap client worker shutting down...") + for { + select { + case req := <-bs.batchRequests: + keys := req.keys + if len(keys) == 0 { + log.Warning("Received batch request for zero blocks") + continue + } + for i, k := range keys { + bs.wantlist.Add(k, kMaxPriority-i) + } + + bs.wantNewBlocks(req.ctx, keys) + + // NB: Optimization. 
Assumes that providers of key[0] are likely to + // be able to provide for all keys. This currently holds true in most + // every situation. Later, this assumption may not hold as true. + child, _ := context.WithTimeout(req.ctx, providerRequestTimeout) + providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest) + err := bs.sendWantlistToPeers(req.ctx, providers) + if err != nil { + log.Debugf("error sending wantlist: %s", err) + } + case <-parent.Done(): + return + } + } +} + +func (bs *bitswap) rebroadcastWorker(parent context.Context) { ctx, cancel := context.WithCancel(parent) + defer cancel() broadcastSignal := time.After(rebroadcastDelay.Get()) - defer cancel() for { select { @@ -406,23 +465,6 @@ func (bs *bitswap) clientWorker(parent context.Context) { bs.sendWantlistToProviders(ctx, entries) } broadcastSignal = time.After(rebroadcastDelay.Get()) - case keys := <-bs.batchRequests: - if len(keys) == 0 { - log.Warning("Received batch request for zero blocks") - continue - } - for i, k := range keys { - bs.wantlist.Add(k, kMaxPriority-i) - } - // NB: Optimization. Assumes that providers of key[0] are likely to - // be able to provide for all keys. This currently holds true in most - // every situation. Later, this assumption may not hold as true. 
- child, _ := context.WithTimeout(ctx, providerRequestTimeout) - providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest) - err := bs.sendWantlistToPeers(ctx, providers) - if err != nil { - log.Debugf("error sending wantlist: %s", err) - } case <-parent.Done(): return } diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index e4e16e3da..11edf5f6d 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -228,6 +228,10 @@ func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) error { return nil } +func (e *Engine) PeerDisconnected(p peer.ID) { + // TODO: release ledger +} + func (e *Engine) numBytesSentTo(p peer.ID) uint64 { // NB not threadsafe return e.findOrCreate(p).Accounting.BytesSent From b491f00e2a8e368911cdfe46e4ea08acfba5ce85 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 17 Feb 2015 06:38:48 +0000 Subject: [PATCH 0339/1035] add a test to make sure duplicate subscriptions to the same block dont have weird side effects This commit was moved from ipfs/go-bitswap@6896d8f0af7c045f6a58baf9f89d43d7d3196bc0 --- bitswap/notifications/notifications_test.go | 24 +++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 3a6ada1ea..372b1e139 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -76,6 +76,30 @@ func TestSubscribeMany(t *testing.T) { assertBlocksEqual(t, e2, r2) } +// TestDuplicateSubscribe tests a scenario where a given block +// would be requested twice at the same time. 
+func TestDuplicateSubscribe(t *testing.T) { + e1 := blocks.NewBlock([]byte("1")) + + n := New() + defer n.Shutdown() + ch1 := n.Subscribe(context.Background(), e1.Key()) + ch2 := n.Subscribe(context.Background(), e1.Key()) + + n.Publish(e1) + r1, ok := <-ch1 + if !ok { + t.Fatal("didn't receive first expected block") + } + assertBlocksEqual(t, e1, r1) + + r2, ok := <-ch2 + if !ok { + t.Fatal("didn't receive second expected block") + } + assertBlocksEqual(t, e1, r2) +} + func TestSubscribeIsANoopWhenCalledWithNoKeys(t *testing.T) { n := New() defer n.Shutdown() From c95e10245107c5b0b5e411b524f9d817a4a04bdb Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 18 Feb 2015 08:18:19 +0000 Subject: [PATCH 0340/1035] add worker to bitswap for reproviding new blocks This commit was moved from ipfs/go-bitswap@88853d992e8f81027ad94149f74392b8477a4740 --- bitswap/bitswap.go | 108 ++++++------------------------------ bitswap/workers.go | 133 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 150 insertions(+), 91 deletions(-) create mode 100644 bitswap/workers.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index ff24e068b..3046c987c 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -8,7 +8,6 @@ import ( "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - inflect "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/inflect" process "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" blocks "github.com/jbenet/go-ipfs/blocks" @@ -37,9 +36,13 @@ const ( maxProvidersPerRequest = 3 providerRequestTimeout = time.Second * 10 hasBlockTimeout = time.Second * 15 + provideTimeout = time.Second * 15 sizeBatchRequestChan = 32 // kMaxPriority is the max priority as defined by the bitswap protocol kMaxPriority = math.MaxInt32 + + hasBlockBufferSize = 256 + provideWorkers = 4 ) var ( @@ -86,18 +89,12 @@ func New(parent context.Context, p peer.ID, network 
bsnet.BitSwapNetwork, wantlist: wantlist.NewThreadSafe(), batchRequests: make(chan *blockRequest, sizeBatchRequestChan), process: px, + newBlocks: make(chan *blocks.Block, hasBlockBufferSize), } network.SetDelegate(bs) - px.Go(func(px process.Process) { - bs.clientWorker(ctx) - }) - px.Go(func(px process.Process) { - bs.taskWorker(ctx) - }) - px.Go(func(px process.Process) { - bs.rebroadcastWorker(ctx) - }) + // Start up bitswaps async worker routines + bs.startWorkers(px, ctx) return bs } @@ -126,6 +123,8 @@ type bitswap struct { wantlist *wantlist.ThreadSafe process process.Process + + newBlocks chan *blocks.Block } type blockRequest struct { @@ -172,7 +171,6 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err case <-parent.Done(): return nil, parent.Err() } - } // GetBlocks returns a channel where the caller may receive blocks that @@ -205,6 +203,7 @@ func (bs *bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks. // HasBlock announces the existance of a block to this bitswap service. The // service will potentially notify its peers. 
func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { + log.Event(ctx, "hasBlock", blk) select { case <-bs.process.Closing(): return errors.New("bitswap is closed") @@ -215,7 +214,12 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { } bs.wantlist.Remove(blk.Key()) bs.notifications.Publish(blk) - return bs.network.Provide(ctx, blk.Key()) + select { + case bs.newBlocks <- blk: + case <-ctx.Done(): + return ctx.Err() + } + return nil } func (bs *bitswap) sendWantlistMsgToPeers(ctx context.Context, m bsmsg.BitSwapMessage, peers <-chan peer.ID) error { @@ -310,6 +314,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg log.Debug(err) } } + var keys []u.Key for _, block := range incoming.Blocks() { keys = append(keys, block.Key()) @@ -391,82 +396,3 @@ func (bs *bitswap) send(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) func (bs *bitswap) Close() error { return bs.process.Close() } - -func (bs *bitswap) taskWorker(ctx context.Context) { - defer log.Info("bitswap task worker shutting down...") - for { - select { - case <-ctx.Done(): - return - case nextEnvelope := <-bs.engine.Outbox(): - select { - case <-ctx.Done(): - return - case envelope, ok := <-nextEnvelope: - if !ok { - continue - } - log.Event(ctx, "deliverBlocks", envelope.Message, envelope.Peer) - bs.send(ctx, envelope.Peer, envelope.Message) - } - } - } -} - -// TODO ensure only one active request per key -func (bs *bitswap) clientWorker(parent context.Context) { - defer log.Info("bitswap client worker shutting down...") - - for { - select { - case req := <-bs.batchRequests: - keys := req.keys - if len(keys) == 0 { - log.Warning("Received batch request for zero blocks") - continue - } - for i, k := range keys { - bs.wantlist.Add(k, kMaxPriority-i) - } - - bs.wantNewBlocks(req.ctx, keys) - - // NB: Optimization. Assumes that providers of key[0] are likely to - // be able to provide for all keys. 
This currently holds true in most - // every situation. Later, this assumption may not hold as true. - child, _ := context.WithTimeout(req.ctx, providerRequestTimeout) - providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest) - err := bs.sendWantlistToPeers(req.ctx, providers) - if err != nil { - log.Debugf("error sending wantlist: %s", err) - } - case <-parent.Done(): - return - } - } -} - -func (bs *bitswap) rebroadcastWorker(parent context.Context) { - ctx, cancel := context.WithCancel(parent) - defer cancel() - - broadcastSignal := time.After(rebroadcastDelay.Get()) - - for { - select { - case <-time.Tick(10 * time.Second): - n := bs.wantlist.Len() - if n > 0 { - log.Debug(n, inflect.FromNumber("keys", n), "in bitswap wantlist") - } - case <-broadcastSignal: // resend unfulfilled wantlist keys - entries := bs.wantlist.Entries() - if len(entries) > 0 { - bs.sendWantlistToProviders(ctx, entries) - } - broadcastSignal = time.After(rebroadcastDelay.Get()) - case <-parent.Done(): - return - } - } -} diff --git a/bitswap/workers.go b/bitswap/workers.go new file mode 100644 index 000000000..f2f348305 --- /dev/null +++ b/bitswap/workers.go @@ -0,0 +1,133 @@ +package bitswap + +import ( + "time" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + inflect "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/inflect" + process "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" +) + +func (bs *bitswap) startWorkers(px process.Process, ctx context.Context) { + // Start up a worker to handle block requests this node is making + px.Go(func(px process.Process) { + bs.clientWorker(ctx) + }) + + // Start up a worker to handle requests from other nodes for the data on this node + px.Go(func(px process.Process) { + bs.taskWorker(ctx) + }) + + // Start up a worker to manage periodically resending our wantlist out to peers + px.Go(func(px process.Process) { + 
bs.rebroadcastWorker(ctx) + }) + + // Spawn up multiple workers to handle incoming blocks + // consider increasing number if providing blocks bottlenecks + // file transfers + for i := 0; i < provideWorkers; i++ { + px.Go(func(px process.Process) { + bs.blockReceiveWorker(ctx) + }) + } +} + +func (bs *bitswap) taskWorker(ctx context.Context) { + defer log.Info("bitswap task worker shutting down...") + for { + select { + case nextEnvelope := <-bs.engine.Outbox(): + select { + case envelope, ok := <-nextEnvelope: + if !ok { + continue + } + log.Event(ctx, "deliverBlocks", envelope.Message, envelope.Peer) + bs.send(ctx, envelope.Peer, envelope.Message) + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } +} + +func (bs *bitswap) blockReceiveWorker(ctx context.Context) { + for { + select { + case blk, ok := <-bs.newBlocks: + if !ok { + log.Debug("newBlocks channel closed") + return + } + ctx, _ := context.WithTimeout(ctx, provideTimeout) + err := bs.network.Provide(ctx, blk.Key()) + if err != nil { + log.Error(err) + } + case <-ctx.Done(): + return + } + } +} + +// TODO ensure only one active request per key +func (bs *bitswap) clientWorker(parent context.Context) { + defer log.Info("bitswap client worker shutting down...") + + for { + select { + case req := <-bs.batchRequests: + keys := req.keys + if len(keys) == 0 { + log.Warning("Received batch request for zero blocks") + continue + } + for i, k := range keys { + bs.wantlist.Add(k, kMaxPriority-i) + } + + bs.wantNewBlocks(req.ctx, keys) + + // NB: Optimization. Assumes that providers of key[0] are likely to + // be able to provide for all keys. This currently holds true in most + // every situation. Later, this assumption may not hold as true. 
+ child, _ := context.WithTimeout(req.ctx, providerRequestTimeout) + providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest) + err := bs.sendWantlistToPeers(req.ctx, providers) + if err != nil { + log.Debugf("error sending wantlist: %s", err) + } + case <-parent.Done(): + return + } + } +} + +func (bs *bitswap) rebroadcastWorker(parent context.Context) { + ctx, cancel := context.WithCancel(parent) + defer cancel() + + broadcastSignal := time.After(rebroadcastDelay.Get()) + + for { + select { + case <-time.Tick(10 * time.Second): + n := bs.wantlist.Len() + if n > 0 { + log.Debug(n, inflect.FromNumber("keys", n), "in bitswap wantlist") + } + case <-broadcastSignal: // resend unfulfilled wantlist keys + entries := bs.wantlist.Entries() + if len(entries) > 0 { + bs.sendWantlistToProviders(ctx, entries) + } + broadcastSignal = time.After(rebroadcastDelay.Get()) + case <-parent.Done(): + return + } + } +} From deb50da47c099729eaa50ea2d77b6df895738286 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 19 Feb 2015 13:41:18 -0800 Subject: [PATCH 0341/1035] rename for clarity This commit was moved from ipfs/go-bitswap@5577b33896d81dfb625efaeecdb0eaab62e6e92a --- bitswap/workers.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index f2f348305..0c6e45604 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -29,7 +29,7 @@ func (bs *bitswap) startWorkers(px process.Process, ctx context.Context) { // file transfers for i := 0; i < provideWorkers; i++ { px.Go(func(px process.Process) { - bs.blockReceiveWorker(ctx) + bs.provideWorker(ctx) }) } } @@ -55,7 +55,7 @@ func (bs *bitswap) taskWorker(ctx context.Context) { } } -func (bs *bitswap) blockReceiveWorker(ctx context.Context) { +func (bs *bitswap) provideWorker(ctx context.Context) { for { select { case blk, ok := <-bs.newBlocks: From f65dc8b6d5dbfef7259433b81ea98f037d1bf979 Mon Sep 17 00:00:00 2001 From: Henry Date: Mon, 23 Feb 2015 
16:51:09 +0100 Subject: [PATCH 0342/1035] rewrote import paths of go.net/context to use golang.org/x/context - updated go-ctxgroup and goprocess ctxgroup: AddChildGroup was changed to AddChild. Used in two files: - p2p/net/mock/mock_net.go - routing/dht/dht.go - updated context from hg repo to git prev. commit in hg was ad01a6fcc8a19d3a4478c836895ffe883bd2ceab. (context: make parentCancelCtx iterative) represents commit 84f8955a887232b6308d79c68b8db44f64df455c in git repo - updated context to master (b6fdb7d8a4ccefede406f8fe0f017fb58265054c) Aaron Jacobs (2): net/context: Don't accept a context in the DoSomethingSlow example. context: Be clear that users must cancel the result of WithCancel. Andrew Gerrand (1): go.net: use golang.org/x/... import paths Bryan C. Mills (1): net/context: Don't leak goroutines in Done example. Damien Neil (1): context: fix removal of cancelled timer contexts from parent David Symonds (2): context: Fix WithValue example code. net: add import comments. Sameer Ajmani (1): context: fix TestAllocs to account for ints in interfaces This commit was moved from ipfs/go-bitswap@234cb05f38c7729533ee846eb72cf8a49aff6942 --- bitswap/bitswap.go | 3 +-- bitswap/bitswap_test.go | 3 +-- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/network/interface.go | 3 +-- bitswap/network/ipfs_impl.go | 3 +-- bitswap/notifications/notifications.go | 3 +-- bitswap/notifications/notifications_test.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 2 +- bitswap/workers.go | 2 +- 13 files changed, 13 insertions(+), 18 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 3046c987c..500817b0a 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -7,9 +7,8 @@ import ( "sync" "time" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" process 
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" - + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" blocks "github.com/jbenet/go-ipfs/blocks" blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" exchange "github.com/jbenet/go-ipfs/exchange" diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 6192773a4..781bde91f 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -6,8 +6,7 @@ import ( "testing" "time" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" blocks "github.com/jbenet/go-ipfs/blocks" blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 11edf5f6d..534f7ae65 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -4,7 +4,7 @@ package decision import ( "sync" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" bstore "github.com/jbenet/go-ipfs/blocks/blockstore" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 8e5ab672c..dec19281b 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -8,9 +8,9 @@ import ( "sync" "testing" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" dssync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" 
blocks "github.com/jbenet/go-ipfs/blocks" blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" message "github.com/jbenet/go-ipfs/exchange/bitswap/message" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 857201152..aa87e3126 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -1,8 +1,7 @@ package network import ( - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" peer "github.com/jbenet/go-ipfs/p2p/peer" protocol "github.com/jbenet/go-ipfs/p2p/protocol" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index d9458776e..9d5c94535 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -1,9 +1,8 @@ package network import ( - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" - + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" host "github.com/jbenet/go-ipfs/p2p/host" inet "github.com/jbenet/go-ipfs/p2p/net" diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 4616ac735..8797792cf 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -1,9 +1,8 @@ package notifications import ( - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" pubsub "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/maybebtc/pubsub" - + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" blocks "github.com/jbenet/go-ipfs/blocks" u "github.com/jbenet/go-ipfs/util" ) diff --git a/bitswap/notifications/notifications_test.go 
b/bitswap/notifications/notifications_test.go index 372b1e139..97f28d1b9 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" blocks "github.com/jbenet/go-ipfs/blocks" blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil" "github.com/jbenet/go-ipfs/util" diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 268f93607..8af357bf2 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -4,7 +4,7 @@ import ( "sync" "testing" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" blocks "github.com/jbenet/go-ipfs/blocks" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 1d1d22408..632c12d37 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -1,8 +1,8 @@ package bitswap import ( - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" mockpeernet "github.com/jbenet/go-ipfs/p2p/net/mock" peer "github.com/jbenet/go-ipfs/p2p/peer" diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 7ee082cfd..8bebde357 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -3,7 +3,7 @@ package bitswap import ( "errors" - context 
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/p2p/peer" diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 5a6b59b3a..c14f1abb8 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -3,9 +3,9 @@ package bitswap import ( "time" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" ds_sync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" exchange "github.com/jbenet/go-ipfs/exchange" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" diff --git a/bitswap/workers.go b/bitswap/workers.go index 0c6e45604..8239fced3 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -3,9 +3,9 @@ package bitswap import ( "time" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" inflect "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/inflect" process "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" ) func (bs *bitswap) startWorkers(px process.Process, ctx context.Context) { From 247aba6c876af13bf9618c201a4cabe528dea87a Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 26 Feb 2015 10:12:21 -0800 Subject: [PATCH 0343/1035] make wantlist updates to connected peers happen async, dramatically improves performance between connected nodes This commit was moved from ipfs/go-bitswap@5fb913d6254df910181d05486be4970c6ce6e308 
--- bitswap/bitswap.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 500817b0a..1a4ec73cf 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -369,10 +369,12 @@ func (bs *bitswap) wantNewBlocks(ctx context.Context, bkeys []u.Key) { message.AddEntry(k, kMaxPriority-i) } for _, p := range bs.engine.Peers() { - err := bs.send(ctx, p, message) - if err != nil { - log.Debugf("Error sending message: %s", err) - } + go func(p peer.ID) { + err := bs.send(ctx, p, message) + if err != nil { + log.Debugf("Error sending message: %s", err) + } + }(p) } } From fc31906b3718e8753745ec8676a954ccd4bad422 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 26 Feb 2015 16:43:18 -0800 Subject: [PATCH 0344/1035] make sure not to orphan any extra goroutines This commit was moved from ipfs/go-bitswap@64329ed1c7d5063a6dfb1e1d0e29c12a9057bd43 --- bitswap/bitswap.go | 5 +++++ bitswap/workers.go | 10 +++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 1a4ec73cf..5508f66e3 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -368,14 +368,19 @@ func (bs *bitswap) wantNewBlocks(ctx context.Context, bkeys []u.Key) { for i, k := range bkeys { message.AddEntry(k, kMaxPriority-i) } + + wg := sync.WaitGroup{} for _, p := range bs.engine.Peers() { + wg.Add(1) go func(p peer.ID) { + defer wg.Done() err := bs.send(ctx, p, message) if err != nil { log.Debugf("Error sending message: %s", err) } }(p) } + wg.Wait() } func (bs *bitswap) ReceiveError(err error) { diff --git a/bitswap/workers.go b/bitswap/workers.go index 8239fced3..3753edb62 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -90,7 +90,11 @@ func (bs *bitswap) clientWorker(parent context.Context) { bs.wantlist.Add(k, kMaxPriority-i) } - bs.wantNewBlocks(req.ctx, keys) + done := make(chan struct{}) + go func() { + bs.wantNewBlocks(req.ctx, keys) + close(done) + }() // NB: 
Optimization. Assumes that providers of key[0] are likely to // be able to provide for all keys. This currently holds true in most @@ -101,6 +105,10 @@ func (bs *bitswap) clientWorker(parent context.Context) { if err != nil { log.Debugf("error sending wantlist: %s", err) } + + // Wait for wantNewBlocks to finish + <-done + case <-parent.Done(): return } From fabe46e51b274ad3cfd0be73443952ba5f9a6255 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 2 Feb 2015 02:48:12 +0000 Subject: [PATCH 0345/1035] implement a simple wantlist command to allow the user to view their wantlist This commit was moved from ipfs/go-bitswap@38a0286fef939015eef07eeca927282a8783a687 --- bitswap/bitswap.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 5508f66e3..1101ffd9b 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -402,3 +402,11 @@ func (bs *bitswap) send(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) func (bs *bitswap) Close() error { return bs.process.Close() } + +func (bs *bitswap) GetWantlist() []u.Key { + var out []u.Key + for _, e := range bs.wantlist.Entries() { + out = append(out, e.Key) + } + return out +} From 20decda65ba53fb1a029ebdf9ec058750f11a734 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 20 Feb 2015 01:45:01 -0800 Subject: [PATCH 0346/1035] rename wantlist to bitswap, add stat command This commit was moved from ipfs/go-bitswap@5792e978660e4ed70e13721dcd0079912ec908cc --- bitswap/bitswap.go | 36 ++++++++++++++++++------------------ bitswap/stat.go | 22 ++++++++++++++++++++++ bitswap/workers.go | 10 +++++----- 3 files changed, 45 insertions(+), 23 deletions(-) create mode 100644 bitswap/stat.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 1101ffd9b..d40a13efa 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -79,7 +79,7 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, px.Close() }() - bs := &bitswap{ + bs := &Bitswap{ self: p, blockstore: 
bstore, notifications: notif, @@ -97,8 +97,8 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, return bs } -// bitswap instances implement the bitswap protocol. -type bitswap struct { +// Bitswap instances implement the bitswap protocol. +type Bitswap struct { // the ID of the peer to act on behalf of self peer.ID @@ -133,7 +133,7 @@ type blockRequest struct { // GetBlock attempts to retrieve a particular block from peers within the // deadline enforced by the context. -func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, error) { +func (bs *Bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, error) { // Any async work initiated by this function must end when this function // returns. To ensure this, derive a new context. Note that it is okay to @@ -179,7 +179,7 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err // NB: Your request remains open until the context expires. To conserve // resources, provide a context with a reasonably short deadline (ie. not one // that lasts throughout the lifetime of the server) -func (bs *bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks.Block, error) { +func (bs *Bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks.Block, error) { select { case <-bs.process.Closing(): return nil, errors.New("bitswap is closed") @@ -201,7 +201,7 @@ func (bs *bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks. // HasBlock announces the existance of a block to this bitswap service. The // service will potentially notify its peers. 
-func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { +func (bs *Bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { log.Event(ctx, "hasBlock", blk) select { case <-bs.process.Closing(): @@ -221,7 +221,7 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { return nil } -func (bs *bitswap) sendWantlistMsgToPeers(ctx context.Context, m bsmsg.BitSwapMessage, peers <-chan peer.ID) error { +func (bs *Bitswap) sendWantlistMsgToPeers(ctx context.Context, m bsmsg.BitSwapMessage, peers <-chan peer.ID) error { set := pset.New() wg := sync.WaitGroup{} for peerToQuery := range peers { @@ -242,7 +242,7 @@ func (bs *bitswap) sendWantlistMsgToPeers(ctx context.Context, m bsmsg.BitSwapMe return nil } -func (bs *bitswap) sendWantlistToPeers(ctx context.Context, peers <-chan peer.ID) error { +func (bs *Bitswap) sendWantlistToPeers(ctx context.Context, peers <-chan peer.ID) error { message := bsmsg.New() message.SetFull(true) for _, wanted := range bs.wantlist.Entries() { @@ -251,7 +251,7 @@ func (bs *bitswap) sendWantlistToPeers(ctx context.Context, peers <-chan peer.ID return bs.sendWantlistMsgToPeers(ctx, message, peers) } -func (bs *bitswap) sendWantlistToProviders(ctx context.Context, entries []wantlist.Entry) { +func (bs *Bitswap) sendWantlistToProviders(ctx context.Context, entries []wantlist.Entry) { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -286,7 +286,7 @@ func (bs *bitswap) sendWantlistToProviders(ctx context.Context, entries []wantli } // TODO(brian): handle errors -func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) ( +func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) ( peer.ID, bsmsg.BitSwapMessage) { defer log.EventBegin(ctx, "receiveMessage", p, incoming).Done() @@ -325,7 +325,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg } // Connected/Disconnected warns 
bitswap about peer connections -func (bs *bitswap) PeerConnected(p peer.ID) { +func (bs *Bitswap) PeerConnected(p peer.ID) { // TODO: add to clientWorker?? peers := make(chan peer.ID, 1) peers <- p @@ -337,11 +337,11 @@ func (bs *bitswap) PeerConnected(p peer.ID) { } // Connected/Disconnected warns bitswap about peer connections -func (bs *bitswap) PeerDisconnected(p peer.ID) { +func (bs *Bitswap) PeerDisconnected(p peer.ID) { bs.engine.PeerDisconnected(p) } -func (bs *bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { +func (bs *Bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { if len(bkeys) < 1 { return } @@ -358,7 +358,7 @@ func (bs *bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { } } -func (bs *bitswap) wantNewBlocks(ctx context.Context, bkeys []u.Key) { +func (bs *Bitswap) wantNewBlocks(ctx context.Context, bkeys []u.Key) { if len(bkeys) < 1 { return } @@ -383,7 +383,7 @@ func (bs *bitswap) wantNewBlocks(ctx context.Context, bkeys []u.Key) { wg.Wait() } -func (bs *bitswap) ReceiveError(err error) { +func (bs *Bitswap) ReceiveError(err error) { log.Debugf("Bitswap ReceiveError: %s", err) // TODO log the network error // TODO bubble the network error up to the parent context/error logger @@ -391,7 +391,7 @@ func (bs *bitswap) ReceiveError(err error) { // send strives to ensure that accounting is always performed when a message is // sent -func (bs *bitswap) send(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) error { +func (bs *Bitswap) send(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) error { defer log.EventBegin(ctx, "sendMessage", p, m).Done() if err := bs.network.SendMessage(ctx, p, m); err != nil { return errors.Wrap(err) @@ -399,11 +399,11 @@ func (bs *bitswap) send(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) return bs.engine.MessageSent(p, m) } -func (bs *bitswap) Close() error { +func (bs *Bitswap) Close() error { return bs.process.Close() } -func (bs *bitswap) GetWantlist() []u.Key { +func 
(bs *Bitswap) GetWantlist() []u.Key { var out []u.Key for _, e := range bs.wantlist.Entries() { out = append(out, e.Key) diff --git a/bitswap/stat.go b/bitswap/stat.go new file mode 100644 index 000000000..f3c213f03 --- /dev/null +++ b/bitswap/stat.go @@ -0,0 +1,22 @@ +package bitswap + +import ( + peer "github.com/jbenet/go-ipfs/p2p/peer" + u "github.com/jbenet/go-ipfs/util" +) + +type Stat struct { + ProvideBufLen int + Wantlist []u.Key + Peers []peer.ID +} + +func (bs *Bitswap) Stat() (*Stat, error) { + st := new(Stat) + st.ProvideBufLen = len(bs.newBlocks) + st.Wantlist = bs.GetWantlist() + + st.Peers = bs.engine.Peers() + + return st, nil +} diff --git a/bitswap/workers.go b/bitswap/workers.go index 3753edb62..1b28aedb1 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -8,7 +8,7 @@ import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" ) -func (bs *bitswap) startWorkers(px process.Process, ctx context.Context) { +func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { // Start up a worker to handle block requests this node is making px.Go(func(px process.Process) { bs.clientWorker(ctx) @@ -34,7 +34,7 @@ func (bs *bitswap) startWorkers(px process.Process, ctx context.Context) { } } -func (bs *bitswap) taskWorker(ctx context.Context) { +func (bs *Bitswap) taskWorker(ctx context.Context) { defer log.Info("bitswap task worker shutting down...") for { select { @@ -55,7 +55,7 @@ func (bs *bitswap) taskWorker(ctx context.Context) { } } -func (bs *bitswap) provideWorker(ctx context.Context) { +func (bs *Bitswap) provideWorker(ctx context.Context) { for { select { case blk, ok := <-bs.newBlocks: @@ -75,7 +75,7 @@ func (bs *bitswap) provideWorker(ctx context.Context) { } // TODO ensure only one active request per key -func (bs *bitswap) clientWorker(parent context.Context) { +func (bs *Bitswap) clientWorker(parent context.Context) { defer log.Info("bitswap client worker shutting down...") for { @@ -115,7 
+115,7 @@ func (bs *bitswap) clientWorker(parent context.Context) { } } -func (bs *bitswap) rebroadcastWorker(parent context.Context) { +func (bs *Bitswap) rebroadcastWorker(parent context.Context) { ctx, cancel := context.WithCancel(parent) defer cancel() From 5109d2fa3a37cc8e09ca31a5cf96cdda9cd81c28 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 20 Feb 2015 03:15:49 -0800 Subject: [PATCH 0347/1035] fix output formatting on stat This commit was moved from ipfs/go-bitswap@92089f5aa05651e509eb2474cbaaecccc95e153e --- bitswap/bitswap.go | 4 ++-- bitswap/stat.go | 9 ++++++--- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d40a13efa..3a81015be 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -40,7 +40,7 @@ const ( // kMaxPriority is the max priority as defined by the bitswap protocol kMaxPriority = math.MaxInt32 - hasBlockBufferSize = 256 + HasBlockBufferSize = 256 provideWorkers = 4 ) @@ -88,7 +88,7 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, wantlist: wantlist.NewThreadSafe(), batchRequests: make(chan *blockRequest, sizeBatchRequestChan), process: px, - newBlocks: make(chan *blocks.Block, hasBlockBufferSize), + newBlocks: make(chan *blocks.Block, HasBlockBufferSize), } network.SetDelegate(bs) diff --git a/bitswap/stat.go b/bitswap/stat.go index f3c213f03..4e37443ef 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -1,14 +1,14 @@ package bitswap import ( - peer "github.com/jbenet/go-ipfs/p2p/peer" u "github.com/jbenet/go-ipfs/util" + "sort" ) type Stat struct { ProvideBufLen int Wantlist []u.Key - Peers []peer.ID + Peers []string } func (bs *Bitswap) Stat() (*Stat, error) { @@ -16,7 +16,10 @@ func (bs *Bitswap) Stat() (*Stat, error) { st.ProvideBufLen = len(bs.newBlocks) st.Wantlist = bs.GetWantlist() - st.Peers = bs.engine.Peers() + for _, p := range bs.engine.Peers() { + st.Peers = append(st.Peers, p.Pretty()) + } + sort.Strings(st.Peers) return st, nil } 
From 10eb44c43e81ace549e2ca4d1018d34c9db2a4f1 Mon Sep 17 00:00:00 2001 From: Henry Date: Fri, 27 Feb 2015 14:40:45 +0100 Subject: [PATCH 0348/1035] godeps: maybebtc renamed is account This commit was moved from ipfs/go-bitswap@6d9153fe97b5168ea4953fbcdd261d22533e9c4b --- bitswap/notifications/notifications.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 8797792cf..829f7288f 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -1,7 +1,7 @@ package notifications import ( - pubsub "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/maybebtc/pubsub" + pubsub "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" blocks "github.com/jbenet/go-ipfs/blocks" u "github.com/jbenet/go-ipfs/util" From 0a5022dcfa4f9c4bf447856448dae63fdcfad986 Mon Sep 17 00:00:00 2001 From: Henry Date: Sun, 1 Mar 2015 03:56:54 +0100 Subject: [PATCH 0349/1035] godep: changed back to inflect upstream This commit was moved from ipfs/go-bitswap@f3c8024ccd1b6cd8dde96dd1b0ca7ad0eccc5307 --- bitswap/workers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index 1b28aedb1..da521ef46 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -3,7 +3,7 @@ package bitswap import ( "time" - inflect "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/inflect" + inflect "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/chuckpreslar/inflect" process "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" ) From a1ea02eb753ce16c7e218cc6aedb706c3a5dc799 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Mon, 2 Mar 2015 01:58:54 -0800 Subject: 
[PATCH 0350/1035] testfix: dont break 8k goroutine limit under race This commit was moved from ipfs/go-bitswap@309229dad10c639c4c5ac27342d07ce576d05dd8 --- bitswap/bitswap_test.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 781bde91f..21ad69dfb 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -6,7 +6,9 @@ import ( "testing" "time" + detectrace "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + blocks "github.com/jbenet/go-ipfs/blocks" blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil" tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" @@ -93,9 +95,15 @@ func TestLargeSwarm(t *testing.T) { if testing.Short() { t.SkipNow() } - t.Parallel() numInstances := 500 numBlocks := 2 + if detectrace.WithRace() { + // when running with the race detector, 500 instances launches + // well over 8k goroutines. This hits a race detector limit. 
+ numInstances = 100 + } else { + t.Parallel() + } PerformDistributionTest(t, numInstances, numBlocks) } From 301942d890777e8a998aab47f5f5131e5551970c Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 5 Mar 2015 15:18:57 -0800 Subject: [PATCH 0351/1035] implement a worker to consolidate HasBlock provide calls into one to alieviate memory pressure This commit was moved from ipfs/go-bitswap@6e6c663876c49c228d5ad50205e4ed46611ab941 --- bitswap/bitswap.go | 3 +++ bitswap/workers.go | 56 +++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 56 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 3a81015be..60672d0c3 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -89,6 +89,7 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, batchRequests: make(chan *blockRequest, sizeBatchRequestChan), process: px, newBlocks: make(chan *blocks.Block, HasBlockBufferSize), + provideKeys: make(chan u.Key), } network.SetDelegate(bs) @@ -124,6 +125,8 @@ type Bitswap struct { process process.Process newBlocks chan *blocks.Block + + provideKeys chan u.Key } type blockRequest struct { diff --git a/bitswap/workers.go b/bitswap/workers.go index da521ef46..a14b30092 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -6,6 +6,7 @@ import ( inflect "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/chuckpreslar/inflect" process "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + u "github.com/jbenet/go-ipfs/util" ) func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { @@ -24,6 +25,10 @@ func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { bs.rebroadcastWorker(ctx) }) + px.Go(func(px process.Process) { + bs.provideCollector(ctx) + }) + // Spawn up multiple workers to handle incoming blocks // consider increasing number if providing blocks bottlenecks 
// file transfers @@ -58,13 +63,13 @@ func (bs *Bitswap) taskWorker(ctx context.Context) { func (bs *Bitswap) provideWorker(ctx context.Context) { for { select { - case blk, ok := <-bs.newBlocks: + case k, ok := <-bs.provideKeys: if !ok { - log.Debug("newBlocks channel closed") + log.Debug("provideKeys channel closed") return } ctx, _ := context.WithTimeout(ctx, provideTimeout) - err := bs.network.Provide(ctx, blk.Key()) + err := bs.network.Provide(ctx, k) if err != nil { log.Error(err) } @@ -74,6 +79,51 @@ func (bs *Bitswap) provideWorker(ctx context.Context) { } } +func (bs *Bitswap) provideCollector(ctx context.Context) { + defer close(bs.provideKeys) + var toprovide []u.Key + var nextKey u.Key + + select { + case blk, ok := <-bs.newBlocks: + if !ok { + log.Debug("newBlocks channel closed") + return + } + nextKey = blk.Key() + case <-ctx.Done(): + return + } + + for { + select { + case blk, ok := <-bs.newBlocks: + if !ok { + log.Debug("newBlocks channel closed") + return + } + toprovide = append(toprovide, blk.Key()) + case bs.provideKeys <- nextKey: + if len(toprovide) > 0 { + nextKey = toprovide[0] + toprovide = toprovide[1:] + } else { + select { + case blk, ok := <-bs.newBlocks: + if !ok { + return + } + nextKey = blk.Key() + case <-ctx.Done(): + return + } + } + case <-ctx.Done(): + return + } + } +} + // TODO ensure only one active request per key func (bs *Bitswap) clientWorker(parent context.Context) { defer log.Info("bitswap client worker shutting down...") From cc227f03fc1effbf14f476b417295a9354ff2538 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 5 Mar 2015 16:27:47 -0800 Subject: [PATCH 0352/1035] simplify provideCollector This commit was moved from ipfs/go-bitswap@c3ce1319e194b636abb8386465cb4f90677dd165 --- bitswap/workers.go | 31 +++++++++---------------------- 1 file changed, 9 insertions(+), 22 deletions(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index a14b30092..f5f6e6553 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go 
@@ -83,17 +83,7 @@ func (bs *Bitswap) provideCollector(ctx context.Context) { defer close(bs.provideKeys) var toprovide []u.Key var nextKey u.Key - - select { - case blk, ok := <-bs.newBlocks: - if !ok { - log.Debug("newBlocks channel closed") - return - } - nextKey = blk.Key() - case <-ctx.Done(): - return - } + var keysOut chan u.Key for { select { @@ -102,21 +92,18 @@ func (bs *Bitswap) provideCollector(ctx context.Context) { log.Debug("newBlocks channel closed") return } - toprovide = append(toprovide, blk.Key()) - case bs.provideKeys <- nextKey: + if keysOut == nil { + nextKey = blk.Key() + keysOut = bs.provideKeys + } else { + toprovide = append(toprovide, blk.Key()) + } + case keysOut <- nextKey: if len(toprovide) > 0 { nextKey = toprovide[0] toprovide = toprovide[1:] } else { - select { - case blk, ok := <-bs.newBlocks: - if !ok { - return - } - nextKey = blk.Key() - case <-ctx.Done(): - return - } + keysOut = nil } case <-ctx.Done(): return From 7d650c0c5dd6f54c1d8c8dfe77d3416cbc5bc3e2 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 5 Mar 2015 16:37:40 -0800 Subject: [PATCH 0353/1035] toprovide -> toProvide This commit was moved from ipfs/go-bitswap@006dd2cadf6667397cc6315510572eea7f69ce12 --- bitswap/workers.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index f5f6e6553..967c1bc0c 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -81,7 +81,7 @@ func (bs *Bitswap) provideWorker(ctx context.Context) { func (bs *Bitswap) provideCollector(ctx context.Context) { defer close(bs.provideKeys) - var toprovide []u.Key + var toProvide []u.Key var nextKey u.Key var keysOut chan u.Key @@ -96,12 +96,12 @@ func (bs *Bitswap) provideCollector(ctx context.Context) { nextKey = blk.Key() keysOut = bs.provideKeys } else { - toprovide = append(toprovide, blk.Key()) + toProvide = append(toProvide, blk.Key()) } case keysOut <- nextKey: - if len(toprovide) > 0 { - nextKey = toprovide[0] - 
toprovide = toprovide[1:] + if len(toProvide) > 0 { + nextKey = toProvide[0] + toProvide = toProvide[1:] } else { keysOut = nil } From 1fee347c85bc105bf218dcaed4a8b0608d25d07d Mon Sep 17 00:00:00 2001 From: Henry Date: Sat, 7 Mar 2015 11:47:19 +0100 Subject: [PATCH 0354/1035] fixed two more This commit was moved from ipfs/go-bitswap@f91baafb0c9f2ebbd388733b6ac76998920add95 --- bitswap/workers.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index 967c1bc0c..0a9b7aa92 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -68,11 +68,12 @@ func (bs *Bitswap) provideWorker(ctx context.Context) { log.Debug("provideKeys channel closed") return } - ctx, _ := context.WithTimeout(ctx, provideTimeout) + ctx, cancel := context.WithTimeout(ctx, provideTimeout) err := bs.network.Provide(ctx, k) if err != nil { log.Error(err) } + cancel() case <-ctx.Done(): return } @@ -136,12 +137,13 @@ func (bs *Bitswap) clientWorker(parent context.Context) { // NB: Optimization. Assumes that providers of key[0] are likely to // be able to provide for all keys. This currently holds true in most // every situation. Later, this assumption may not hold as true. 
- child, _ := context.WithTimeout(req.ctx, providerRequestTimeout) + child, cancel := context.WithTimeout(req.ctx, providerRequestTimeout) providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest) err := bs.sendWantlistToPeers(req.ctx, providers) if err != nil { log.Debugf("error sending wantlist: %s", err) } + cancel() // Wait for wantNewBlocks to finish <-done From a237bafd790548af2cbd56176a1a578a088adf02 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Sat, 7 Mar 2015 09:31:46 -0800 Subject: [PATCH 0355/1035] added cancel func calls previously ignored This commit was moved from ipfs/go-bitswap@3c10e99cbd98660654e233faac658e294c441e68 --- bitswap/bitswap.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 60672d0c3..5271e23f1 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -269,7 +269,8 @@ func (bs *Bitswap) sendWantlistToProviders(ctx context.Context, entries []wantli go func(k u.Key) { defer wg.Done() - child, _ := context.WithTimeout(ctx, providerRequestTimeout) + child, cancel := context.WithTimeout(ctx, providerRequestTimeout) + defer cancel() providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest) for prov := range providers { sendToPeers <- prov @@ -311,10 +312,11 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg // Should only track *useful* messages in ledger for _, block := range incoming.Blocks() { - hasBlockCtx, _ := context.WithTimeout(ctx, hasBlockTimeout) + hasBlockCtx, cancel := context.WithTimeout(ctx, hasBlockTimeout) if err := bs.HasBlock(hasBlockCtx, block); err != nil { log.Debug(err) } + cancel() } var keys []u.Key From d2b21f1481fd9de54a52cce7eb78f616a1bcd894 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 8 Mar 2015 14:10:02 -0700 Subject: [PATCH 0356/1035] respect contexts in a more timely manner This commit was moved from 
ipfs/go-bitswap@8de772f404602f36c700dc03275a9596d974e1c3 --- bitswap/bitswap.go | 51 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 39 insertions(+), 12 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 5271e23f1..91105b20a 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -227,21 +227,40 @@ func (bs *Bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { func (bs *Bitswap) sendWantlistMsgToPeers(ctx context.Context, m bsmsg.BitSwapMessage, peers <-chan peer.ID) error { set := pset.New() wg := sync.WaitGroup{} - for peerToQuery := range peers { - if !set.TryAdd(peerToQuery) { //Do once per peer - continue - } +loop: + for { + select { + case peerToQuery, ok := <-peers: + if !ok { + break loop + } - wg.Add(1) - go func(p peer.ID) { - defer wg.Done() - if err := bs.send(ctx, p, m); err != nil { - log.Debug(err) // TODO remove if too verbose + if !set.TryAdd(peerToQuery) { //Do once per peer + continue } - }(peerToQuery) + + wg.Add(1) + go func(p peer.ID) { + defer wg.Done() + if err := bs.send(ctx, p, m); err != nil { + log.Debug(err) // TODO remove if too verbose + } + }(peerToQuery) + case <-ctx.Done(): + return nil + } + } + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + select { + case <-done: + case <-ctx.Done(): } - wg.Wait() return nil } @@ -385,7 +404,15 @@ func (bs *Bitswap) wantNewBlocks(ctx context.Context, bkeys []u.Key) { } }(p) } - wg.Wait() + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-ctx.Done(): + } } func (bs *Bitswap) ReceiveError(err error) { From f426825c78bdbcfc659437e519bf94584da09138 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 9 Mar 2015 00:03:59 -0700 Subject: [PATCH 0357/1035] add warning comment about possibly leaked goroutines This commit was moved from ipfs/go-bitswap@bb99f55bf327ee4d57115fc5ff8f8b41c6c89c65 --- bitswap/bitswap.go | 6 ++++++ 1 file changed, 6 insertions(+) diff 
--git a/bitswap/bitswap.go b/bitswap/bitswap.go index 91105b20a..649b3cc48 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -260,6 +260,9 @@ loop: select { case <-done: case <-ctx.Done(): + // NB: we may be abandoning goroutines here before they complete + // this shouldnt be an issue because they will complete soon anyways + // we just don't want their being slow to impact bitswap transfer speeds } return nil } @@ -412,6 +415,9 @@ func (bs *Bitswap) wantNewBlocks(ctx context.Context, bkeys []u.Key) { select { case <-done: case <-ctx.Done(): + // NB: we may be abandoning goroutines here before they complete + // this shouldnt be an issue because they will complete soon anyways + // we just don't want their being slow to impact bitswap transfer speeds } } From c1adc55f71d842a78ff1356711a14ae131d8f9d3 Mon Sep 17 00:00:00 2001 From: Ho-Sheng Hsiao Date: Mon, 30 Mar 2015 20:04:32 -0700 Subject: [PATCH 0358/1035] Reorged imports from jbenet/go-ipfs to ipfs/go-ipfs - Modified Godeps/Godeps.json by hand - [TEST] Updated welcome docs hash to sharness - [TEST] Updated contact doc - [TEST] disabled breaking test (t0080-repo refs local) This commit was moved from ipfs/go-bitswap@22913170dd7be31147b7d247438eee86eab2c858 --- bitswap/bitswap.go | 32 ++++++++++----------- bitswap/bitswap_test.go | 20 ++++++------- bitswap/decision/bench_test.go | 8 +++--- bitswap/decision/engine.go | 12 ++++---- bitswap/decision/engine_test.go | 16 +++++------ bitswap/decision/ledger.go | 6 ++-- bitswap/decision/peer_request_queue.go | 8 +++--- bitswap/decision/peer_request_queue_test.go | 6 ++-- bitswap/message/internal/pb/message.pb.go | 2 +- bitswap/message/message.go | 16 +++++------ bitswap/message/message_test.go | 8 +++--- bitswap/network/interface.go | 10 +++---- bitswap/network/ipfs_impl.go | 18 ++++++------ bitswap/notifications/notifications.go | 8 +++--- bitswap/notifications/notifications_test.go | 8 +++--- bitswap/stat.go | 2 +- bitswap/testnet/interface.go | 6 ++-- 
bitswap/testnet/network_test.go | 16 +++++------ bitswap/testnet/peernet.go | 14 ++++----- bitswap/testnet/virtual.go | 18 ++++++------ bitswap/testutils.go | 22 +++++++------- bitswap/wantlist/wantlist.go | 2 +- bitswap/workers.go | 8 +++--- 23 files changed, 133 insertions(+), 133 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 649b3cc48..78a421b57 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -7,22 +7,22 @@ import ( "sync" "time" - process "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" - blocks "github.com/jbenet/go-ipfs/blocks" - blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" - exchange "github.com/jbenet/go-ipfs/exchange" - decision "github.com/jbenet/go-ipfs/exchange/bitswap/decision" - bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" - bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" - notifications "github.com/jbenet/go-ipfs/exchange/bitswap/notifications" - wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" - peer "github.com/jbenet/go-ipfs/p2p/peer" - "github.com/jbenet/go-ipfs/thirdparty/delay" - eventlog "github.com/jbenet/go-ipfs/thirdparty/eventlog" - u "github.com/jbenet/go-ipfs/util" - errors "github.com/jbenet/go-ipfs/util/debugerror" - pset "github.com/jbenet/go-ipfs/util/peerset" // TODO move this to peerstore + process "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + blocks "github.com/ipfs/go-ipfs/blocks" + blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" + exchange "github.com/ipfs/go-ipfs/exchange" + decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" + bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" + notifications 
"github.com/ipfs/go-ipfs/exchange/bitswap/notifications" + wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + peer "github.com/ipfs/go-ipfs/p2p/peer" + "github.com/ipfs/go-ipfs/thirdparty/delay" + eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog" + u "github.com/ipfs/go-ipfs/util" + errors "github.com/ipfs/go-ipfs/util/debugerror" + pset "github.com/ipfs/go-ipfs/util/peerset" // TODO move this to peerstore ) var log = eventlog.Logger("bitswap") diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 21ad69dfb..85b3c0ec8 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -6,16 +6,16 @@ import ( "testing" "time" - detectrace "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" - - blocks "github.com/jbenet/go-ipfs/blocks" - blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil" - tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" - p2ptestutil "github.com/jbenet/go-ipfs/p2p/test/util" - mockrouting "github.com/jbenet/go-ipfs/routing/mock" - delay "github.com/jbenet/go-ipfs/thirdparty/delay" - u "github.com/jbenet/go-ipfs/util" + detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + + blocks "github.com/ipfs/go-ipfs/blocks" + blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" + tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" + p2ptestutil "github.com/ipfs/go-ipfs/p2p/test/util" + mockrouting "github.com/ipfs/go-ipfs/routing/mock" + delay "github.com/ipfs/go-ipfs/thirdparty/delay" + u "github.com/ipfs/go-ipfs/util" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index a79c32b05..0a1e53ce1 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -4,10 +4,10 @@ import ( "math" "testing" - "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" - "github.com/jbenet/go-ipfs/p2p/peer" - "github.com/jbenet/go-ipfs/util" - "github.com/jbenet/go-ipfs/util/testutil" + "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + "github.com/ipfs/go-ipfs/p2p/peer" + "github.com/ipfs/go-ipfs/util" + "github.com/ipfs/go-ipfs/util/testutil" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 534f7ae65..380c868b6 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -4,12 +4,12 @@ package decision import ( "sync" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" - bstore "github.com/jbenet/go-ipfs/blocks/blockstore" - bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" - wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" - peer "github.com/jbenet/go-ipfs/p2p/peer" - eventlog "github.com/jbenet/go-ipfs/thirdparty/eventlog" + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + bstore "github.com/ipfs/go-ipfs/blocks/blockstore" + bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + peer "github.com/ipfs/go-ipfs/p2p/peer" + eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog" ) // TODO consider taking responsibility for other types of requests. 
For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index dec19281b..b69f8b1df 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -8,14 +8,14 @@ import ( "sync" "testing" - ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" - dssync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" - blocks "github.com/jbenet/go-ipfs/blocks" - blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" - message "github.com/jbenet/go-ipfs/exchange/bitswap/message" - peer "github.com/jbenet/go-ipfs/p2p/peer" - testutil "github.com/jbenet/go-ipfs/util/testutil" + ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + blocks "github.com/ipfs/go-ipfs/blocks" + blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" + message "github.com/ipfs/go-ipfs/exchange/bitswap/message" + peer "github.com/ipfs/go-ipfs/p2p/peer" + testutil "github.com/ipfs/go-ipfs/util/testutil" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 8e1eb83ee..51b1bc914 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -3,9 +3,9 @@ package decision import ( "time" - wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" - peer "github.com/jbenet/go-ipfs/p2p/peer" - u "github.com/jbenet/go-ipfs/util" + wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + peer "github.com/ipfs/go-ipfs/p2p/peer" + u "github.com/ipfs/go-ipfs/util" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 8b9b1c2f2..a83d2675f 
100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -4,10 +4,10 @@ import ( "sync" "time" - wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" - peer "github.com/jbenet/go-ipfs/p2p/peer" - pq "github.com/jbenet/go-ipfs/thirdparty/pq" - u "github.com/jbenet/go-ipfs/util" + wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + peer "github.com/ipfs/go-ipfs/p2p/peer" + pq "github.com/ipfs/go-ipfs/thirdparty/pq" + u "github.com/ipfs/go-ipfs/util" ) type peerRequestQueue interface { diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index fa6102d67..69d866937 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -7,9 +7,9 @@ import ( "strings" "testing" - "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" - "github.com/jbenet/go-ipfs/util" - "github.com/jbenet/go-ipfs/util/testutil" + "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + "github.com/ipfs/go-ipfs/util" + "github.com/ipfs/go-ipfs/util/testutil" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/message/internal/pb/message.pb.go b/bitswap/message/internal/pb/message.pb.go index 4ddfc56f7..9486ebb1b 100644 --- a/bitswap/message/internal/pb/message.pb.go +++ b/bitswap/message/internal/pb/message.pb.go @@ -13,7 +13,7 @@ It has these top-level messages: */ package bitswap_message_pb -import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" +import proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" import math "math" // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 68748c0d8..0952c2745 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -3,14 +3,14 @@ package message import ( "io" - blocks "github.com/jbenet/go-ipfs/blocks" - pb "github.com/jbenet/go-ipfs/exchange/bitswap/message/internal/pb" - wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist" - inet "github.com/jbenet/go-ipfs/p2p/net" - u "github.com/jbenet/go-ipfs/util" - - ggio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io" - proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + blocks "github.com/ipfs/go-ipfs/blocks" + pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/internal/pb" + wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + inet "github.com/ipfs/go-ipfs/p2p/net" + u "github.com/ipfs/go-ipfs/util" + + ggio "github.com/ipfs/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io" + proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" ) // TODO move message.go into the bitswap package diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index a0df38c0b..6d1df1411 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -4,11 +4,11 @@ import ( "bytes" "testing" - proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" - blocks "github.com/jbenet/go-ipfs/blocks" - pb "github.com/jbenet/go-ipfs/exchange/bitswap/message/internal/pb" - u "github.com/jbenet/go-ipfs/util" + blocks "github.com/ipfs/go-ipfs/blocks" + pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/internal/pb" + u "github.com/ipfs/go-ipfs/util" ) func TestAppendWanted(t *testing.T) { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index aa87e3126..146c73341 
100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -1,11 +1,11 @@ package network import ( - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" - bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" - peer "github.com/jbenet/go-ipfs/p2p/peer" - protocol "github.com/jbenet/go-ipfs/p2p/protocol" - u "github.com/jbenet/go-ipfs/util" + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + peer "github.com/ipfs/go-ipfs/p2p/peer" + protocol "github.com/ipfs/go-ipfs/p2p/protocol" + u "github.com/ipfs/go-ipfs/util" ) var ProtocolBitswap protocol.ID = "/ipfs/bitswap" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 9d5c94535..97745e32d 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -1,15 +1,15 @@ package network import ( - ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" - bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" - host "github.com/jbenet/go-ipfs/p2p/host" - inet "github.com/jbenet/go-ipfs/p2p/net" - peer "github.com/jbenet/go-ipfs/p2p/peer" - routing "github.com/jbenet/go-ipfs/routing" - eventlog "github.com/jbenet/go-ipfs/thirdparty/eventlog" - util "github.com/jbenet/go-ipfs/util" + ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + host "github.com/ipfs/go-ipfs/p2p/host" + inet "github.com/ipfs/go-ipfs/p2p/net" + peer "github.com/ipfs/go-ipfs/p2p/peer" + routing "github.com/ipfs/go-ipfs/routing" + eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog" + util "github.com/ipfs/go-ipfs/util" ) var log = eventlog.Logger("bitswap_network") diff --git 
a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 829f7288f..d1764defc 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -1,10 +1,10 @@ package notifications import ( - pubsub "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" - blocks "github.com/jbenet/go-ipfs/blocks" - u "github.com/jbenet/go-ipfs/util" + pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + blocks "github.com/ipfs/go-ipfs/blocks" + u "github.com/ipfs/go-ipfs/util" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 97f28d1b9..8cf89669b 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" - blocks "github.com/jbenet/go-ipfs/blocks" - blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil" - "github.com/jbenet/go-ipfs/util" + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + blocks "github.com/ipfs/go-ipfs/blocks" + blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" + "github.com/ipfs/go-ipfs/util" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/stat.go b/bitswap/stat.go index 4e37443ef..1c5fec62b 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -1,7 +1,7 @@ package bitswap import ( - u "github.com/jbenet/go-ipfs/util" + u "github.com/ipfs/go-ipfs/util" "sort" ) diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 4b6f46aaf..b0d01b79f 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -1,9 +1,9 @@ package bitswap 
import ( - bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" - peer "github.com/jbenet/go-ipfs/p2p/peer" - "github.com/jbenet/go-ipfs/util/testutil" + bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" + peer "github.com/ipfs/go-ipfs/p2p/peer" + "github.com/ipfs/go-ipfs/util/testutil" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 8af357bf2..8d457d81c 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -4,14 +4,14 @@ import ( "sync" "testing" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" - blocks "github.com/jbenet/go-ipfs/blocks" - bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" - bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" - peer "github.com/jbenet/go-ipfs/p2p/peer" - mockrouting "github.com/jbenet/go-ipfs/routing/mock" - delay "github.com/jbenet/go-ipfs/thirdparty/delay" - testutil "github.com/jbenet/go-ipfs/util/testutil" + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + blocks "github.com/ipfs/go-ipfs/blocks" + bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" + peer "github.com/ipfs/go-ipfs/p2p/peer" + mockrouting "github.com/ipfs/go-ipfs/routing/mock" + delay "github.com/ipfs/go-ipfs/thirdparty/delay" + testutil "github.com/ipfs/go-ipfs/util/testutil" ) func TestSendRequestToCooperativePeer(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 632c12d37..446224b6b 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -1,13 +1,13 @@ package bitswap import ( - ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" - bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" - mockpeernet 
"github.com/jbenet/go-ipfs/p2p/net/mock" - peer "github.com/jbenet/go-ipfs/p2p/peer" - mockrouting "github.com/jbenet/go-ipfs/routing/mock" - testutil "github.com/jbenet/go-ipfs/util/testutil" + ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" + mockpeernet "github.com/ipfs/go-ipfs/p2p/net/mock" + peer "github.com/ipfs/go-ipfs/p2p/peer" + mockrouting "github.com/ipfs/go-ipfs/routing/mock" + testutil "github.com/ipfs/go-ipfs/util/testutil" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 8bebde357..e0812ffbd 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -3,15 +3,15 @@ package bitswap import ( "errors" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" - bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" - bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" - peer "github.com/jbenet/go-ipfs/p2p/peer" - routing "github.com/jbenet/go-ipfs/routing" - mockrouting "github.com/jbenet/go-ipfs/routing/mock" - delay "github.com/jbenet/go-ipfs/thirdparty/delay" - util "github.com/jbenet/go-ipfs/util" - testutil "github.com/jbenet/go-ipfs/util/testutil" + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" + peer "github.com/ipfs/go-ipfs/p2p/peer" + routing "github.com/ipfs/go-ipfs/routing" + mockrouting "github.com/ipfs/go-ipfs/routing/mock" + delay "github.com/ipfs/go-ipfs/thirdparty/delay" + util "github.com/ipfs/go-ipfs/util" + testutil "github.com/ipfs/go-ipfs/util/testutil" ) func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index c14f1abb8..2ce035c3d 100644 
--- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -3,17 +3,17 @@ package bitswap import ( "time" - ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" - ds_sync "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" - blockstore "github.com/jbenet/go-ipfs/blocks/blockstore" - exchange "github.com/jbenet/go-ipfs/exchange" - tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet" - peer "github.com/jbenet/go-ipfs/p2p/peer" - p2ptestutil "github.com/jbenet/go-ipfs/p2p/test/util" - delay "github.com/jbenet/go-ipfs/thirdparty/delay" - datastore2 "github.com/jbenet/go-ipfs/util/datastore2" - testutil "github.com/jbenet/go-ipfs/util/testutil" + ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + ds_sync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" + exchange "github.com/ipfs/go-ipfs/exchange" + tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" + peer "github.com/ipfs/go-ipfs/p2p/peer" + p2ptestutil "github.com/ipfs/go-ipfs/p2p/test/util" + delay "github.com/ipfs/go-ipfs/thirdparty/delay" + datastore2 "github.com/ipfs/go-ipfs/util/datastore2" + testutil "github.com/ipfs/go-ipfs/util/testutil" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 450fe3bd3..508a7a09b 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -3,7 +3,7 @@ package wantlist import ( - u "github.com/jbenet/go-ipfs/util" + u "github.com/ipfs/go-ipfs/util" "sort" "sync" ) diff --git a/bitswap/workers.go b/bitswap/workers.go index 0a9b7aa92..fdd3c1549 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -3,10 +3,10 @@ package bitswap import ( "time" - inflect "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/chuckpreslar/inflect" - process "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" - context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" - u "github.com/jbenet/go-ipfs/util" + inflect "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/chuckpreslar/inflect" + process "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + u "github.com/ipfs/go-ipfs/util" ) func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { From 157235dff0fb30a695922d0c3d8873aaa3b75aff Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 3 Apr 2015 01:07:11 -0700 Subject: [PATCH 0359/1035] refactor task queue to have queues per peer This commit was moved from ipfs/go-bitswap@219ed26061bc1d0f94c0695a504df3a17c6a3f77 --- bitswap/decision/engine.go | 9 +- bitswap/decision/peer_request_queue.go | 107 ++++++++++++++++++-- bitswap/decision/peer_request_queue_test.go | 66 +++++++++++- 3 files changed, 167 insertions(+), 15 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 380c868b6..4711f182a 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -55,6 +55,9 @@ type Envelope struct { Peer peer.ID // Message is the payload Message bsmsg.BitSwapMessage + + // A callback to notify the decision queue that the task 
is complete + Sent func() } type Engine struct { @@ -137,7 +140,11 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { m := bsmsg.New() // TODO: maybe add keys from our wantlist? m.AddBlock(block) - return &Envelope{Peer: nextTask.Target, Message: m}, nil + return &Envelope{ + Peer: nextTask.Target, + Message: m, + Sent: nextTask.Done, + }, nil } } diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index a83d2675f..e154fdfc9 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -21,8 +21,9 @@ type peerRequestQueue interface { func newPRQ() peerRequestQueue { return &prq{ - taskMap: make(map[string]*peerRequestTask), - taskQueue: pq.New(wrapCmp(V1)), + taskMap: make(map[string]*peerRequestTask), + partners: make(map[peer.ID]*activePartner), + pQueue: pq.New(partnerCompare), } } @@ -32,42 +33,73 @@ var _ peerRequestQueue = &prq{} // to help decide how to sort tasks (on add) and how to select // tasks (on getnext). For now, we are assuming a dumb/nice strategy. 
type prq struct { - lock sync.Mutex - taskQueue pq.PQ - taskMap map[string]*peerRequestTask + lock sync.Mutex + pQueue pq.PQ + taskMap map[string]*peerRequestTask + partners map[peer.ID]*activePartner } // Push currently adds a new peerRequestTask to the end of the list func (tl *prq) Push(entry wantlist.Entry, to peer.ID) { tl.lock.Lock() defer tl.lock.Unlock() + partner, ok := tl.partners[to] + if !ok { + partner = &activePartner{taskQueue: pq.New(wrapCmp(V1))} + tl.pQueue.Push(partner) + tl.partners[to] = partner + } + if task, ok := tl.taskMap[taskKey(to, entry.Key)]; ok { task.Entry.Priority = entry.Priority - tl.taskQueue.Update(task.index) + partner.taskQueue.Update(task.index) return } + task := &peerRequestTask{ Entry: entry, Target: to, created: time.Now(), + Done: func() { + partner.TaskDone() + tl.lock.Lock() + tl.pQueue.Update(partner.Index()) + tl.lock.Unlock() + }, } - tl.taskQueue.Push(task) + + partner.taskQueue.Push(task) tl.taskMap[task.Key()] = task + partner.requests++ + tl.pQueue.Update(partner.Index()) } // Pop 'pops' the next task to be performed. Returns nil if no task exists. func (tl *prq) Pop() *peerRequestTask { tl.lock.Lock() defer tl.lock.Unlock() + if tl.pQueue.Len() == 0 { + return nil + } + pElem := tl.pQueue.Pop() + if pElem == nil { + return nil + } + + partner := pElem.(*activePartner) + var out *peerRequestTask - for tl.taskQueue.Len() > 0 { - out = tl.taskQueue.Pop().(*peerRequestTask) + for partner.taskQueue.Len() > 0 { + out = partner.taskQueue.Pop().(*peerRequestTask) delete(tl.taskMap, out.Key()) if out.trash { continue // discarding tasks that have been removed } break // and return |out| } + partner.StartTask() + partner.requests-- + tl.pQueue.Push(partner) return out } @@ -80,13 +112,16 @@ func (tl *prq) Remove(k u.Key, p peer.ID) { // simply mark it as trash, so it'll be dropped when popped off the // queue. 
t.trash = true + tl.partners[p].requests-- } tl.lock.Unlock() } type peerRequestTask struct { Entry wantlist.Entry - Target peer.ID // required + Target peer.ID + + Done func() // trash in a book-keeping field trash bool @@ -132,3 +167,55 @@ func wrapCmp(f func(a, b *peerRequestTask) bool) func(a, b pq.Elem) bool { return f(a.(*peerRequestTask), b.(*peerRequestTask)) } } + +type activePartner struct { + lk sync.Mutex + + // Active is the number of blocks this peer is currently being sent + active int + + // requests is the number of blocks this peer is currently requesting + requests int + + index int + + // priority queue of + taskQueue pq.PQ +} + +func partnerCompare(a, b pq.Elem) bool { + pa := a.(*activePartner) + pb := b.(*activePartner) + + // having no blocks in their wantlist means lowest priority + if pa.requests == 0 { + return false + } + if pb.requests == 0 { + return true + } + return pa.active < pb.active +} + +func (p *activePartner) StartTask() { + p.lk.Lock() + p.active++ + p.lk.Unlock() +} + +func (p *activePartner) TaskDone() { + p.lk.Lock() + p.active-- + if p.active < 0 { + panic("more tasks finished than started!") + } + p.lk.Unlock() +} + +func (p *activePartner) Index() int { + return p.index +} + +func (p *activePartner) SetIndex(i int) { + p.index = i +} diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 69d866937..cd8c4b1ff 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -47,10 +47,68 @@ func TestPushPop(t *testing.T) { prq.Remove(util.Key(consonant), partner) } - for _, expected := range vowels { - received := prq.Pop().Entry.Key - if received != util.Key(expected) { - t.Fatal("received", string(received), "expected", string(expected)) + var out []string + for { + received := prq.Pop() + if received == nil { + break } + + out = append(out, string(received.Entry.Key)) + } + + // Entries popped should already be in 
correct order + for i, expected := range vowels { + if out[i] != expected { + t.Fatal("received", out[i], "expected", expected) + } + } +} + +// This test checks that peers wont starve out other peers +func TestPeerRepeats(t *testing.T) { + prq := newPRQ() + a := testutil.RandPeerIDFatal(t) + b := testutil.RandPeerIDFatal(t) + c := testutil.RandPeerIDFatal(t) + d := testutil.RandPeerIDFatal(t) + + // Have each push some blocks + + for i := 0; i < 5; i++ { + prq.Push(wantlist.Entry{Key: util.Key(i)}, a) + prq.Push(wantlist.Entry{Key: util.Key(i)}, b) + prq.Push(wantlist.Entry{Key: util.Key(i)}, c) + prq.Push(wantlist.Entry{Key: util.Key(i)}, d) + } + + // now, pop off four entries, there should be one from each + var targets []string + var tasks []*peerRequestTask + for i := 0; i < 4; i++ { + t := prq.Pop() + targets = append(targets, t.Target.Pretty()) + tasks = append(tasks, t) + } + + expected := []string{a.Pretty(), b.Pretty(), c.Pretty(), d.Pretty()} + sort.Strings(expected) + sort.Strings(targets) + + t.Log(targets) + t.Log(expected) + for i, s := range targets { + if expected[i] != s { + t.Fatal("unexpected peer", s, expected[i]) + } + } + + // Now, if one of the tasks gets finished, the next task off the queue should + // be for the same peer + tasks[0].Done() + + ntask := prq.Pop() + if ntask.Target != tasks[0].Target { + t.Fatal("Expected task from peer with lowest active count") } } From 28f2d0f21f2a32e64801904560e63630d48d76d0 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 3 Apr 2015 11:40:26 -0700 Subject: [PATCH 0360/1035] some code cleanup and commenting This commit was moved from ipfs/go-bitswap@a45f185a8f60de1cc184b59c13e34176f0e263e2 --- bitswap/decision/engine.go | 4 ---- bitswap/decision/peer_request_queue.go | 26 +++++++++++++++++++------- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 4711f182a..928af7c4b 100644 --- a/bitswap/decision/engine.go +++ 
b/bitswap/decision/engine.go @@ -55,9 +55,6 @@ type Envelope struct { Peer peer.ID // Message is the payload Message bsmsg.BitSwapMessage - - // A callback to notify the decision queue that the task is complete - Sent func() } type Engine struct { @@ -143,7 +140,6 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { return &Envelope{ Peer: nextTask.Target, Message: m, - Sent: nextTask.Done, }, nil } } diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index e154fdfc9..c0dd52ccf 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -27,6 +27,7 @@ func newPRQ() peerRequestQueue { } } +// verify interface implementation var _ peerRequestQueue = &prq{} // TODO: at some point, the strategy needs to plug in here @@ -81,12 +82,7 @@ func (tl *prq) Pop() *peerRequestTask { if tl.pQueue.Len() == 0 { return nil } - pElem := tl.pQueue.Pop() - if pElem == nil { - return nil - } - - partner := pElem.(*activePartner) + partner := tl.pQueue.Pop().(*activePartner) var out *peerRequestTask for partner.taskQueue.Len() > 0 { @@ -97,6 +93,8 @@ func (tl *prq) Pop() *peerRequestTask { } break // and return |out| } + + // start the new task, and push the partner back onto the queue partner.StartTask() partner.requests-- tl.pQueue.Push(partner) @@ -112,6 +110,8 @@ func (tl *prq) Remove(k u.Key, p peer.ID) { // simply mark it as trash, so it'll be dropped when popped off the // queue. 
t.trash = true + + // having canceled a block, we now account for that in the given partner tl.partners[p].requests-- } tl.lock.Unlock() @@ -121,6 +121,7 @@ type peerRequestTask struct { Entry wantlist.Entry Target peer.ID + // A callback to signal that this task has been completed Done func() // trash in a book-keeping field @@ -135,10 +136,12 @@ func (t *peerRequestTask) Key() string { return taskKey(t.Target, t.Entry.Key) } +// Index implements pq.Elem func (t *peerRequestTask) Index() int { return t.index } +// SetIndex implements pq.Elem func (t *peerRequestTask) SetIndex(i int) { t.index = i } @@ -172,17 +175,22 @@ type activePartner struct { lk sync.Mutex // Active is the number of blocks this peer is currently being sent + // active must be locked around as it will be updated externally active int // requests is the number of blocks this peer is currently requesting + // request need not be locked around as it will only be modified under + // the peerRequestQueue's locks requests int + // for the PQ interface index int - // priority queue of + // priority queue of tasks belonging to this peer taskQueue pq.PQ } +// partnerCompare implements pq.ElemComparator func partnerCompare(a, b pq.Elem) bool { pa := a.(*activePartner) pb := b.(*activePartner) @@ -197,12 +205,14 @@ func partnerCompare(a, b pq.Elem) bool { return pa.active < pb.active } +// StartTask signals that a task was started for this partner func (p *activePartner) StartTask() { p.lk.Lock() p.active++ p.lk.Unlock() } +// TaskDone signals that a task was completed for this partner func (p *activePartner) TaskDone() { p.lk.Lock() p.active-- @@ -212,10 +222,12 @@ func (p *activePartner) TaskDone() { p.lk.Unlock() } +// Index implements pq.Elem func (p *activePartner) Index() int { return p.index } +// SetIndex implements pq.Elem func (p *activePartner) SetIndex(i int) { p.index = i } From 618e60126986aef6731fe47ac26c5ea8489171ef Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 3 Apr 2015 11:42:28 
-0700 Subject: [PATCH 0361/1035] fix some logic This commit was moved from ipfs/go-bitswap@e3f251bf304814d6cae3f7e97d3855119c9e9b9f --- bitswap/decision/peer_request_queue.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index c0dd52ccf..a1c6ae102 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -89,14 +89,15 @@ func (tl *prq) Pop() *peerRequestTask { out = partner.taskQueue.Pop().(*peerRequestTask) delete(tl.taskMap, out.Key()) if out.trash { + out = nil continue // discarding tasks that have been removed } + + partner.StartTask() + partner.requests-- break // and return |out| } - // start the new task, and push the partner back onto the queue - partner.StartTask() - partner.requests-- tl.pQueue.Push(partner) return out } From d29e347873b5ccb88813212901d72245b9b5e2bb Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 3 Apr 2015 15:37:14 -0700 Subject: [PATCH 0362/1035] address comments from CR This commit was moved from ipfs/go-bitswap@6bae251a2726abe9fd96e67c660da14d9de8f330 --- bitswap/decision/engine.go | 7 +++++++ bitswap/decision/peer_request_queue.go | 13 +++++++------ bitswap/decision/peer_request_queue_test.go | 15 ++++++++++----- bitswap/workers.go | 1 + 4 files changed, 25 insertions(+), 11 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 928af7c4b..119869677 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -55,6 +55,9 @@ type Envelope struct { Peer peer.ID // Message is the payload Message bsmsg.BitSwapMessage + + // A callback to notify the decision queue that the task is complete + Sent func() } type Engine struct { @@ -132,6 +135,9 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { block, err := e.bs.Get(nextTask.Entry.Key) if err != nil { + // If we don't have the block, don't hold that against the peer + // 
make sure to update that the task has been 'completed' + nextTask.Done() continue } @@ -140,6 +146,7 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { return &Envelope{ Peer: nextTask.Target, Message: m, + Sent: nextTask.Done, }, nil } } diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index a1c6ae102..e771ece0b 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -173,11 +173,11 @@ func wrapCmp(f func(a, b *peerRequestTask) bool) func(a, b pq.Elem) bool { } type activePartner struct { - lk sync.Mutex // Active is the number of blocks this peer is currently being sent // active must be locked around as it will be updated externally - active int + activelk sync.Mutex + active int // requests is the number of blocks this peer is currently requesting // request need not be locked around as it will only be modified under @@ -197,6 +197,7 @@ func partnerCompare(a, b pq.Elem) bool { pb := b.(*activePartner) // having no blocks in their wantlist means lowest priority + // having both of these checks ensures stability of the sort if pa.requests == 0 { return false } @@ -208,19 +209,19 @@ func partnerCompare(a, b pq.Elem) bool { // StartTask signals that a task was started for this partner func (p *activePartner) StartTask() { - p.lk.Lock() + p.activelk.Lock() p.active++ - p.lk.Unlock() + p.activelk.Unlock() } // TaskDone signals that a task was completed for this partner func (p *activePartner) TaskDone() { - p.lk.Lock() + p.activelk.Lock() p.active-- if p.active < 0 { panic("more tasks finished than started!") } - p.lk.Unlock() + p.activelk.Unlock() } // Index implements pq.Elem diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index cd8c4b1ff..96c136d6f 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -105,10 +105,15 @@ func TestPeerRepeats(t 
*testing.T) { // Now, if one of the tasks gets finished, the next task off the queue should // be for the same peer - tasks[0].Done() - - ntask := prq.Pop() - if ntask.Target != tasks[0].Target { - t.Fatal("Expected task from peer with lowest active count") + for blockI := 0; blockI < 4; blockI++ { + for i := 0; i < 4; i++ { + // its okay to mark the same task done multiple times here (JUST FOR TESTING) + tasks[i].Done() + + ntask := prq.Pop() + if ntask.Target != tasks[i].Target { + t.Fatal("Expected task from peer with lowest active count") + } + } } } diff --git a/bitswap/workers.go b/bitswap/workers.go index fdd3c1549..370aa1a87 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -51,6 +51,7 @@ func (bs *Bitswap) taskWorker(ctx context.Context) { } log.Event(ctx, "deliverBlocks", envelope.Message, envelope.Peer) bs.send(ctx, envelope.Peer, envelope.Message) + envelope.Sent() case <-ctx.Done(): return } From 28910613043542e05593e5d9809ff65140152db4 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 17 Apr 2015 12:43:31 -0700 Subject: [PATCH 0363/1035] add more bitswap task workers This commit was moved from ipfs/go-bitswap@de59d5c1feb695010e923583fb47f5157ea58d69 --- bitswap/workers.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index 370aa1a87..df476a341 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -9,16 +9,20 @@ import ( u "github.com/ipfs/go-ipfs/util" ) +var TaskWorkerCount = 4 + func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { // Start up a worker to handle block requests this node is making px.Go(func(px process.Process) { bs.clientWorker(ctx) }) - // Start up a worker to handle requests from other nodes for the data on this node - px.Go(func(px process.Process) { - bs.taskWorker(ctx) - }) + // Start up workers to handle requests from other nodes for the data on this node + for i := 0; i < TaskWorkerCount; i++ { + px.Go(func(px 
process.Process) { + bs.taskWorker(ctx) + }) + } // Start up a worker to manage periodically resending our wantlist out to peers px.Go(func(px process.Process) { From fb0608bc4590b5e04a6460dd324f5ddc8f8604b8 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 17 Apr 2015 15:57:50 -0700 Subject: [PATCH 0364/1035] make number of workers tuneable by an env var This commit was moved from ipfs/go-bitswap@948633d47da798c59b494267c7b32acb8649c8e5 --- bitswap/workers.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index df476a341..051496218 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -1,6 +1,8 @@ package bitswap import ( + "os" + "strconv" "time" inflect "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/chuckpreslar/inflect" @@ -9,7 +11,18 @@ import ( u "github.com/ipfs/go-ipfs/util" ) -var TaskWorkerCount = 4 +var TaskWorkerCount = 16 + +func init() { + twc := os.Getenv("IPFS_TASK_WORKERS") + if twc != "" { + n, err := strconv.Atoi(twc) + if err != nil { + return + } + TaskWorkerCount = n + } +} func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { // Start up a worker to handle block requests this node is making From e3e842827c3718899bbbae212154f3638b19b72b Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 19 Apr 2015 11:19:51 -0700 Subject: [PATCH 0365/1035] address comments from CR This commit was moved from ipfs/go-bitswap@394bdee1bd3de1b3caf80c0e6be37f44037cff38 --- bitswap/workers.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index 051496218..982eea3f1 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -18,9 +18,14 @@ func init() { if twc != "" { n, err := strconv.Atoi(twc) if err != nil { + log.Error(err) return } - TaskWorkerCount = n + if n > 0 { + TaskWorkerCount = n + } else { + log.Errorf("Invalid value of '%d' for IPFS_TASK_WORKERS", n) + } } } From 
b0e1b1cf1d84f056af82f76159df208149164e6a Mon Sep 17 00:00:00 2001 From: Jeromy Johnson Date: Sun, 19 Apr 2015 13:10:43 -0700 Subject: [PATCH 0366/1035] change env var for bitswap changed IPFS_TASK_WORKERS to IPFS_BITSWAP_TASK_WORKERS This commit was moved from ipfs/go-bitswap@d1ca2ab0b69b0dfc4014d39bf7a2ae4dc316c7da --- bitswap/workers.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index 982eea3f1..4e2bf43b8 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -14,7 +14,7 @@ import ( var TaskWorkerCount = 16 func init() { - twc := os.Getenv("IPFS_TASK_WORKERS") + twc := os.Getenv("IPFS_BITSWAP_TASK_WORKERS") if twc != "" { n, err := strconv.Atoi(twc) if err != nil { @@ -24,7 +24,7 @@ func init() { if n > 0 { TaskWorkerCount = n } else { - log.Errorf("Invalid value of '%d' for IPFS_TASK_WORKERS", n) + log.Errorf("Invalid value of '%d' for IPFS_BITSWAP_TASK_WORKERS", n) } } } From c286fdbaa73b4a17bd60e92b9bfe135e464699a8 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Mon, 20 Apr 2015 00:15:34 -0700 Subject: [PATCH 0367/1035] remove debugerrors We now consider debugerrors harmful: we've run into cases where debugerror.Wrap() hid valuable error information (err == io.EOF?). I've removed them from the main code, but left them in some tests. Go errors are lacking, but unfortunately, this isn't the solution. It is possible that debugerrors.New or debugerrors.Errorf should remain still (i.e. only remove debugerrors.Wrap) but we don't use these errors often enough to keep. 
This commit was moved from ipfs/go-bitswap@70f2b6b023310aa838868c984181c1cc2df257c7 --- bitswap/bitswap.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 78a421b57..ae0c76daa 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -3,6 +3,7 @@ package bitswap import ( + "errors" "math" "sync" "time" @@ -21,7 +22,6 @@ import ( "github.com/ipfs/go-ipfs/thirdparty/delay" eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog" u "github.com/ipfs/go-ipfs/util" - errors "github.com/ipfs/go-ipfs/util/debugerror" pset "github.com/ipfs/go-ipfs/util/peerset" // TODO move this to peerstore ) @@ -432,7 +432,7 @@ func (bs *Bitswap) ReceiveError(err error) { func (bs *Bitswap) send(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) error { defer log.EventBegin(ctx, "sendMessage", p, m).Done() if err := bs.network.SendMessage(ctx, p, m); err != nil { - return errors.Wrap(err) + return err } return bs.engine.MessageSent(p, m) } From e1100287935dbe806dd5f4873e9dfc22783a684c Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 28 Apr 2015 01:51:30 -0700 Subject: [PATCH 0368/1035] let wantlist command show other peers wantlists This commit was moved from ipfs/go-bitswap@90fede8dda784637cd067a4a31e634e96a6df6c5 --- bitswap/bitswap.go | 8 ++++++++ bitswap/decision/engine.go | 10 ++++++++++ 2 files changed, 18 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index ae0c76daa..37826c492 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -175,6 +175,14 @@ func (bs *Bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err } } +func (bs *Bitswap) WantlistForPeer(p peer.ID) []u.Key { + var out []u.Key + for _, e := range bs.engine.WantlistForPeer(p) { + out = append(out, e.Key) + } + return out +} + // GetBlocks returns a channel where the caller may receive blocks that // correspond to the provided |keys|. 
Returns an error if BitSwap is unable to // begin this request within the deadline enforced by the context. diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 119869677..60b95e469 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -96,6 +96,16 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { return e } +func (e *Engine) WantlistForPeer(p peer.ID) (out []wl.Entry) { + e.lock.Lock() + partner, ok := e.ledgerMap[p] + if ok { + out = partner.wantList.SortedEntries() + } + e.lock.Unlock() + return out +} + func (e *Engine) taskWorker(ctx context.Context) { defer close(e.outbox) // because taskWorker uses the channel exclusively for { From b34918e52d304b8ab17f36e707e44120f5b38146 Mon Sep 17 00:00:00 2001 From: Henry Date: Tue, 28 Apr 2015 12:33:02 +0200 Subject: [PATCH 0369/1035] godeps: move (go)goprotobuf to github location This commit was moved from ipfs/go-bitswap@fceb09daeb29a0774b82c8030f4f8269fd461cd6 --- bitswap/message/internal/pb/message.pb.go | 2 +- bitswap/message/message.go | 4 ++-- bitswap/message/message_test.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/message/internal/pb/message.pb.go b/bitswap/message/internal/pb/message.pb.go index 9486ebb1b..828d1a225 100644 --- a/bitswap/message/internal/pb/message.pb.go +++ b/bitswap/message/internal/pb/message.pb.go @@ -13,7 +13,7 @@ It has these top-level messages: */ package bitswap_message_pb -import proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" +import proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto" import math "math" // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 0952c2745..3a7d70aae 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -9,8 +9,8 @@ import ( inet "github.com/ipfs/go-ipfs/p2p/net" u "github.com/ipfs/go-ipfs/util" - ggio "github.com/ipfs/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io" - proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + ggio "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/io" + proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto" ) // TODO move message.go into the bitswap package diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 6d1df1411..dc10dcc70 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -4,7 +4,7 @@ import ( "bytes" "testing" - proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto" blocks "github.com/ipfs/go-ipfs/blocks" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/internal/pb" From 84067097632204b94df0023385a49205f29f0b45 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 29 Apr 2015 01:36:47 -0700 Subject: [PATCH 0370/1035] try harder to not send duplicate blocks This commit was moved from ipfs/go-bitswap@36427bdea2c963b86e493c1f0043048188928c9e --- bitswap/decision/peer_request_queue.go | 28 +++++++++++++++++++++----- bitswap/workers.go | 2 +- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index e771ece0b..42928487d 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -46,7 +46,7 @@ func (tl *prq) Push(entry wantlist.Entry, to peer.ID) { defer tl.lock.Unlock() partner, ok := tl.partners[to] if !ok { - partner = &activePartner{taskQueue: 
pq.New(wrapCmp(V1))} + partner = newActivePartner() tl.pQueue.Push(partner) tl.partners[to] = partner } @@ -57,12 +57,19 @@ func (tl *prq) Push(entry wantlist.Entry, to peer.ID) { return } + partner.activelk.Lock() + defer partner.activelk.Unlock() + _, ok = partner.activeBlocks[entry.Key] + if ok { + return + } + task := &peerRequestTask{ Entry: entry, Target: to, created: time.Now(), Done: func() { - partner.TaskDone() + partner.TaskDone(entry.Key) tl.lock.Lock() tl.pQueue.Update(partner.Index()) tl.lock.Unlock() @@ -93,7 +100,7 @@ func (tl *prq) Pop() *peerRequestTask { continue // discarding tasks that have been removed } - partner.StartTask() + partner.StartTask(out.Entry.Key) partner.requests-- break // and return |out| } @@ -179,6 +186,8 @@ type activePartner struct { activelk sync.Mutex active int + activeBlocks map[u.Key]struct{} + // requests is the number of blocks this peer is currently requesting // request need not be locked around as it will only be modified under // the peerRequestQueue's locks @@ -191,6 +200,13 @@ type activePartner struct { taskQueue pq.PQ } +func newActivePartner() *activePartner { + return &activePartner{ + taskQueue: pq.New(wrapCmp(V1)), + activeBlocks: make(map[u.Key]struct{}), + } +} + // partnerCompare implements pq.ElemComparator func partnerCompare(a, b pq.Elem) bool { pa := a.(*activePartner) @@ -208,15 +224,17 @@ func partnerCompare(a, b pq.Elem) bool { } // StartTask signals that a task was started for this partner -func (p *activePartner) StartTask() { +func (p *activePartner) StartTask(k u.Key) { p.activelk.Lock() + p.activeBlocks[k] = struct{}{} p.active++ p.activelk.Unlock() } // TaskDone signals that a task was completed for this partner -func (p *activePartner) TaskDone() { +func (p *activePartner) TaskDone(k u.Key) { p.activelk.Lock() + delete(p.activeBlocks, k) p.active-- if p.active < 0 { panic("more tasks finished than started!") diff --git a/bitswap/workers.go b/bitswap/workers.go index 4e2bf43b8..1fc59a214 
100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -11,7 +11,7 @@ import ( u "github.com/ipfs/go-ipfs/util" ) -var TaskWorkerCount = 16 +var TaskWorkerCount = 8 func init() { twc := os.Getenv("IPFS_BITSWAP_TASK_WORKERS") From 2bf71e0f803f40e852e45cb3d9d995b3806e4e09 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 29 Apr 2015 19:59:18 -0700 Subject: [PATCH 0371/1035] remove some redundant blockputs to avoid false duplicate block receives This commit was moved from ipfs/go-bitswap@d76b5e4af8755d6461437e568dd5c5dc16856107 --- bitswap/bitswap.go | 9 +++++++++ bitswap/bitswap_test.go | 6 +----- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 37826c492..937ee131e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -219,6 +219,15 @@ func (bs *Bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { return errors.New("bitswap is closed") default: } + has, err := bs.blockstore.Has(blk.Key()) + if err != nil { + return err + } + + if has { + log.Error(bs.self, "Dup Block! ", blk.Key()) + } + if err := bs.blockstore.Put(blk); err != nil { return err } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 85b3c0ec8..85a8e9d5d 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -69,9 +69,6 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { hasBlock := g.Next() defer hasBlock.Exchange.Close() - if err := hasBlock.Blockstore().Put(block); err != nil { - t.Fatal(err) - } if err := hasBlock.Exchange.HasBlock(context.Background(), block); err != nil { t.Fatal(err) } @@ -136,7 +133,6 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { var blkeys []u.Key first := instances[0] for _, b := range blocks { - first.Blockstore().Put(b) // TODO remove. don't need to do this. 
bitswap owns block blkeys = append(blkeys, b.Key()) first.Exchange.HasBlock(context.Background(), b) } @@ -144,7 +140,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { t.Log("Distribute!") wg := sync.WaitGroup{} - for _, inst := range instances { + for _, inst := range instances[1:] { wg.Add(1) go func(inst Instance) { defer wg.Done() From 71e67231659cebcecc18cfe3d80be44841f50df6 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 1 May 2015 23:11:40 -0700 Subject: [PATCH 0372/1035] dont create a new ticker each loop This commit was moved from ipfs/go-bitswap@6e4bb2aad98b28cdff2fc1a84beb71878135e2de --- bitswap/workers.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index 4e2bf43b8..77ce18b7d 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -182,10 +182,11 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) { defer cancel() broadcastSignal := time.After(rebroadcastDelay.Get()) + tick := time.Tick(10 * time.Second) for { select { - case <-time.Tick(10 * time.Second): + case <-tick: n := bs.wantlist.Len() if n > 0 { log.Debug(n, inflect.FromNumber("keys", n), "in bitswap wantlist") From 2a1d60079d93d222632eff3821ea4029f63d4712 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 4 May 2015 03:12:17 -0700 Subject: [PATCH 0373/1035] remove logging of dup blocks, move to counters for bitswap stat This commit was moved from ipfs/go-bitswap@110eef1d2b29ff5e0d44ccef834f4432cab1b5e4 --- bitswap/bitswap.go | 15 +++++++-------- bitswap/stat.go | 10 +++++++--- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 937ee131e..8b12a4727 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -127,6 +127,9 @@ type Bitswap struct { newBlocks chan *blocks.Block provideKeys chan u.Key + + blocksRecvd int + dupBlocksRecvd int } type blockRequest struct { @@ -219,14 +222,6 @@ func (bs *Bitswap) HasBlock(ctx 
context.Context, blk *blocks.Block) error { return errors.New("bitswap is closed") default: } - has, err := bs.blockstore.Has(blk.Key()) - if err != nil { - return err - } - - if has { - log.Error(bs.self, "Dup Block! ", blk.Key()) - } if err := bs.blockstore.Put(blk); err != nil { return err @@ -351,6 +346,10 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg // Should only track *useful* messages in ledger for _, block := range incoming.Blocks() { + bs.blocksRecvd++ + if has, err := bs.blockstore.Has(block.Key()); err == nil && has { + bs.dupBlocksRecvd++ + } hasBlockCtx, cancel := context.WithTimeout(ctx, hasBlockTimeout) if err := bs.HasBlock(hasBlockCtx, block); err != nil { log.Debug(err) diff --git a/bitswap/stat.go b/bitswap/stat.go index 1c5fec62b..ceab4b2ee 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -6,15 +6,19 @@ import ( ) type Stat struct { - ProvideBufLen int - Wantlist []u.Key - Peers []string + ProvideBufLen int + Wantlist []u.Key + Peers []string + BlocksReceived int + DupBlksReceived int } func (bs *Bitswap) Stat() (*Stat, error) { st := new(Stat) st.ProvideBufLen = len(bs.newBlocks) st.Wantlist = bs.GetWantlist() + st.BlocksReceived = bs.blocksRecvd + st.DupBlksReceived = bs.dupBlocksRecvd for _, p := range bs.engine.Peers() { st.Peers = append(st.Peers, p.Pretty()) From 429daaeeca60d777728fae65a37dfe5b874adc38 Mon Sep 17 00:00:00 2001 From: Jeromy Johnson Date: Tue, 5 May 2015 12:28:50 -0700 Subject: [PATCH 0374/1035] mild refactor of bitswap This commit was moved from ipfs/go-bitswap@b9fa4eedf2a5255b627eb83cb658fc32f7c4a6d1 --- bitswap/bitswap.go | 34 ++++++---------- bitswap/network/interface.go | 11 ++---- bitswap/testnet/network_test.go | 70 ++++----------------------------- bitswap/testnet/virtual.go | 63 +---------------------------- 4 files changed, 23 insertions(+), 155 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 8b12a4727..61854c79a 100644 --- a/bitswap/bitswap.go 
+++ b/bitswap/bitswap.go @@ -4,6 +4,7 @@ package bitswap import ( "errors" + "fmt" "math" "sync" "time" @@ -324,47 +325,31 @@ func (bs *Bitswap) sendWantlistToProviders(ctx context.Context, entries []wantli } // TODO(brian): handle errors -func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) ( - peer.ID, bsmsg.BitSwapMessage) { +func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) error { defer log.EventBegin(ctx, "receiveMessage", p, incoming).Done() - if p == "" { - log.Debug("Received message from nil peer!") - // TODO propagate the error upward - return "", nil - } - if incoming == nil { - log.Debug("Got nil bitswap message!") - // TODO propagate the error upward - return "", nil - } - // This call records changes to wantlists, blocks received, // and number of bytes transfered. bs.engine.MessageReceived(p, incoming) // TODO: this is bad, and could be easily abused. // Should only track *useful* messages in ledger + var keys []u.Key for _, block := range incoming.Blocks() { bs.blocksRecvd++ if has, err := bs.blockstore.Has(block.Key()); err == nil && has { bs.dupBlocksRecvd++ } + log.Debugf("got block %s from %s", block, p) hasBlockCtx, cancel := context.WithTimeout(ctx, hasBlockTimeout) if err := bs.HasBlock(hasBlockCtx, block); err != nil { - log.Debug(err) + return fmt.Errorf("ReceiveMessage HasBlock error: %s", err) } cancel() - } - - var keys []u.Key - for _, block := range incoming.Blocks() { keys = append(keys, block.Key()) } - bs.cancelBlocks(ctx, keys) - // TODO: consider changing this function to not return anything - return "", nil + return bs.cancelBlocks(ctx, keys) } // Connected/Disconnected warns bitswap about peer connections @@ -384,21 +369,24 @@ func (bs *Bitswap) PeerDisconnected(p peer.ID) { bs.engine.PeerDisconnected(p) } -func (bs *Bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { +func (bs *Bitswap) cancelBlocks(ctx context.Context, bkeys 
[]u.Key) error { if len(bkeys) < 1 { - return + return nil } message := bsmsg.New() message.SetFull(false) for _, k := range bkeys { + log.Debug("cancel block: %s", k) message.Cancel(k) } for _, p := range bs.engine.Peers() { err := bs.send(ctx, p, message) if err != nil { log.Debugf("Error sending message: %s", err) + return err } } + return nil } func (bs *Bitswap) wantNewBlocks(ctx context.Context, bkeys []u.Key) { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 146c73341..a6ed070c0 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -19,12 +19,6 @@ type BitSwapNetwork interface { peer.ID, bsmsg.BitSwapMessage) error - // SendRequest sends a BitSwap message to a peer and waits for a response. - SendRequest( - context.Context, - peer.ID, - bsmsg.BitSwapMessage) (incoming bsmsg.BitSwapMessage, err error) - // SetDelegate registers the Reciver to handle messages received from the // network. SetDelegate(Receiver) @@ -35,8 +29,9 @@ type BitSwapNetwork interface { // Implement Receiver to receive messages from the BitSwapNetwork type Receiver interface { ReceiveMessage( - ctx context.Context, sender peer.ID, incoming bsmsg.BitSwapMessage) ( - destination peer.ID, outgoing bsmsg.BitSwapMessage) + ctx context.Context, + sender peer.ID, + incoming bsmsg.BitSwapMessage) error ReceiveError(error) diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 8d457d81c..9091ff255 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -14,57 +14,6 @@ import ( testutil "github.com/ipfs/go-ipfs/util/testutil" ) -func TestSendRequestToCooperativePeer(t *testing.T) { - net := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) - - recipientPeer := testutil.RandIdentityOrFatal(t) - - t.Log("Get two network adapters") - - initiator := net.Adapter(testutil.RandIdentityOrFatal(t)) - recipient := net.Adapter(recipientPeer) - - expectedStr := "response from recipient" - 
recipient.SetDelegate(lambda(func( - ctx context.Context, - from peer.ID, - incoming bsmsg.BitSwapMessage) ( - peer.ID, bsmsg.BitSwapMessage) { - - t.Log("Recipient received a message from the network") - - // TODO test contents of incoming message - - m := bsmsg.New() - m.AddBlock(blocks.NewBlock([]byte(expectedStr))) - - return from, m - })) - - t.Log("Build a message and send a synchronous request to recipient") - - message := bsmsg.New() - message.AddBlock(blocks.NewBlock([]byte("data"))) - response, err := initiator.SendRequest( - context.Background(), recipientPeer.ID(), message) - if err != nil { - t.Fatal(err) - } - - t.Log("Check the contents of the response from recipient") - - if response == nil { - t.Fatal("Should have received a response") - } - - for _, blockFromRecipient := range response.Blocks() { - if string(blockFromRecipient.Data) == expectedStr { - return - } - } - t.Fatal("Should have returned after finding expected block data") -} - func TestSendMessageAsyncButWaitForResponse(t *testing.T) { net := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) responderPeer := testutil.RandIdentityOrFatal(t) @@ -80,20 +29,19 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { responder.SetDelegate(lambda(func( ctx context.Context, fromWaiter peer.ID, - msgFromWaiter bsmsg.BitSwapMessage) ( - peer.ID, bsmsg.BitSwapMessage) { + msgFromWaiter bsmsg.BitSwapMessage) error { msgToWaiter := bsmsg.New() msgToWaiter.AddBlock(blocks.NewBlock([]byte(expectedStr))) + waiter.SendMessage(ctx, fromWaiter, msgToWaiter) - return fromWaiter, msgToWaiter + return nil })) waiter.SetDelegate(lambda(func( ctx context.Context, fromResponder peer.ID, - msgFromResponder bsmsg.BitSwapMessage) ( - peer.ID, bsmsg.BitSwapMessage) { + msgFromResponder bsmsg.BitSwapMessage) error { // TODO assert that this came from the correct peer and that the message contents are as expected ok := false @@ -108,7 +56,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { 
t.Fatal("Message not received from the responder") } - return "", nil + return nil })) messageSentAsync := bsmsg.New() @@ -123,7 +71,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { } type receiverFunc func(ctx context.Context, p peer.ID, - incoming bsmsg.BitSwapMessage) (peer.ID, bsmsg.BitSwapMessage) + incoming bsmsg.BitSwapMessage) error // lambda returns a Receiver instance given a receiver function func lambda(f receiverFunc) bsnet.Receiver { @@ -133,13 +81,11 @@ func lambda(f receiverFunc) bsnet.Receiver { } type lambdaImpl struct { - f func(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) ( - peer.ID, bsmsg.BitSwapMessage) + f func(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) error } func (lam *lambdaImpl) ReceiveMessage(ctx context.Context, - p peer.ID, incoming bsmsg.BitSwapMessage) ( - peer.ID, bsmsg.BitSwapMessage) { + p peer.ID, incoming bsmsg.BitSwapMessage) error { return lam.f(ctx, p, incoming) } diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index e0812ffbd..feb5fd722 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -72,61 +72,7 @@ func (n *network) deliver( n.delay.Wait() - nextPeer, nextMsg := r.ReceiveMessage(context.TODO(), from, message) - - if (nextPeer == "" && nextMsg != nil) || (nextMsg == nil && nextPeer != "") { - return errors.New("Malformed client request") - } - - if nextPeer == "" && nextMsg == nil { // no response to send - return nil - } - - nextReceiver, ok := n.clients[nextPeer] - if !ok { - return errors.New("Cannot locate peer on network") - } - go n.deliver(nextReceiver, nextPeer, nextMsg) - return nil -} - -// TODO -func (n *network) SendRequest( - ctx context.Context, - from peer.ID, - to peer.ID, - message bsmsg.BitSwapMessage) ( - incoming bsmsg.BitSwapMessage, err error) { - - r, ok := n.clients[to] - if !ok { - return nil, errors.New("Cannot locate peer on network") - } - nextPeer, nextMsg := r.ReceiveMessage(context.TODO(), 
from, message) - - // TODO dedupe code - if (nextPeer == "" && nextMsg != nil) || (nextMsg == nil && nextPeer != "") { - r.ReceiveError(errors.New("Malformed client request")) - return nil, nil - } - - // TODO dedupe code - if nextPeer == "" && nextMsg == nil { - return nil, nil - } - - // TODO test when receiver doesn't immediately respond to the initiator of the request - if nextPeer != from { - go func() { - nextReceiver, ok := n.clients[nextPeer] - if !ok { - // TODO log the error? - } - n.deliver(nextReceiver, nextPeer, nextMsg) - }() - return nil, nil - } - return nextMsg, nil + return r.ReceiveMessage(context.TODO(), from, message) } type networkClient struct { @@ -143,13 +89,6 @@ func (nc *networkClient) SendMessage( return nc.network.SendMessage(ctx, nc.local, to, message) } -func (nc *networkClient) SendRequest( - ctx context.Context, - to peer.ID, - message bsmsg.BitSwapMessage) (incoming bsmsg.BitSwapMessage, err error) { - return nc.network.SendRequest(ctx, nc.local, to, message) -} - // FindProvidersAsync returns a channel of providers for the given key func (nc *networkClient) FindProvidersAsync(ctx context.Context, k util.Key, max int) <-chan peer.ID { From ed3f60f0762e88b791fb6525e46f6e3620fce095 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 6 May 2015 00:50:44 -0700 Subject: [PATCH 0375/1035] address comments from CR This commit was moved from ipfs/go-bitswap@1f178a6f87cfeb9d7932f42fe1824e7cd4f1ec4a --- bitswap/bitswap.go | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 61854c79a..757c9067e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -349,7 +349,8 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg keys = append(keys, block.Key()) } - return bs.cancelBlocks(ctx, keys) + bs.cancelBlocks(ctx, keys) + return nil } // Connected/Disconnected warns bitswap about peer connections @@ -369,9 +370,9 @@ func (bs 
*Bitswap) PeerDisconnected(p peer.ID) { bs.engine.PeerDisconnected(p) } -func (bs *Bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) error { +func (bs *Bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { if len(bkeys) < 1 { - return nil + return } message := bsmsg.New() message.SetFull(false) @@ -379,14 +380,21 @@ func (bs *Bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) error { log.Debug("cancel block: %s", k) message.Cancel(k) } + + wg := sync.WaitGroup{} for _, p := range bs.engine.Peers() { - err := bs.send(ctx, p, message) - if err != nil { - log.Debugf("Error sending message: %s", err) - return err - } + wg.Add(1) + go func(p peer.ID) { + defer wg.Done() + err := bs.send(ctx, p, message) + if err != nil { + log.Warningf("Error sending message: %s", err) + return + } + }(p) } - return nil + wg.Wait() + return } func (bs *Bitswap) wantNewBlocks(ctx context.Context, bkeys []u.Key) { From bbbba5aa324992526b1ea85c5390b16a5a16235b Mon Sep 17 00:00:00 2001 From: rht Date: Tue, 19 May 2015 00:42:21 +0700 Subject: [PATCH 0376/1035] Run 'gofmt -s -w' on these files This commit was moved from ipfs/go-bitswap@aad2ad5d1648817a18f6fadbcf6872be22cf5ec4 --- bitswap/decision/engine_test.go | 4 ++-- bitswap/message/message_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index b69f8b1df..afe6ba9ad 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -128,10 +128,10 @@ func TestPartnerWantsThenCancels(t *testing.T) { type testCase [][]string testcases := []testCase{ - testCase{ + { alphabet, vowels, }, - testCase{ + { alphabet, stringsComplement(alphabet, vowels), }, } diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index dc10dcc70..cbeed8892 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -27,7 +27,7 @@ func TestNewMessageFromProto(t *testing.T) { 
protoMessage := new(pb.Message) protoMessage.Wantlist = new(pb.Message_Wantlist) protoMessage.Wantlist.Entries = []*pb.Message_Wantlist_Entry{ - &pb.Message_Wantlist_Entry{Block: proto.String(str)}, + {Block: proto.String(str)}, } if !wantlistContains(protoMessage.Wantlist, str) { t.Fail() From 6c716e0b0d30f50e8484f89db50008880fabe541 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Tue, 19 May 2015 16:53:13 -0400 Subject: [PATCH 0377/1035] bitswap/test: fix timeout on travis This commit was moved from ipfs/go-bitswap@5848879b015c40cfc881e6faabb508fa1cdd71e1 --- bitswap/bitswap_test.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 85a8e9d5d..354eb73e5 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -8,6 +8,7 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + travis "github.com/ipfs/go-ipfs/util/testutil/ci/travis" blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" @@ -98,6 +99,8 @@ func TestLargeSwarm(t *testing.T) { // when running with the race detector, 500 instances launches // well over 8k goroutines. This hits a race detector limit. 
numInstances = 100 + } else if travis.IsRunning() { + numInstances = 200 } else { t.Parallel() } @@ -108,7 +111,11 @@ func TestLargeFile(t *testing.T) { if testing.Short() { t.SkipNow() } - t.Parallel() + + if !travis.IsRunning() { + t.Parallel() + } + numInstances := 10 numBlocks := 100 PerformDistributionTest(t, numInstances, numBlocks) From c652e45dae25622c293a86d17f38a820215bac38 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 20 May 2015 15:09:20 -0700 Subject: [PATCH 0378/1035] remove inflect package This commit was moved from ipfs/go-bitswap@5130165dbd8c6d4909089caa92414f76fc2ff374 --- bitswap/workers.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index 724badd30..dff3d911c 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -5,7 +5,6 @@ import ( "strconv" "time" - inflect "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/chuckpreslar/inflect" process "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" u "github.com/ipfs/go-ipfs/util" @@ -189,7 +188,7 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) { case <-tick: n := bs.wantlist.Len() if n > 0 { - log.Debug(n, inflect.FromNumber("keys", n), "in bitswap wantlist") + log.Debug(n, "keys in bitswap wantlist") } case <-broadcastSignal: // resend unfulfilled wantlist keys entries := bs.wantlist.Entries() From 264201a303b2630d069f37ab5be2788ee9f1724a Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 8 May 2015 23:55:35 -0700 Subject: [PATCH 0379/1035] implement peermanager to control outgoing messages Also more refactoring of bitswap in general, including some perf improvements and eventlog removal. 
clean up, and buffer channels move some things around correctly buffer work messages more cleanup, and improve test perf remove unneccessary test revert changes to bitswap message, they werent necessary This commit was moved from ipfs/go-bitswap@5efc7f693e63a7f03fe73ff37813148beb35cbd9 --- bitswap/bitswap.go | 88 +++-------- bitswap/bitswap_test.go | 35 +---- bitswap/decision/engine.go | 22 +-- bitswap/decision/engine_test.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/message/message.go | 11 +- bitswap/network/interface.go | 2 + bitswap/network/ipfs_impl.go | 4 + bitswap/peermanager.go | 203 +++++++++++++++++++++++++ bitswap/testnet/virtual.go | 9 ++ bitswap/testutils.go | 11 +- bitswap/workers.go | 6 +- 12 files changed, 275 insertions(+), 120 deletions(-) create mode 100644 bitswap/peermanager.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 757c9067e..b8dcdab1e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -91,7 +91,9 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, process: px, newBlocks: make(chan *blocks.Block, HasBlockBufferSize), provideKeys: make(chan u.Key), + pm: NewPeerManager(network), } + go bs.pm.Run(ctx) network.SetDelegate(bs) // Start up bitswaps async worker routines @@ -108,6 +110,10 @@ type Bitswap struct { // network delivers messages on behalf of the session network bsnet.BitSwapNetwork + // the peermanager manages sending messages to peers in a way that + // wont block bitswap operation + pm *PeerManager + // blockstore is the local database // NB: ensure threadsafety blockstore blockstore.Blockstore @@ -217,7 +223,6 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks. // HasBlock announces the existance of a block to this bitswap service. The // service will potentially notify its peers. 
func (bs *Bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { - log.Event(ctx, "hasBlock", blk) select { case <-bs.process.Closing(): return errors.New("bitswap is closed") @@ -227,6 +232,7 @@ func (bs *Bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { if err := bs.blockstore.Put(blk); err != nil { return err } + bs.wantlist.Remove(blk.Key()) bs.notifications.Publish(blk) select { @@ -239,7 +245,6 @@ func (bs *Bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { func (bs *Bitswap) sendWantlistMsgToPeers(ctx context.Context, m bsmsg.BitSwapMessage, peers <-chan peer.ID) error { set := pset.New() - wg := sync.WaitGroup{} loop: for { @@ -253,37 +258,22 @@ loop: continue } - wg.Add(1) - go func(p peer.ID) { - defer wg.Done() - if err := bs.send(ctx, p, m); err != nil { - log.Debug(err) // TODO remove if too verbose - } - }(peerToQuery) + bs.pm.Send(peerToQuery, m) case <-ctx.Done(): return nil } } - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - - select { - case <-done: - case <-ctx.Done(): - // NB: we may be abandoning goroutines here before they complete - // this shouldnt be an issue because they will complete soon anyways - // we just don't want their being slow to impact bitswap transfer speeds - } return nil } func (bs *Bitswap) sendWantlistToPeers(ctx context.Context, peers <-chan peer.ID) error { + entries := bs.wantlist.Entries() + if len(entries) == 0 { + return nil + } message := bsmsg.New() message.SetFull(true) - for _, wanted := range bs.wantlist.Entries() { + for _, wanted := range entries { message.AddEntry(wanted.Key, wanted.Priority) } return bs.sendWantlistMsgToPeers(ctx, message, peers) @@ -326,7 +316,7 @@ func (bs *Bitswap) sendWantlistToProviders(ctx context.Context, entries []wantli // TODO(brian): handle errors func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) error { - defer log.EventBegin(ctx, "receiveMessage", p, 
incoming).Done() + //defer log.EventBegin(ctx, "receiveMessage", p, incoming).Done() // This call records changes to wantlists, blocks received, // and number of bytes transfered. @@ -356,6 +346,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg // Connected/Disconnected warns bitswap about peer connections func (bs *Bitswap) PeerConnected(p peer.ID) { // TODO: add to clientWorker?? + bs.pm.Connected(p) peers := make(chan peer.ID, 1) peers <- p close(peers) @@ -367,6 +358,7 @@ func (bs *Bitswap) PeerConnected(p peer.ID) { // Connected/Disconnected warns bitswap about peer connections func (bs *Bitswap) PeerDisconnected(p peer.ID) { + bs.pm.Disconnected(p) bs.engine.PeerDisconnected(p) } @@ -381,19 +373,7 @@ func (bs *Bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { message.Cancel(k) } - wg := sync.WaitGroup{} - for _, p := range bs.engine.Peers() { - wg.Add(1) - go func(p peer.ID) { - defer wg.Done() - err := bs.send(ctx, p, message) - if err != nil { - log.Warningf("Error sending message: %s", err) - return - } - }(p) - } - wg.Wait() + bs.pm.Broadcast(message) return } @@ -408,29 +388,7 @@ func (bs *Bitswap) wantNewBlocks(ctx context.Context, bkeys []u.Key) { message.AddEntry(k, kMaxPriority-i) } - wg := sync.WaitGroup{} - for _, p := range bs.engine.Peers() { - wg.Add(1) - go func(p peer.ID) { - defer wg.Done() - err := bs.send(ctx, p, message) - if err != nil { - log.Debugf("Error sending message: %s", err) - } - }(p) - } - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - select { - case <-done: - case <-ctx.Done(): - // NB: we may be abandoning goroutines here before they complete - // this shouldnt be an issue because they will complete soon anyways - // we just don't want their being slow to impact bitswap transfer speeds - } + bs.pm.Broadcast(message) } func (bs *Bitswap) ReceiveError(err error) { @@ -439,16 +397,6 @@ func (bs *Bitswap) ReceiveError(err error) { // TODO bubble the 
network error up to the parent context/error logger } -// send strives to ensure that accounting is always performed when a message is -// sent -func (bs *Bitswap) send(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) error { - defer log.EventBegin(ctx, "sendMessage", p, m).Done() - if err := bs.network.SendMessage(ctx, p, m); err != nil { - return err - } - return bs.engine.MessageSent(p, m) -} - func (bs *Bitswap) Close() error { return bs.process.Close() } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 354eb73e5..c04946692 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -13,7 +13,6 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" - p2ptestutil "github.com/ipfs/go-ipfs/p2p/test/util" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" u "github.com/ipfs/go-ipfs/util" @@ -36,30 +35,6 @@ func TestClose(t *testing.T) { bitswap.Exchange.GetBlock(context.Background(), block.Key()) } -func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this - - rs := mockrouting.NewServer() - net := tn.VirtualNetwork(rs, delay.Fixed(kNetworkDelay)) - g := NewTestSessionGenerator(net) - defer g.Close() - - block := blocks.NewBlock([]byte("block")) - pinfo := p2ptestutil.RandTestBogusIdentityOrFatal(t) - rs.Client(pinfo).Provide(context.Background(), block.Key()) // but not on network - - solo := g.Next() - defer solo.Exchange.Close() - - ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) - _, err := solo.Exchange.GetBlock(ctx, block.Key()) - - if err != context.DeadlineExceeded { - t.Fatal("Expected DeadlineExceeded error") - } -} - -// TestGetBlockAfterRequesting... 
- func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) @@ -67,14 +42,15 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { g := NewTestSessionGenerator(net) defer g.Close() - hasBlock := g.Next() + peers := g.Instances(2) + hasBlock := peers[0] defer hasBlock.Exchange.Close() if err := hasBlock.Exchange.HasBlock(context.Background(), block); err != nil { t.Fatal(err) } - wantsBlock := g.Next() + wantsBlock := peers[1] defer wantsBlock.Exchange.Close() ctx, _ := context.WithTimeout(context.Background(), time.Second) @@ -196,8 +172,9 @@ func TestSendToWantingPeer(t *testing.T) { prev := rebroadcastDelay.Set(time.Second / 2) defer func() { rebroadcastDelay.Set(prev) }() - peerA := sg.Next() - peerB := sg.Next() + peers := sg.Instances(2) + peerA := peers[0] + peerB := peers[1] t.Logf("Session %v\n", peerA.Peer) t.Logf("Session %v\n", peerB.Peer) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 60b95e469..0b08a55fb 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -5,6 +5,7 @@ import ( "sync" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + blocks "github.com/ipfs/go-ipfs/blocks" bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" @@ -53,8 +54,9 @@ const ( type Envelope struct { // Peer is the intended recipient Peer peer.ID - // Message is the payload - Message bsmsg.BitSwapMessage + + // Block is the payload + Block *blocks.Block // A callback to notify the decision queue that the task is complete Sent func() @@ -151,12 +153,10 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { continue } - m := bsmsg.New() // TODO: maybe add keys from our wantlist? 
- m.AddBlock(block) return &Envelope{ - Peer: nextTask.Target, - Message: m, - Sent: nextTask.Done, + Peer: nextTask.Target, + Block: block, + Sent: nextTask.Done, }, nil } } @@ -185,7 +185,7 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { defer e.lock.Unlock() if len(m.Wantlist()) == 0 && len(m.Blocks()) == 0 { - log.Debug("received empty message from", p) + log.Debugf("received empty message from %s", p) } newWorkExists := false @@ -202,11 +202,11 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { for _, entry := range m.Wantlist() { if entry.Cancel { - log.Debug("cancel", entry.Key) + log.Debugf("cancel %s", entry.Key) l.CancelWant(entry.Key) e.peerRequestQueue.Remove(entry.Key, p) } else { - log.Debug("wants", entry.Key, entry.Priority) + log.Debugf("wants %s", entry.Key, entry.Priority) l.Wants(entry.Key, entry.Priority) if exists, err := e.bs.Has(entry.Key); err == nil && exists { e.peerRequestQueue.Push(entry.Entry, p) @@ -216,7 +216,7 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { } for _, block := range m.Blocks() { - log.Debug("got block %s %d bytes", block.Key(), len(block.Data)) + log.Debugf("got block %s %d bytes", block.Key(), len(block.Data)) l.ReceivedBytes(len(block.Data)) for _, l := range e.ledgerMap { if entry, ok := l.WantListContains(block.Key()); ok { diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index afe6ba9ad..31e46c776 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -185,7 +185,7 @@ func checkHandledInOrder(t *testing.T, e *Engine, keys []string) error { for _, k := range keys { next := <-e.Outbox() envelope := <-next - received := envelope.Message.Blocks()[0] + received := envelope.Block expected := blocks.NewBlock([]byte(k)) if received.Key() != expected.Key() { return errors.New(fmt.Sprintln("received", string(received.Data), "expected", string(expected.Data))) diff --git 
a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 42928487d..15f52da74 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -156,7 +156,7 @@ func (t *peerRequestTask) SetIndex(i int) { // taskKey returns a key that uniquely identifies a task. func taskKey(p peer.ID, k u.Key) string { - return string(p.String() + k.String()) + return string(p) + string(k) } // FIFO is a basic task comparator that returns tasks in the order created. diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 3a7d70aae..4e88e738c 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -29,6 +29,8 @@ type BitSwapMessage interface { Cancel(key u.Key) + Empty() bool + // Sets whether or not the contained wantlist represents the entire wantlist // true = full wantlist // false = wantlist 'patch' @@ -51,7 +53,7 @@ type Exportable interface { type impl struct { full bool wantlist map[u.Key]Entry - blocks map[u.Key]*blocks.Block // map to detect duplicates + blocks map[u.Key]*blocks.Block } func New() BitSwapMessage { @@ -92,6 +94,10 @@ func (m *impl) Full() bool { return m.full } +func (m *impl) Empty() bool { + return len(m.blocks) == 0 && len(m.wantlist) == 0 +} + func (m *impl) Wantlist() []Entry { var out []Entry for _, e := range m.wantlist { @@ -101,7 +107,7 @@ func (m *impl) Wantlist() []Entry { } func (m *impl) Blocks() []*blocks.Block { - bs := make([]*blocks.Block, 0) + bs := make([]*blocks.Block, 0, len(m.blocks)) for _, block := range m.blocks { bs = append(bs, block) } @@ -109,6 +115,7 @@ func (m *impl) Blocks() []*blocks.Block { } func (m *impl) Cancel(k u.Key) { + delete(m.wantlist, k) m.addEntry(k, 0, true) } diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index a6ed070c0..849a1c28e 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -23,6 +23,8 @@ type BitSwapNetwork interface { // network. 
SetDelegate(Receiver) + ConnectTo(context.Context, peer.ID) error + Routing } diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 97745e32d..4e5a1317f 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -97,6 +97,10 @@ func (bsnet *impl) SetDelegate(r Receiver) { bsnet.receiver = r } +func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { + return bsnet.host.Connect(ctx, peer.PeerInfo{ID: p}) +} + // FindProvidersAsync returns a channel of providers for the given key func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int) <-chan peer.ID { diff --git a/bitswap/peermanager.go b/bitswap/peermanager.go new file mode 100644 index 000000000..ff3d9ab31 --- /dev/null +++ b/bitswap/peermanager.go @@ -0,0 +1,203 @@ +package bitswap + +import ( + "sync" + + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + engine "github.com/ipfs/go-ipfs/exchange/bitswap/decision" + bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" + peer "github.com/ipfs/go-ipfs/p2p/peer" + u "github.com/ipfs/go-ipfs/util" +) + +type PeerManager struct { + receiver bsnet.Receiver + + incoming chan *msgPair + connect chan peer.ID + disconnect chan peer.ID + + peers map[peer.ID]*msgQueue + + network bsnet.BitSwapNetwork +} + +func NewPeerManager(network bsnet.BitSwapNetwork) *PeerManager { + return &PeerManager{ + incoming: make(chan *msgPair, 10), + connect: make(chan peer.ID, 10), + disconnect: make(chan peer.ID, 10), + peers: make(map[peer.ID]*msgQueue), + network: network, + } +} + +type msgPair struct { + to peer.ID + msg bsmsg.BitSwapMessage +} + +type cancellation struct { + who peer.ID + blk u.Key +} + +type msgQueue struct { + p peer.ID + + lk sync.Mutex + wlmsg bsmsg.BitSwapMessage + + work chan struct{} + done chan struct{} +} + +func (pm *PeerManager) SendBlock(env *engine.Envelope) { + // Blocks need to be 
sent synchronously to maintain proper backpressure + // throughout the network stack + defer env.Sent() + + msg := bsmsg.New() + msg.AddBlock(env.Block) + err := pm.network.SendMessage(context.TODO(), env.Peer, msg) + if err != nil { + log.Error(err) + } +} + +func (pm *PeerManager) startPeerHandler(p peer.ID) { + _, ok := pm.peers[p] + if ok { + // TODO: log an error? + return + } + + mq := new(msgQueue) + mq.done = make(chan struct{}) + mq.work = make(chan struct{}, 1) + mq.p = p + + pm.peers[p] = mq + go pm.runQueue(mq) +} + +func (pm *PeerManager) stopPeerHandler(p peer.ID) { + pq, ok := pm.peers[p] + if !ok { + // TODO: log error? + return + } + + close(pq.done) + delete(pm.peers, p) +} + +func (pm *PeerManager) runQueue(mq *msgQueue) { + for { + select { + case <-mq.work: // there is work to be done + + // TODO: this might not need to be done every time, figure out + // a good heuristic + err := pm.network.ConnectTo(context.TODO(), mq.p) + if err != nil { + log.Error(err) + // TODO: cant connect, what now? + } + + // grab messages from queue + mq.lk.Lock() + wlm := mq.wlmsg + mq.wlmsg = nil + mq.lk.Unlock() + + if wlm != nil && !wlm.Empty() { + // send wantlist updates + err = pm.network.SendMessage(context.TODO(), mq.p, wlm) + if err != nil { + log.Error("bitswap send error: ", err) + // TODO: what do we do if this fails? 
+ } + } + case <-mq.done: + return + } + } +} + +func (pm *PeerManager) Send(to peer.ID, msg bsmsg.BitSwapMessage) { + if len(msg.Blocks()) > 0 { + panic("no blocks here!") + } + pm.incoming <- &msgPair{to: to, msg: msg} +} + +func (pm *PeerManager) Broadcast(msg bsmsg.BitSwapMessage) { + pm.incoming <- &msgPair{msg: msg} +} + +func (pm *PeerManager) Connected(p peer.ID) { + pm.connect <- p +} + +func (pm *PeerManager) Disconnected(p peer.ID) { + pm.disconnect <- p +} + +// TODO: use goprocess here once i trust it +func (pm *PeerManager) Run(ctx context.Context) { + for { + select { + case msgp := <-pm.incoming: + + // Broadcast message to all if recipient not set + if msgp.to == "" { + for _, p := range pm.peers { + p.addMessage(msgp.msg) + } + continue + } + + p, ok := pm.peers[msgp.to] + if !ok { + //TODO: decide, drop message? or dial? + pm.startPeerHandler(msgp.to) + p = pm.peers[msgp.to] + } + + p.addMessage(msgp.msg) + case p := <-pm.connect: + pm.startPeerHandler(p) + case p := <-pm.disconnect: + pm.stopPeerHandler(p) + case <-ctx.Done(): + return + } + } +} + +func (mq *msgQueue) addMessage(msg bsmsg.BitSwapMessage) { + mq.lk.Lock() + defer func() { + mq.lk.Unlock() + select { + case mq.work <- struct{}{}: + default: + } + }() + + if mq.wlmsg == nil || msg.Full() { + mq.wlmsg = msg + return + } + + // TODO: add a msg.Combine(...) 
method + for _, e := range msg.Wantlist() { + if e.Cancel { + mq.wlmsg.Cancel(e.Key) + } else { + mq.wlmsg.AddEntry(e.Key, e.Priority) + } + } +} diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index feb5fd722..f2c814f81 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -119,3 +119,12 @@ func (nc *networkClient) Provide(ctx context.Context, k util.Key) error { func (nc *networkClient) SetDelegate(r bsnet.Receiver) { nc.Receiver = r } + +func (nc *networkClient) ConnectTo(_ context.Context, p peer.ID) error { + if !nc.network.HasPeer(p) { + return errors.New("no such peer in network") + } + nc.network.clients[p].PeerConnected(nc.local) + nc.Receiver.PeerConnected(p) + return nil +} diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 2ce035c3d..47930de69 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -7,7 +7,6 @@ import ( ds_sync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" - exchange "github.com/ipfs/go-ipfs/exchange" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" peer "github.com/ipfs/go-ipfs/p2p/peer" p2ptestutil "github.com/ipfs/go-ipfs/p2p/test/util" @@ -56,12 +55,18 @@ func (g *SessionGenerator) Instances(n int) []Instance { inst := g.Next() instances = append(instances, inst) } + for i, inst := range instances { + for j := i + 1; j < len(instances); j++ { + oinst := instances[j] + inst.Exchange.PeerConnected(oinst.Peer) + } + } return instances } type Instance struct { Peer peer.ID - Exchange exchange.Interface + Exchange *Bitswap blockstore blockstore.Blockstore blockstoreDelay delay.D @@ -94,7 +99,7 @@ func session(ctx context.Context, net tn.Network, p testutil.Identity) Instance const alwaysSendToPeer = true - bs := New(ctx, p.ID(), adapter, bstore, alwaysSendToPeer) + bs := New(ctx, p.ID(), adapter, 
bstore, alwaysSendToPeer).(*Bitswap) return Instance{ Peer: p.ID(), diff --git a/bitswap/workers.go b/bitswap/workers.go index dff3d911c..c6c2bbb25 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -70,9 +70,9 @@ func (bs *Bitswap) taskWorker(ctx context.Context) { if !ok { continue } - log.Event(ctx, "deliverBlocks", envelope.Message, envelope.Peer) - bs.send(ctx, envelope.Peer, envelope.Message) - envelope.Sent() + + //log.Event(ctx, "deliverBlocks", envelope.Message, envelope.Peer) + bs.pm.SendBlock(envelope) case <-ctx.Done(): return } From 986af2c3726c7ce1a3cde6de2ced6e0cef7e86f4 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 12 May 2015 23:50:57 -0700 Subject: [PATCH 0380/1035] update comments and reintroduce test This commit was moved from ipfs/go-bitswap@16e05fc42c67e00cdaee406cbb423be5660429dd --- bitswap/bitswap_test.go | 23 +++++++++++++++++++++++ bitswap/peermanager.go | 30 +++++++++++++++++------------- 2 files changed, 40 insertions(+), 13 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index c04946692..9f9fbae25 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -13,6 +13,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" + p2ptestutil "github.com/ipfs/go-ipfs/p2p/test/util" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" u "github.com/ipfs/go-ipfs/util" @@ -35,6 +36,28 @@ func TestClose(t *testing.T) { bitswap.Exchange.GetBlock(context.Background(), block.Key()) } +func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this + + rs := mockrouting.NewServer() + net := tn.VirtualNetwork(rs, delay.Fixed(kNetworkDelay)) + g := NewTestSessionGenerator(net) + defer g.Close() + + block := blocks.NewBlock([]byte("block")) + pinfo := p2ptestutil.RandTestBogusIdentityOrFatal(t) + 
rs.Client(pinfo).Provide(context.Background(), block.Key()) // but not on network + + solo := g.Next() + defer solo.Exchange.Close() + + ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) + _, err := solo.Exchange.GetBlock(ctx, block.Key()) + + if err != context.DeadlineExceeded { + t.Fatal("Expected DeadlineExceeded error") + } +} + func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) diff --git a/bitswap/peermanager.go b/bitswap/peermanager.go index ff3d9ab31..a91acd45b 100644 --- a/bitswap/peermanager.go +++ b/bitswap/peermanager.go @@ -46,8 +46,8 @@ type cancellation struct { type msgQueue struct { p peer.ID - lk sync.Mutex - wlmsg bsmsg.BitSwapMessage + outlk sync.Mutex + out bsmsg.BitSwapMessage work chan struct{} done chan struct{} @@ -106,11 +106,11 @@ func (pm *PeerManager) runQueue(mq *msgQueue) { // TODO: cant connect, what now? } - // grab messages from queue - mq.lk.Lock() - wlm := mq.wlmsg - mq.wlmsg = nil - mq.lk.Unlock() + // grab outgoin message + mq.outlk.Lock() + wlm := mq.out + mq.out = nil + mq.outlk.Unlock() if wlm != nil && !wlm.Empty() { // send wantlist updates @@ -178,26 +178,30 @@ func (pm *PeerManager) Run(ctx context.Context) { } func (mq *msgQueue) addMessage(msg bsmsg.BitSwapMessage) { - mq.lk.Lock() + mq.outlk.Lock() defer func() { - mq.lk.Unlock() + mq.outlk.Unlock() select { case mq.work <- struct{}{}: default: } }() - if mq.wlmsg == nil || msg.Full() { - mq.wlmsg = msg + // if we have no message held, or the one we are given is full + // overwrite the one we are holding + if mq.out == nil || msg.Full() { + mq.out = msg return } // TODO: add a msg.Combine(...) 
method + // otherwise, combine the one we are holding with the + // one passed in for _, e := range msg.Wantlist() { if e.Cancel { - mq.wlmsg.Cancel(e.Key) + mq.out.Cancel(e.Key) } else { - mq.wlmsg.AddEntry(e.Key, e.Priority) + mq.out.AddEntry(e.Key, e.Priority) } } } From 2f30bcedef134da2491f5e9784c4677fd775153c Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 13 May 2015 16:35:08 -0700 Subject: [PATCH 0381/1035] contextify peermanager This commit was moved from ipfs/go-bitswap@440377e28f1d26ee96e625bb4ce8530cc65d0275 --- bitswap/bitswap.go | 2 -- bitswap/decision/engine.go | 2 +- bitswap/peermanager.go | 22 +++++++++++----------- bitswap/workers.go | 4 ++-- 4 files changed, 14 insertions(+), 16 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b8dcdab1e..a05ea8091 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -316,8 +316,6 @@ func (bs *Bitswap) sendWantlistToProviders(ctx context.Context, entries []wantli // TODO(brian): handle errors func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) error { - //defer log.EventBegin(ctx, "receiveMessage", p, incoming).Done() - // This call records changes to wantlists, blocks received, // and number of bytes transfered. 
bs.engine.MessageReceived(p, incoming) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 0b08a55fb..2644885d3 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -206,7 +206,7 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { l.CancelWant(entry.Key) e.peerRequestQueue.Remove(entry.Key, p) } else { - log.Debugf("wants %s", entry.Key, entry.Priority) + log.Debugf("wants %s - %d", entry.Key, entry.Priority) l.Wants(entry.Key, entry.Priority) if exists, err := e.bs.Has(entry.Key); err == nil && exists { e.peerRequestQueue.Push(entry.Entry, p) diff --git a/bitswap/peermanager.go b/bitswap/peermanager.go index a91acd45b..a1ce7c7a8 100644 --- a/bitswap/peermanager.go +++ b/bitswap/peermanager.go @@ -53,24 +53,24 @@ type msgQueue struct { done chan struct{} } -func (pm *PeerManager) SendBlock(env *engine.Envelope) { +func (pm *PeerManager) SendBlock(ctx context.Context, env *engine.Envelope) { // Blocks need to be sent synchronously to maintain proper backpressure // throughout the network stack defer env.Sent() msg := bsmsg.New() msg.AddBlock(env.Block) - err := pm.network.SendMessage(context.TODO(), env.Peer, msg) + err := pm.network.SendMessage(ctx, env.Peer, msg) if err != nil { log.Error(err) } } -func (pm *PeerManager) startPeerHandler(p peer.ID) { +func (pm *PeerManager) startPeerHandler(ctx context.Context, p peer.ID) *msgQueue { _, ok := pm.peers[p] if ok { // TODO: log an error? 
- return + return nil } mq := new(msgQueue) @@ -79,7 +79,8 @@ func (pm *PeerManager) startPeerHandler(p peer.ID) { mq.p = p pm.peers[p] = mq - go pm.runQueue(mq) + go pm.runQueue(ctx, mq) + return mq } func (pm *PeerManager) stopPeerHandler(p peer.ID) { @@ -93,14 +94,14 @@ func (pm *PeerManager) stopPeerHandler(p peer.ID) { delete(pm.peers, p) } -func (pm *PeerManager) runQueue(mq *msgQueue) { +func (pm *PeerManager) runQueue(ctx context.Context, mq *msgQueue) { for { select { case <-mq.work: // there is work to be done // TODO: this might not need to be done every time, figure out // a good heuristic - err := pm.network.ConnectTo(context.TODO(), mq.p) + err := pm.network.ConnectTo(ctx, mq.p) if err != nil { log.Error(err) // TODO: cant connect, what now? @@ -114,7 +115,7 @@ func (pm *PeerManager) runQueue(mq *msgQueue) { if wlm != nil && !wlm.Empty() { // send wantlist updates - err = pm.network.SendMessage(context.TODO(), mq.p, wlm) + err = pm.network.SendMessage(ctx, mq.p, wlm) if err != nil { log.Error("bitswap send error: ", err) // TODO: what do we do if this fails? @@ -162,13 +163,12 @@ func (pm *PeerManager) Run(ctx context.Context) { p, ok := pm.peers[msgp.to] if !ok { //TODO: decide, drop message? or dial? 
- pm.startPeerHandler(msgp.to) - p = pm.peers[msgp.to] + p = pm.startPeerHandler(ctx, msgp.to) } p.addMessage(msgp.msg) case p := <-pm.connect: - pm.startPeerHandler(p) + pm.startPeerHandler(ctx, p) case p := <-pm.disconnect: pm.stopPeerHandler(p) case <-ctx.Done(): diff --git a/bitswap/workers.go b/bitswap/workers.go index c6c2bbb25..ba9a77549 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -46,6 +46,7 @@ func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { bs.rebroadcastWorker(ctx) }) + // Start up a worker to manage sending out provides messages px.Go(func(px process.Process) { bs.provideCollector(ctx) }) @@ -71,8 +72,7 @@ func (bs *Bitswap) taskWorker(ctx context.Context) { continue } - //log.Event(ctx, "deliverBlocks", envelope.Message, envelope.Peer) - bs.pm.SendBlock(envelope) + bs.pm.SendBlock(ctx, envelope) case <-ctx.Done(): return } From 881110c435dbecc74b29a7af4e139635be954d04 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 16 May 2015 12:30:13 -0700 Subject: [PATCH 0382/1035] WIP: super awesome bitswap cleanup fixtime This commit was moved from ipfs/go-bitswap@4ba17a0f31c590b2345b32c485dbf9a90f97f521 --- bitswap/bitswap.go | 134 +++++----------------- bitswap/bitswap_test.go | 14 ++- bitswap/decision/engine.go | 16 ++- bitswap/decision/peer_request_queue.go | 18 ++- bitswap/network/interface.go | 2 +- bitswap/peermanager.go | 152 +++++++++++++++++-------- bitswap/testnet/network_test.go | 16 +-- bitswap/testnet/virtual.go | 3 +- bitswap/workers.go | 45 ++++---- 9 files changed, 191 insertions(+), 209 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index a05ea8091..881de1538 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -4,7 +4,6 @@ package bitswap import ( "errors" - "fmt" "math" "sync" "time" @@ -23,7 +22,6 @@ import ( "github.com/ipfs/go-ipfs/thirdparty/delay" eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog" u "github.com/ipfs/go-ipfs/util" - pset 
"github.com/ipfs/go-ipfs/util/peerset" // TODO move this to peerstore ) var log = eventlog.Logger("bitswap") @@ -45,9 +43,7 @@ const ( provideWorkers = 4 ) -var ( - rebroadcastDelay = delay.Fixed(time.Second * 10) -) +var rebroadcastDelay = delay.Fixed(time.Second * 10) // New initializes a BitSwap instance that communicates over the provided // BitSwapNetwork. This function registers the returned instance as the network @@ -86,14 +82,13 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, notifications: notif, engine: decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method network: network, - wantlist: wantlist.NewThreadSafe(), batchRequests: make(chan *blockRequest, sizeBatchRequestChan), process: px, newBlocks: make(chan *blocks.Block, HasBlockBufferSize), provideKeys: make(chan u.Key), - pm: NewPeerManager(network), + wm: NewWantManager(network), } - go bs.pm.Run(ctx) + go bs.wm.Run(ctx) network.SetDelegate(bs) // Start up bitswaps async worker routines @@ -112,7 +107,7 @@ type Bitswap struct { // the peermanager manages sending messages to peers in a way that // wont block bitswap operation - pm *PeerManager + wm *WantManager // blockstore is the local database // NB: ensure threadsafety @@ -127,8 +122,6 @@ type Bitswap struct { engine *decision.Engine - wantlist *wantlist.ThreadSafe - process process.Process newBlocks chan *blocks.Block @@ -233,60 +226,21 @@ func (bs *Bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { return err } - bs.wantlist.Remove(blk.Key()) bs.notifications.Publish(blk) select { case bs.newBlocks <- blk: + // send block off to be reprovided case <-ctx.Done(): return ctx.Err() } return nil } -func (bs *Bitswap) sendWantlistMsgToPeers(ctx context.Context, m bsmsg.BitSwapMessage, peers <-chan peer.ID) error { - set := pset.New() - -loop: - for { - select { - case peerToQuery, ok := <-peers: - if !ok { - break loop - } - - if !set.TryAdd(peerToQuery) { //Do once per peer - continue 
- } - - bs.pm.Send(peerToQuery, m) - case <-ctx.Done(): - return nil - } - } - return nil -} - -func (bs *Bitswap) sendWantlistToPeers(ctx context.Context, peers <-chan peer.ID) error { - entries := bs.wantlist.Entries() - if len(entries) == 0 { - return nil - } - message := bsmsg.New() - message.SetFull(true) - for _, wanted := range entries { - message.AddEntry(wanted.Key, wanted.Priority) - } - return bs.sendWantlistMsgToPeers(ctx, message, peers) -} - -func (bs *Bitswap) sendWantlistToProviders(ctx context.Context, entries []wantlist.Entry) { +func (bs *Bitswap) connectToProviders(ctx context.Context, entries []wantlist.Entry) { ctx, cancel := context.WithCancel(ctx) defer cancel() - // prepare a channel to hand off to sendWantlistToPeers - sendToPeers := make(chan peer.ID) - // Get providers for all entries in wantlist (could take a while) wg := sync.WaitGroup{} for _, e := range entries { @@ -298,97 +252,61 @@ func (bs *Bitswap) sendWantlistToProviders(ctx context.Context, entries []wantli defer cancel() providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest) for prov := range providers { - sendToPeers <- prov + go func(p peer.ID) { + bs.network.ConnectTo(ctx, p) + }(prov) } }(e.Key) } - go func() { - wg.Wait() // make sure all our children do finish. - close(sendToPeers) - }() - - err := bs.sendWantlistToPeers(ctx, sendToPeers) - if err != nil { - log.Debugf("sendWantlistToPeers error: %s", err) - } + wg.Wait() // make sure all our children do finish. } -// TODO(brian): handle errors -func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) error { +func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { // This call records changes to wantlists, blocks received, // and number of bytes transfered. bs.engine.MessageReceived(p, incoming) // TODO: this is bad, and could be easily abused. 
// Should only track *useful* messages in ledger + if len(incoming.Blocks()) == 0 { + return + } + + // quickly send out cancels, reduces chances of duplicate block receives var keys []u.Key + for _, block := range incoming.Blocks() { + keys = append(keys, block.Key()) + } + bs.wm.CancelWants(keys) + for _, block := range incoming.Blocks() { bs.blocksRecvd++ if has, err := bs.blockstore.Has(block.Key()); err == nil && has { bs.dupBlocksRecvd++ } log.Debugf("got block %s from %s", block, p) + hasBlockCtx, cancel := context.WithTimeout(ctx, hasBlockTimeout) if err := bs.HasBlock(hasBlockCtx, block); err != nil { - return fmt.Errorf("ReceiveMessage HasBlock error: %s", err) + log.Warningf("ReceiveMessage HasBlock error: %s", err) } cancel() - keys = append(keys, block.Key()) } - - bs.cancelBlocks(ctx, keys) - return nil } // Connected/Disconnected warns bitswap about peer connections func (bs *Bitswap) PeerConnected(p peer.ID) { // TODO: add to clientWorker?? - bs.pm.Connected(p) - peers := make(chan peer.ID, 1) - peers <- p - close(peers) - err := bs.sendWantlistToPeers(context.TODO(), peers) - if err != nil { - log.Debugf("error sending wantlist: %s", err) - } + bs.wm.Connected(p) } // Connected/Disconnected warns bitswap about peer connections func (bs *Bitswap) PeerDisconnected(p peer.ID) { - bs.pm.Disconnected(p) + bs.wm.Disconnected(p) bs.engine.PeerDisconnected(p) } -func (bs *Bitswap) cancelBlocks(ctx context.Context, bkeys []u.Key) { - if len(bkeys) < 1 { - return - } - message := bsmsg.New() - message.SetFull(false) - for _, k := range bkeys { - log.Debug("cancel block: %s", k) - message.Cancel(k) - } - - bs.pm.Broadcast(message) - return -} - -func (bs *Bitswap) wantNewBlocks(ctx context.Context, bkeys []u.Key) { - if len(bkeys) < 1 { - return - } - - message := bsmsg.New() - message.SetFull(false) - for i, k := range bkeys { - message.AddEntry(k, kMaxPriority-i) - } - - bs.pm.Broadcast(message) -} - func (bs *Bitswap) ReceiveError(err error) { 
log.Debugf("Bitswap ReceiveError: %s", err) // TODO log the network error @@ -401,7 +319,7 @@ func (bs *Bitswap) Close() error { func (bs *Bitswap) GetWantlist() []u.Key { var out []u.Key - for _, e := range bs.wantlist.Entries() { + for _, e := range bs.wm.wl.Entries() { out = append(out, e.Key) } return out diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 9f9fbae25..fa5b3b97d 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -120,6 +120,16 @@ func TestLargeFile(t *testing.T) { PerformDistributionTest(t, numInstances, numBlocks) } +func TestLargeFileTwoPeers(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + t.Parallel() + numInstances := 2 + numBlocks := 100 + PerformDistributionTest(t, numInstances, numBlocks) +} + func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { if testing.Short() { t.SkipNow() @@ -129,8 +139,6 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { defer sg.Close() bg := blocksutil.NewBlockGenerator() - t.Log("Test a few nodes trying to get one file with a lot of blocks") - instances := sg.Instances(numInstances) blocks := bg.Blocks(numBlocks) @@ -238,7 +246,7 @@ func TestBasicBitswap(t *testing.T) { defer sg.Close() bg := blocksutil.NewBlockGenerator() - t.Log("Test a few nodes trying to get one file with a lot of blocks") + t.Log("Test a one node trying to get one block from another") instances := sg.Instances(2) blocks := bg.Blocks(1) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 2644885d3..186c7ba1a 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -92,7 +92,7 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { bs: bs, peerRequestQueue: newPRQ(), outbox: make(chan (<-chan *Envelope), outboxChanBuffer), - workSignal: make(chan struct{}), + workSignal: make(chan struct{}, 1), } go e.taskWorker(ctx) return e @@ -156,7 +156,15 @@ func (e *Engine) nextEnvelope(ctx 
context.Context) (*Envelope, error) { return &Envelope{ Peer: nextTask.Target, Block: block, - Sent: nextTask.Done, + Sent: func() { + nextTask.Done() + select { + case e.workSignal <- struct{}{}: + // work completing may mean that our queue will provide new + // work to be done. + default: + } + }, }, nil } } @@ -202,11 +210,11 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { for _, entry := range m.Wantlist() { if entry.Cancel { - log.Debugf("cancel %s", entry.Key) + log.Errorf("cancel %s", entry.Key) l.CancelWant(entry.Key) e.peerRequestQueue.Remove(entry.Key, p) } else { - log.Debugf("wants %s - %d", entry.Key, entry.Priority) + log.Errorf("wants %s - %d", entry.Key, entry.Priority) l.Wants(entry.Key, entry.Priority) if exists, err := e.bs.Has(entry.Key); err == nil && exists { e.peerRequestQueue.Push(entry.Entry, p) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 15f52da74..1d15578ed 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -51,12 +51,6 @@ func (tl *prq) Push(entry wantlist.Entry, to peer.ID) { tl.partners[to] = partner } - if task, ok := tl.taskMap[taskKey(to, entry.Key)]; ok { - task.Entry.Priority = entry.Priority - partner.taskQueue.Update(task.index) - return - } - partner.activelk.Lock() defer partner.activelk.Unlock() _, ok = partner.activeBlocks[entry.Key] @@ -64,6 +58,12 @@ func (tl *prq) Push(entry wantlist.Entry, to peer.ID) { return } + if task, ok := tl.taskMap[taskKey(to, entry.Key)]; ok { + task.Entry.Priority = entry.Priority + partner.taskQueue.Update(task.index) + return + } + task := &peerRequestTask{ Entry: entry, Target: to, @@ -220,6 +220,12 @@ func partnerCompare(a, b pq.Elem) bool { if pb.requests == 0 { return true } + if pa.active == pb.active { + // sorting by taskQueue.Len() aids in cleaning out trash entries faster + // if we sorted instead by requests, one peer could potentially build up + // 
a huge number of cancelled entries in the queue resulting in a memory leak + return pa.taskQueue.Len() > pb.taskQueue.Len() + } return pa.active < pb.active } diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 849a1c28e..83fca0793 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -33,7 +33,7 @@ type Receiver interface { ReceiveMessage( ctx context.Context, sender peer.ID, - incoming bsmsg.BitSwapMessage) error + incoming bsmsg.BitSwapMessage) ReceiveError(error) diff --git a/bitswap/peermanager.go b/bitswap/peermanager.go index a1ce7c7a8..2eaf36fa5 100644 --- a/bitswap/peermanager.go +++ b/bitswap/peermanager.go @@ -7,28 +7,36 @@ import ( engine "github.com/ipfs/go-ipfs/exchange/bitswap/decision" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" + wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "github.com/ipfs/go-ipfs/p2p/peer" u "github.com/ipfs/go-ipfs/util" ) -type PeerManager struct { +type WantManager struct { receiver bsnet.Receiver - incoming chan *msgPair - connect chan peer.ID + incoming chan []*bsmsg.Entry + + // notification channel for new peers connecting + connect chan peer.ID + + // notification channel for peers disconnecting disconnect chan peer.ID peers map[peer.ID]*msgQueue + wl *wantlist.Wantlist + network bsnet.BitSwapNetwork } -func NewPeerManager(network bsnet.BitSwapNetwork) *PeerManager { - return &PeerManager{ - incoming: make(chan *msgPair, 10), +func NewWantManager(network bsnet.BitSwapNetwork) *WantManager { + return &WantManager{ + incoming: make(chan []*bsmsg.Entry, 10), connect: make(chan peer.ID, 10), disconnect: make(chan peer.ID, 10), peers: make(map[peer.ID]*msgQueue), + wl: wantlist.New(), network: network, } } @@ -53,37 +61,68 @@ type msgQueue struct { done chan struct{} } -func (pm *PeerManager) SendBlock(ctx context.Context, env *engine.Envelope) { +func (pm *WantManager) WantBlocks(ks 
[]u.Key) { + log.Error("WANT: ", ks) + pm.addEntries(ks, false) +} + +func (pm *WantManager) CancelWants(ks []u.Key) { + log.Error("CANCEL: ", ks) + pm.addEntries(ks, true) +} + +func (pm *WantManager) addEntries(ks []u.Key, cancel bool) { + var entries []*bsmsg.Entry + for i, k := range ks { + entries = append(entries, &bsmsg.Entry{ + Cancel: cancel, + Entry: wantlist.Entry{ + Key: k, + Priority: kMaxPriority - i, + }, + }) + } + pm.incoming <- entries +} + +func (pm *WantManager) SendBlock(ctx context.Context, env *engine.Envelope) { // Blocks need to be sent synchronously to maintain proper backpressure // throughout the network stack defer env.Sent() msg := bsmsg.New() msg.AddBlock(env.Block) + msg.SetFull(false) err := pm.network.SendMessage(ctx, env.Peer, msg) if err != nil { log.Error(err) } } -func (pm *PeerManager) startPeerHandler(ctx context.Context, p peer.ID) *msgQueue { +func (pm *WantManager) startPeerHandler(ctx context.Context, p peer.ID) *msgQueue { _, ok := pm.peers[p] if ok { // TODO: log an error? return nil } - mq := new(msgQueue) - mq.done = make(chan struct{}) - mq.work = make(chan struct{}, 1) - mq.p = p + mq := newMsgQueue(p) + + // new peer, we will want to give them our full wantlist + fullwantlist := bsmsg.New() + for _, e := range pm.wl.Entries() { + fullwantlist.AddEntry(e.Key, e.Priority) + } + fullwantlist.SetFull(true) + mq.out = fullwantlist + mq.work <- struct{}{} pm.peers[p] = mq go pm.runQueue(ctx, mq) return mq } -func (pm *PeerManager) stopPeerHandler(p peer.ID) { +func (pm *WantManager) stopPeerHandler(p peer.ID) { pq, ok := pm.peers[p] if !ok { // TODO: log error? 
@@ -94,32 +133,38 @@ func (pm *PeerManager) stopPeerHandler(p peer.ID) { delete(pm.peers, p) } -func (pm *PeerManager) runQueue(ctx context.Context, mq *msgQueue) { +func (pm *WantManager) runQueue(ctx context.Context, mq *msgQueue) { for { select { case <-mq.work: // there is work to be done - // TODO: this might not need to be done every time, figure out - // a good heuristic err := pm.network.ConnectTo(ctx, mq.p) if err != nil { log.Error(err) // TODO: cant connect, what now? } - // grab outgoin message + // grab outgoing message mq.outlk.Lock() wlm := mq.out mq.out = nil mq.outlk.Unlock() - if wlm != nil && !wlm.Empty() { - // send wantlist updates - err = pm.network.SendMessage(ctx, mq.p, wlm) - if err != nil { - log.Error("bitswap send error: ", err) - // TODO: what do we do if this fails? - } + // no message or empty message, continue + if wlm == nil { + log.Error("nil wantlist") + continue + } + if wlm.Empty() { + log.Error("empty wantlist") + continue + } + + // send wantlist updates + err = pm.network.SendMessage(ctx, mq.p, wlm) + if err != nil { + log.Error("bitswap send error: ", err) + // TODO: what do we do if this fails? 
} case <-mq.done: return @@ -127,46 +172,38 @@ func (pm *PeerManager) runQueue(ctx context.Context, mq *msgQueue) { } } -func (pm *PeerManager) Send(to peer.ID, msg bsmsg.BitSwapMessage) { - if len(msg.Blocks()) > 0 { - panic("no blocks here!") - } - pm.incoming <- &msgPair{to: to, msg: msg} -} - -func (pm *PeerManager) Broadcast(msg bsmsg.BitSwapMessage) { - pm.incoming <- &msgPair{msg: msg} -} - -func (pm *PeerManager) Connected(p peer.ID) { +func (pm *WantManager) Connected(p peer.ID) { pm.connect <- p } -func (pm *PeerManager) Disconnected(p peer.ID) { +func (pm *WantManager) Disconnected(p peer.ID) { pm.disconnect <- p } // TODO: use goprocess here once i trust it -func (pm *PeerManager) Run(ctx context.Context) { +func (pm *WantManager) Run(ctx context.Context) { for { select { - case msgp := <-pm.incoming: - - // Broadcast message to all if recipient not set - if msgp.to == "" { - for _, p := range pm.peers { - p.addMessage(msgp.msg) + case entries := <-pm.incoming: + + msg := bsmsg.New() + msg.SetFull(false) + // add changes to our wantlist + for _, e := range entries { + if e.Cancel { + pm.wl.Remove(e.Key) + msg.Cancel(e.Key) + } else { + pm.wl.Add(e.Key, e.Priority) + msg.AddEntry(e.Key, e.Priority) } - continue } - p, ok := pm.peers[msgp.to] - if !ok { - //TODO: decide, drop message? or dial? 
- p = pm.startPeerHandler(ctx, msgp.to) + // broadcast those wantlist changes + for _, p := range pm.peers { + p.addMessage(msg) } - p.addMessage(msgp.msg) case p := <-pm.connect: pm.startPeerHandler(ctx, p) case p := <-pm.disconnect: @@ -177,6 +214,15 @@ func (pm *PeerManager) Run(ctx context.Context) { } } +func newMsgQueue(p peer.ID) *msgQueue { + mq := new(msgQueue) + mq.done = make(chan struct{}) + mq.work = make(chan struct{}, 1) + mq.p = p + + return mq +} + func (mq *msgQueue) addMessage(msg bsmsg.BitSwapMessage) { mq.outlk.Lock() defer func() { @@ -187,6 +233,10 @@ func (mq *msgQueue) addMessage(msg bsmsg.BitSwapMessage) { } }() + if msg.Full() { + log.Error("GOt FULL MESSAGE") + } + // if we have no message held, or the one we are given is full // overwrite the one we are holding if mq.out == nil || msg.Full() { @@ -199,8 +249,10 @@ func (mq *msgQueue) addMessage(msg bsmsg.BitSwapMessage) { // one passed in for _, e := range msg.Wantlist() { if e.Cancel { + log.Error("add message cancel: ", e.Key, mq.p) mq.out.Cancel(e.Key) } else { + log.Error("add message want: ", e.Key, mq.p) mq.out.AddEntry(e.Key, e.Priority) } } diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 9091ff255..c963ae9ac 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -29,19 +29,17 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { responder.SetDelegate(lambda(func( ctx context.Context, fromWaiter peer.ID, - msgFromWaiter bsmsg.BitSwapMessage) error { + msgFromWaiter bsmsg.BitSwapMessage) { msgToWaiter := bsmsg.New() msgToWaiter.AddBlock(blocks.NewBlock([]byte(expectedStr))) waiter.SendMessage(ctx, fromWaiter, msgToWaiter) - - return nil })) waiter.SetDelegate(lambda(func( ctx context.Context, fromResponder peer.ID, - msgFromResponder bsmsg.BitSwapMessage) error { + msgFromResponder bsmsg.BitSwapMessage) { // TODO assert that this came from the correct peer and that the message contents are as expected ok 
:= false @@ -54,9 +52,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { if !ok { t.Fatal("Message not received from the responder") - } - return nil })) messageSentAsync := bsmsg.New() @@ -71,7 +67,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { } type receiverFunc func(ctx context.Context, p peer.ID, - incoming bsmsg.BitSwapMessage) error + incoming bsmsg.BitSwapMessage) // lambda returns a Receiver instance given a receiver function func lambda(f receiverFunc) bsnet.Receiver { @@ -81,12 +77,12 @@ func lambda(f receiverFunc) bsnet.Receiver { } type lambdaImpl struct { - f func(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) error + f func(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) } func (lam *lambdaImpl) ReceiveMessage(ctx context.Context, - p peer.ID, incoming bsmsg.BitSwapMessage) error { - return lam.f(ctx, p, incoming) + p peer.ID, incoming bsmsg.BitSwapMessage) { + lam.f(ctx, p, incoming) } func (lam *lambdaImpl) ReceiveError(err error) { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index f2c814f81..f8ca0cd55 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -72,7 +72,8 @@ func (n *network) deliver( n.delay.Wait() - return r.ReceiveMessage(context.TODO(), from, message) + r.ReceiveMessage(context.TODO(), from, message) + return nil } type networkClient struct { diff --git a/bitswap/workers.go b/bitswap/workers.go index ba9a77549..82fb40de9 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -42,9 +42,11 @@ func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { } // Start up a worker to manage periodically resending our wantlist out to peers - px.Go(func(px process.Process) { - bs.rebroadcastWorker(ctx) - }) + /* + px.Go(func(px process.Process) { + bs.rebroadcastWorker(ctx) + }) + */ // Start up a worker to manage sending out provides messages px.Go(func(px process.Process) { @@ -72,7 +74,7 @@ func (bs *Bitswap) 
taskWorker(ctx context.Context) { continue } - bs.pm.SendBlock(ctx, envelope) + bs.wm.SendBlock(ctx, envelope) case <-ctx.Done(): return } @@ -146,30 +148,19 @@ func (bs *Bitswap) clientWorker(parent context.Context) { log.Warning("Received batch request for zero blocks") continue } - for i, k := range keys { - bs.wantlist.Add(k, kMaxPriority-i) - } - done := make(chan struct{}) - go func() { - bs.wantNewBlocks(req.ctx, keys) - close(done) - }() + bs.wm.WantBlocks(keys) // NB: Optimization. Assumes that providers of key[0] are likely to // be able to provide for all keys. This currently holds true in most // every situation. Later, this assumption may not hold as true. child, cancel := context.WithTimeout(req.ctx, providerRequestTimeout) providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest) - err := bs.sendWantlistToPeers(req.ctx, providers) - if err != nil { - log.Debugf("error sending wantlist: %s", err) + for p := range providers { + go bs.network.ConnectTo(req.ctx, p) } cancel() - // Wait for wantNewBlocks to finish - <-done - case <-parent.Done(): return } @@ -180,22 +171,24 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) { ctx, cancel := context.WithCancel(parent) defer cancel() - broadcastSignal := time.After(rebroadcastDelay.Get()) - tick := time.Tick(10 * time.Second) + broadcastSignal := time.NewTicker(rebroadcastDelay.Get()) + defer broadcastSignal.Stop() + + tick := time.NewTicker(10 * time.Second) + defer tick.Stop() for { select { - case <-tick: - n := bs.wantlist.Len() + case <-tick.C: + n := bs.wm.wl.Len() if n > 0 { log.Debug(n, "keys in bitswap wantlist") } - case <-broadcastSignal: // resend unfulfilled wantlist keys - entries := bs.wantlist.Entries() + case <-broadcastSignal.C: // resend unfulfilled wantlist keys + entries := bs.wm.wl.Entries() if len(entries) > 0 { - bs.sendWantlistToProviders(ctx, entries) + bs.connectToProviders(ctx, entries) } - broadcastSignal = time.After(rebroadcastDelay.Get()) 
case <-parent.Done(): return } From e4d9328fef790838e32f58ca76985c85a96ed23b Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 16 May 2015 14:26:29 -0700 Subject: [PATCH 0383/1035] fix race bugs This commit was moved from ipfs/go-bitswap@06821bb3b4fd8de1055b35dce07214c5e49665e1 --- bitswap/bitswap.go | 3 +++ bitswap/decision/engine.go | 4 ++-- bitswap/message/message.go | 2 +- bitswap/peermanager.go | 37 +++++++++---------------------------- bitswap/stat.go | 2 ++ 5 files changed, 17 insertions(+), 31 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 881de1538..6a1e58ff4 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -128,6 +128,7 @@ type Bitswap struct { provideKeys chan u.Key + counterLk sync.Mutex blocksRecvd int dupBlocksRecvd int } @@ -281,10 +282,12 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg bs.wm.CancelWants(keys) for _, block := range incoming.Blocks() { + bs.counterLk.Lock() bs.blocksRecvd++ if has, err := bs.blockstore.Has(block.Key()); err == nil && has { bs.dupBlocksRecvd++ } + bs.counterLk.Unlock() log.Debugf("got block %s from %s", block, p) hasBlockCtx, cancel := context.WithTimeout(ctx, hasBlockTimeout) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 186c7ba1a..d08636d80 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -210,11 +210,11 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { for _, entry := range m.Wantlist() { if entry.Cancel { - log.Errorf("cancel %s", entry.Key) + log.Debugf("cancel %s", entry.Key) l.CancelWant(entry.Key) e.peerRequestQueue.Remove(entry.Key, p) } else { - log.Errorf("wants %s - %d", entry.Key, entry.Priority) + log.Debugf("wants %s - %d", entry.Key, entry.Priority) l.Wants(entry.Key, entry.Priority) if exists, err := e.bs.Has(entry.Key); err == nil && exists { e.peerRequestQueue.Push(entry.Entry, p) diff --git a/bitswap/message/message.go 
b/bitswap/message/message.go index 4e88e738c..63f7f28b5 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -162,7 +162,7 @@ func (m *impl) ToProto() *pb.Message { pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, &pb.Message_Wantlist_Entry{ Block: proto.String(string(e.Key)), Priority: proto.Int32(int32(e.Priority)), - Cancel: &e.Cancel, + Cancel: proto.Bool(e.Cancel), }) } for _, b := range m.Blocks() { diff --git a/bitswap/peermanager.go b/bitswap/peermanager.go index 2eaf36fa5..8ec89c8e3 100644 --- a/bitswap/peermanager.go +++ b/bitswap/peermanager.go @@ -62,12 +62,10 @@ type msgQueue struct { } func (pm *WantManager) WantBlocks(ks []u.Key) { - log.Error("WANT: ", ks) pm.addEntries(ks, false) } func (pm *WantManager) CancelWants(ks []u.Key) { - log.Error("CANCEL: ", ks) pm.addEntries(ks, true) } @@ -147,18 +145,12 @@ func (pm *WantManager) runQueue(ctx context.Context, mq *msgQueue) { // grab outgoing message mq.outlk.Lock() wlm := mq.out - mq.out = nil - mq.outlk.Unlock() - - // no message or empty message, continue - if wlm == nil { - log.Error("nil wantlist") - continue - } - if wlm.Empty() { - log.Error("empty wantlist") + if wlm == nil || wlm.Empty() { + mq.outlk.Unlock() continue } + mq.out = nil + mq.outlk.Unlock() // send wantlist updates err = pm.network.SendMessage(ctx, mq.p, wlm) @@ -186,22 +178,18 @@ func (pm *WantManager) Run(ctx context.Context) { select { case entries := <-pm.incoming: - msg := bsmsg.New() - msg.SetFull(false) // add changes to our wantlist for _, e := range entries { if e.Cancel { pm.wl.Remove(e.Key) - msg.Cancel(e.Key) } else { pm.wl.Add(e.Key, e.Priority) - msg.AddEntry(e.Key, e.Priority) } } // broadcast those wantlist changes for _, p := range pm.peers { - p.addMessage(msg) + p.addMessage(entries) } case p := <-pm.connect: @@ -223,7 +211,7 @@ func newMsgQueue(p peer.ID) *msgQueue { return mq } -func (mq *msgQueue) addMessage(msg bsmsg.BitSwapMessage) { +func (mq *msgQueue) addMessage(entries 
[]*bsmsg.Entry) { mq.outlk.Lock() defer func() { mq.outlk.Unlock() @@ -233,26 +221,19 @@ func (mq *msgQueue) addMessage(msg bsmsg.BitSwapMessage) { } }() - if msg.Full() { - log.Error("GOt FULL MESSAGE") - } - // if we have no message held, or the one we are given is full // overwrite the one we are holding - if mq.out == nil || msg.Full() { - mq.out = msg - return + if mq.out == nil { + mq.out = bsmsg.New() } // TODO: add a msg.Combine(...) method // otherwise, combine the one we are holding with the // one passed in - for _, e := range msg.Wantlist() { + for _, e := range entries { if e.Cancel { - log.Error("add message cancel: ", e.Key, mq.p) mq.out.Cancel(e.Key) } else { - log.Error("add message want: ", e.Key, mq.p) mq.out.AddEntry(e.Key, e.Priority) } } diff --git a/bitswap/stat.go b/bitswap/stat.go index ceab4b2ee..a4db4c9c5 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -17,8 +17,10 @@ func (bs *Bitswap) Stat() (*Stat, error) { st := new(Stat) st.ProvideBufLen = len(bs.newBlocks) st.Wantlist = bs.GetWantlist() + bs.counterLk.Lock() st.BlocksReceived = bs.blocksRecvd st.DupBlksReceived = bs.dupBlocksRecvd + bs.counterLk.Unlock() for _, p := range bs.engine.Peers() { st.Peers = append(st.Peers, p.Pretty()) From 327c97e042b464bf622caf6191e4782e286cbc8b Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 16 May 2015 17:16:09 -0700 Subject: [PATCH 0384/1035] move taskdone inside lock boundaries This commit was moved from ipfs/go-bitswap@31198d433bb83de57252c10cdad71d8b1fc63852 --- bitswap/decision/peer_request_queue.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 1d15578ed..397a16223 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -69,8 +69,8 @@ func (tl *prq) Push(entry wantlist.Entry, to peer.ID) { Target: to, created: time.Now(), Done: func() { - partner.TaskDone(entry.Key) tl.lock.Lock() + 
partner.TaskDone(entry.Key) tl.pQueue.Update(partner.Index()) tl.lock.Unlock() }, From 6c91988cde8459bc1400c569097c89ff6cae0f78 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 16 May 2015 17:46:26 -0700 Subject: [PATCH 0385/1035] turn tests down a bit and better context passing This commit was moved from ipfs/go-bitswap@2045a7b3a9aec93791cb6605ade52ab66e818264 --- bitswap/bitswap.go | 4 ++-- bitswap/bitswap_test.go | 4 ++-- bitswap/{peermanager.go => wantmanager.go} | 26 +++++++++++++--------- 3 files changed, 20 insertions(+), 14 deletions(-) rename bitswap/{peermanager.go => wantmanager.go} (89%) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 6a1e58ff4..c6f3c74a9 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -86,9 +86,9 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, process: px, newBlocks: make(chan *blocks.Block, HasBlockBufferSize), provideKeys: make(chan u.Key), - wm: NewWantManager(network), + wm: NewWantManager(ctx, network), } - go bs.wm.Run(ctx) + go bs.wm.Run() network.SetDelegate(bs) // Start up bitswaps async worker routines diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index fa5b3b97d..86eb2d764 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -92,7 +92,7 @@ func TestLargeSwarm(t *testing.T) { if testing.Short() { t.SkipNow() } - numInstances := 500 + numInstances := 100 numBlocks := 2 if detectrace.WithRace() { // when running with the race detector, 500 instances launches @@ -124,7 +124,6 @@ func TestLargeFileTwoPeers(t *testing.T) { if testing.Short() { t.SkipNow() } - t.Parallel() numInstances := 2 numBlocks := 100 PerformDistributionTest(t, numInstances, numBlocks) @@ -164,6 +163,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { } for _ = range outch { } + log.Error("DONE") }(inst) } wg.Wait() diff --git a/bitswap/peermanager.go b/bitswap/wantmanager.go similarity index 89% rename from bitswap/peermanager.go rename to 
bitswap/wantmanager.go index 8ec89c8e3..3b2067914 100644 --- a/bitswap/peermanager.go +++ b/bitswap/wantmanager.go @@ -28,9 +28,11 @@ type WantManager struct { wl *wantlist.Wantlist network bsnet.BitSwapNetwork + + ctx context.Context } -func NewWantManager(network bsnet.BitSwapNetwork) *WantManager { +func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantManager { return &WantManager{ incoming: make(chan []*bsmsg.Entry, 10), connect: make(chan peer.ID, 10), @@ -38,6 +40,7 @@ func NewWantManager(network bsnet.BitSwapNetwork) *WantManager { peers: make(map[peer.ID]*msgQueue), wl: wantlist.New(), network: network, + ctx: ctx, } } @@ -80,7 +83,10 @@ func (pm *WantManager) addEntries(ks []u.Key, cancel bool) { }, }) } - pm.incoming <- entries + select { + case pm.incoming <- entries: + case <-pm.ctx.Done(): + } } func (pm *WantManager) SendBlock(ctx context.Context, env *engine.Envelope) { @@ -97,7 +103,7 @@ func (pm *WantManager) SendBlock(ctx context.Context, env *engine.Envelope) { } } -func (pm *WantManager) startPeerHandler(ctx context.Context, p peer.ID) *msgQueue { +func (pm *WantManager) startPeerHandler(p peer.ID) *msgQueue { _, ok := pm.peers[p] if ok { // TODO: log an error? @@ -116,7 +122,7 @@ func (pm *WantManager) startPeerHandler(ctx context.Context, p peer.ID) *msgQueu mq.work <- struct{}{} pm.peers[p] = mq - go pm.runQueue(ctx, mq) + go pm.runQueue(mq) return mq } @@ -131,12 +137,12 @@ func (pm *WantManager) stopPeerHandler(p peer.ID) { delete(pm.peers, p) } -func (pm *WantManager) runQueue(ctx context.Context, mq *msgQueue) { +func (pm *WantManager) runQueue(mq *msgQueue) { for { select { case <-mq.work: // there is work to be done - err := pm.network.ConnectTo(ctx, mq.p) + err := pm.network.ConnectTo(pm.ctx, mq.p) if err != nil { log.Error(err) // TODO: cant connect, what now? 
@@ -153,7 +159,7 @@ func (pm *WantManager) runQueue(ctx context.Context, mq *msgQueue) { mq.outlk.Unlock() // send wantlist updates - err = pm.network.SendMessage(ctx, mq.p, wlm) + err = pm.network.SendMessage(pm.ctx, mq.p, wlm) if err != nil { log.Error("bitswap send error: ", err) // TODO: what do we do if this fails? @@ -173,7 +179,7 @@ func (pm *WantManager) Disconnected(p peer.ID) { } // TODO: use goprocess here once i trust it -func (pm *WantManager) Run(ctx context.Context) { +func (pm *WantManager) Run() { for { select { case entries := <-pm.incoming: @@ -193,10 +199,10 @@ func (pm *WantManager) Run(ctx context.Context) { } case p := <-pm.connect: - pm.startPeerHandler(ctx, p) + pm.startPeerHandler(p) case p := <-pm.disconnect: pm.stopPeerHandler(p) - case <-ctx.Done(): + case <-pm.ctx.Done(): return } } From 061b8aa078a04261c2d4cd1b72d2dcbb43702a19 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 16 May 2015 20:38:42 -0700 Subject: [PATCH 0386/1035] turn rebroadcast back on This commit was moved from ipfs/go-bitswap@ef35c2a247d6649af4ac3066c1bdce8029125aeb --- bitswap/workers.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index 82fb40de9..1083566a1 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -42,11 +42,9 @@ func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { } // Start up a worker to manage periodically resending our wantlist out to peers - /* - px.Go(func(px process.Process) { - bs.rebroadcastWorker(ctx) - }) - */ + px.Go(func(px process.Process) { + bs.rebroadcastWorker(ctx) + }) // Start up a worker to manage sending out provides messages px.Go(func(px process.Process) { From 10f7a41e31b030f8df193290b39032286d3d67b2 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 16 May 2015 22:08:18 -0700 Subject: [PATCH 0387/1035] explicitly set bitswap message fullness This commit was moved from ipfs/go-bitswap@2b699fbcdd78c6899270cd1891b3fdb8dc0bbbc8 --- 
bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 1 - bitswap/decision/engine_test.go | 8 ++++---- bitswap/message/message.go | 22 ++++++---------------- bitswap/message/message_test.go | 14 +++++++------- bitswap/testnet/network_test.go | 4 ++-- bitswap/wantmanager.go | 23 ++++++++++++++++++----- 7 files changed, 38 insertions(+), 36 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index c6f3c74a9..57359c0ec 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -288,7 +288,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg bs.dupBlocksRecvd++ } bs.counterLk.Unlock() - log.Debugf("got block %s from %s", block, p) + log.Debugf("got block %s from %s (%d,%d)", block, p, bs.blocksRecvd, bs.dupBlocksRecvd) hasBlockCtx, cancel := context.WithTimeout(ctx, hasBlockTimeout) if err := bs.HasBlock(hasBlockCtx, block); err != nil { diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 86eb2d764..6548472c9 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -163,7 +163,6 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { } for _ = range outch { } - log.Error("DONE") }(inst) } wg.Wait() diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 31e46c776..8337c4800 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -41,7 +41,7 @@ func TestConsistentAccounting(t *testing.T) { // Send messages from Ernie to Bert for i := 0; i < 1000; i++ { - m := message.New() + m := message.New(false) content := []string{"this", "is", "message", "i"} m.AddBlock(blocks.NewBlock([]byte(strings.Join(content, " ")))) @@ -73,7 +73,7 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { sanfrancisco := newEngine(ctx, "sf") seattle := newEngine(ctx, "sea") - m := message.New() + m := message.New(true) sanfrancisco.Engine.MessageSent(seattle.Peer, m) seattle.Engine.MessageReceived(sanfrancisco.Peer, m) @@ 
-164,7 +164,7 @@ func TestPartnerWantsThenCancels(t *testing.T) { } func partnerWants(e *Engine, keys []string, partner peer.ID) { - add := message.New() + add := message.New(false) for i, letter := range keys { block := blocks.NewBlock([]byte(letter)) add.AddEntry(block.Key(), math.MaxInt32-i) @@ -173,7 +173,7 @@ func partnerWants(e *Engine, keys []string, partner peer.ID) { } func partnerCancels(e *Engine, keys []string, partner peer.ID) { - cancels := message.New() + cancels := message.New(false) for _, k := range keys { block := blocks.NewBlock([]byte(k)) cancels.Cancel(block.Key()) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 63f7f28b5..d885bb373 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -31,12 +31,7 @@ type BitSwapMessage interface { Empty() bool - // Sets whether or not the contained wantlist represents the entire wantlist - // true = full wantlist - // false = wantlist 'patch' - // default: true - SetFull(isFull bool) - + // A full wantlist is an authoritative copy, a 'non-full' wantlist is a patch-set Full() bool AddBlock(*blocks.Block) @@ -56,15 +51,15 @@ type impl struct { blocks map[u.Key]*blocks.Block } -func New() BitSwapMessage { - return newMsg() +func New(full bool) BitSwapMessage { + return newMsg(full) } -func newMsg() *impl { +func newMsg(full bool) *impl { return &impl{ blocks: make(map[u.Key]*blocks.Block), wantlist: make(map[u.Key]Entry), - full: true, + full: full, } } @@ -74,8 +69,7 @@ type Entry struct { } func newMessageFromProto(pbm pb.Message) BitSwapMessage { - m := newMsg() - m.SetFull(pbm.GetWantlist().GetFull()) + m := newMsg(pbm.GetWantlist().GetFull()) for _, e := range pbm.GetWantlist().GetEntries() { m.addEntry(u.Key(e.GetBlock()), int(e.GetPriority()), e.GetCancel()) } @@ -86,10 +80,6 @@ func newMessageFromProto(pbm pb.Message) BitSwapMessage { return m } -func (m *impl) SetFull(full bool) { - m.full = full -} - func (m *impl) Full() bool { return m.full } diff 
--git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index cbeed8892..7a6a28a04 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -13,7 +13,7 @@ import ( func TestAppendWanted(t *testing.T) { const str = "foo" - m := New() + m := New(true) m.AddEntry(u.Key(str), 1) if !wantlistContains(m.ToProto().GetWantlist(), str) { @@ -44,7 +44,7 @@ func TestAppendBlock(t *testing.T) { strs = append(strs, "Celeritas") strs = append(strs, "Incendia") - m := New() + m := New(true) for _, str := range strs { block := blocks.NewBlock([]byte(str)) m.AddBlock(block) @@ -61,7 +61,7 @@ func TestAppendBlock(t *testing.T) { func TestWantlist(t *testing.T) { keystrs := []string{"foo", "bar", "baz", "bat"} - m := New() + m := New(true) for _, s := range keystrs { m.AddEntry(u.Key(s), 1) } @@ -84,7 +84,7 @@ func TestWantlist(t *testing.T) { func TestCopyProtoByValue(t *testing.T) { const str = "foo" - m := New() + m := New(true) protoBeforeAppend := m.ToProto() m.AddEntry(u.Key(str), 1) if wantlistContains(protoBeforeAppend.GetWantlist(), str) { @@ -93,7 +93,7 @@ func TestCopyProtoByValue(t *testing.T) { } func TestToNetFromNetPreservesWantList(t *testing.T) { - original := New() + original := New(true) original.AddEntry(u.Key("M"), 1) original.AddEntry(u.Key("B"), 1) original.AddEntry(u.Key("D"), 1) @@ -124,7 +124,7 @@ func TestToNetFromNetPreservesWantList(t *testing.T) { func TestToAndFromNetMessage(t *testing.T) { - original := New() + original := New(true) original.AddBlock(blocks.NewBlock([]byte("W"))) original.AddBlock(blocks.NewBlock([]byte("E"))) original.AddBlock(blocks.NewBlock([]byte("F"))) @@ -172,7 +172,7 @@ func contains(strs []string, x string) bool { func TestDuplicates(t *testing.T) { b := blocks.NewBlock([]byte("foo")) - msg := New() + msg := New(true) msg.AddEntry(b.Key(), 1) msg.AddEntry(b.Key(), 1) diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index c963ae9ac..9624df5f8 100644 --- 
a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -31,7 +31,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { fromWaiter peer.ID, msgFromWaiter bsmsg.BitSwapMessage) { - msgToWaiter := bsmsg.New() + msgToWaiter := bsmsg.New(true) msgToWaiter.AddBlock(blocks.NewBlock([]byte(expectedStr))) waiter.SendMessage(ctx, fromWaiter, msgToWaiter) })) @@ -55,7 +55,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { } })) - messageSentAsync := bsmsg.New() + messageSentAsync := bsmsg.New(true) messageSentAsync.AddBlock(blocks.NewBlock([]byte("data"))) errSending := waiter.SendMessage( context.Background(), responderPeer.ID(), messageSentAsync) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 3b2067914..eb49201a6 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -2,6 +2,7 @@ package bitswap import ( "sync" + "time" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" engine "github.com/ipfs/go-ipfs/exchange/bitswap/decision" @@ -94,9 +95,8 @@ func (pm *WantManager) SendBlock(ctx context.Context, env *engine.Envelope) { // throughout the network stack defer env.Sent() - msg := bsmsg.New() + msg := bsmsg.New(false) msg.AddBlock(env.Block) - msg.SetFull(false) err := pm.network.SendMessage(ctx, env.Peer, msg) if err != nil { log.Error(err) @@ -113,11 +113,10 @@ func (pm *WantManager) startPeerHandler(p peer.ID) *msgQueue { mq := newMsgQueue(p) // new peer, we will want to give them our full wantlist - fullwantlist := bsmsg.New() + fullwantlist := bsmsg.New(true) for _, e := range pm.wl.Entries() { fullwantlist.AddEntry(e.Key, e.Priority) } - fullwantlist.SetFull(true) mq.out = fullwantlist mq.work <- struct{}{} @@ -180,6 +179,7 @@ func (pm *WantManager) Disconnected(p peer.ID) { // TODO: use goprocess here once i trust it func (pm *WantManager) Run() { + tock := time.NewTicker(rebroadcastDelay.Get()) for { select { case entries := <-pm.incoming: @@ -198,6 +198,19 @@ 
func (pm *WantManager) Run() { p.addMessage(entries) } + case <-tock.C: + // resend entire wantlist every so often (REALLY SHOULDNT BE NECESSARY) + var es []*bsmsg.Entry + for _, e := range pm.wl.Entries() { + es = append(es, &bsmsg.Entry{Entry: e}) + } + for _, p := range pm.peers { + p.outlk.Lock() + p.out = bsmsg.New(true) + p.outlk.Unlock() + + p.addMessage(es) + } case p := <-pm.connect: pm.startPeerHandler(p) case p := <-pm.disconnect: @@ -230,7 +243,7 @@ func (mq *msgQueue) addMessage(entries []*bsmsg.Entry) { // if we have no message held, or the one we are given is full // overwrite the one we are holding if mq.out == nil { - mq.out = bsmsg.New() + mq.out = bsmsg.New(false) } // TODO: add a msg.Combine(...) method From 373b101ec777b1ebfe21d274b0a9151829aca9b5 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Thu, 21 May 2015 01:11:57 -0400 Subject: [PATCH 0388/1035] fixup the bitswap readme This commit was moved from ipfs/go-bitswap@d16d2a56e800cd738a5c96b611263ce6891b6117 --- bitswap/README.md | 82 +++++++++++++++++++++-------------------------- 1 file changed, 36 insertions(+), 46 deletions(-) diff --git a/bitswap/README.md b/bitswap/README.md index bfa0aaa86..cfdbd27e0 100644 --- a/bitswap/README.md +++ b/bitswap/README.md @@ -1,47 +1,37 @@ -#Welcome to Bitswap -###(The data trading engine) +# Bitswap + +## Protocol +Bitswap is the data trading module for ipfs, it manages requesting and sending +blocks to and from other peers in the network. Bitswap has two main jobs, the +first is to acquire blocks requested by the client from the network. The second +is to judiciously send blocks in its posession to other peers who want them. + +Bitswap is a message based protocol, as opposed to response-reply. All messages +contain wantlists, or blocks. Upon receiving a wantlist, a node should consider +sending out wanted blocks if they have them. 
Upon receiving blocks, the node +should send out a notification called a 'Cancel' signifying that they no longer +want the block. At a protocol level, bitswap is very simple. + +## go-ipfs Implementation +Internally, when a message with a wantlist is received, it is sent to the +decision engine to be considered, and blocks that we have that are wanted are +placed into the peer request queue. Any block we possess that is wanted by +another peer has a task in the peer request queue created for it. The peer +request queue is a priority queue that sorts available tasks by some metric, +currently, that metric is very simple and aims to fairly address the tasks +of each other peer. More advanced decision logic will be implemented in the +future. Task workers pull tasks to be done off of the queue, retreive the block +to be sent, and send it off. The number of task workers is limited by a constant +factor. + +Client requests for new blocks are handled by the want manager, for every new +block (or set of blocks) wanted, the 'WantBlocks' method is invoked. The want +manager then ensures that connected peers are notified of the new block that we +want by sending the new entries to a message queue for each peer. The message +queue will loop while there is work available and do the following: 1) Ensure it +has a connection to its peer, 2) grab the message to be sent, and 3) send it. +If new messages are added while the loop is in steps 1 or 3, the messages are +combined into one to avoid having to keep an actual queue and send multiple +messages. The same process occurs when the client receives a block and sends a +cancel message for it. -Bitswap is the module that is responsible for requesting and providing data -blocks over the network to and from other ipfs peers. The role of bitswap is -to be a merchant in the large global marketplace of data. 
- -##Main Operations -Bitswap has three high level operations: - -- **GetBlocks** - - `GetBlocks` is a bitswap method used to request multiple blocks that are likely -to all be provided by the same set of peers (part of a single file, for example). - -- **GetBlock** - - `GetBlock` is a special case of `GetBlocks` that just requests a single block. - -- **HasBlock** - - `HasBlock` registers a local block with bitswap. Bitswap will then send that -block to any connected peers who want it (with the strategies approval), record -that transaction in the ledger and announce to the DHT that the block is being -provided. - -##Internal Details -All `GetBlock` requests are relayed into a single for-select loop via channels. -Calls to `GetBlocks` will have `FindProviders` called for only the first key in -the set initially, This is an optimization attempting to cut down on the number -of RPCs required. After a timeout (specified by the strategies -`GetRebroadcastDelay`) Bitswap will iterate through all keys still in the local -wantlist, perform a find providers call for each, and sent the wantlist out to -those providers. This is the fallback behaviour for cases where our initial -assumption about one peer potentially having multiple blocks in a set does not -hold true. - -When receiving messages, Bitswaps `ReceiveMessage` method is called. A bitswap -message may contain the wantlist of the peer who sent the message, and an array -of blocks that were on our local wantlist. Any blocks we receive in a bitswap -message will be passed to `HasBlock`, and the other peers wantlist gets updated -in the strategy by `bs.strategy.MessageReceived`. -If another peers wantlist is received, Bitswap will call its strategies -`ShouldSendBlockToPeer` method to determine whether or not the other peer will -be sent the block they are requesting (if we even have it). 
- -##Outstanding TODOs: -- [ ] Ensure only one request active per key -- [ ] More involved strategies -- [ ] Ensure only wanted blocks are counted in ledgers From 190cc779775e834fd3f799ea88adb92b0bdf8dad Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 17 May 2015 14:08:05 -0700 Subject: [PATCH 0389/1035] add a distribution test with the rebroadcast delay disabled This commit was moved from ipfs/go-bitswap@4d5b93fea7f2a0b5cdfadc593009eddc491657a9 --- bitswap/bitswap_test.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 6548472c9..803bcd223 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -120,6 +120,18 @@ func TestLargeFile(t *testing.T) { PerformDistributionTest(t, numInstances, numBlocks) } +func TestLargeFileNoRebroadcast(t *testing.T) { + rbd := rebroadcastDelay.Get() + rebroadcastDelay.Set(time.Hour * 24 * 365 * 10) // ten years should be long enough + if testing.Short() { + t.SkipNow() + } + numInstances := 10 + numBlocks := 100 + PerformDistributionTest(t, numInstances, numBlocks) + rebroadcastDelay.Set(rbd) +} + func TestLargeFileTwoPeers(t *testing.T) { if testing.Short() { t.SkipNow() From f0b91014eeca28d5ed7d6d3d8fa10bb62fea1818 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 17 May 2015 17:09:53 -0700 Subject: [PATCH 0390/1035] better bitswap logging This commit was moved from ipfs/go-bitswap@77e81da9f7c826982df3ae28edc3b2eae2c2a62c --- bitswap/bitswap.go | 2 +- bitswap/wantmanager.go | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 57359c0ec..db7bc033f 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -288,7 +288,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg bs.dupBlocksRecvd++ } bs.counterLk.Unlock() - log.Debugf("got block %s from %s (%d,%d)", block, p, bs.blocksRecvd, bs.dupBlocksRecvd) + log.Infof("got block %s from %s (%d,%d)", block, 
p, bs.blocksRecvd, bs.dupBlocksRecvd) hasBlockCtx, cancel := context.WithTimeout(ctx, hasBlockTimeout) if err := bs.HasBlock(hasBlockCtx, block); err != nil { diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index eb49201a6..74372f7f0 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -66,6 +66,7 @@ type msgQueue struct { } func (pm *WantManager) WantBlocks(ks []u.Key) { + log.Infof("want blocks: %s", ks) pm.addEntries(ks, false) } @@ -97,6 +98,7 @@ func (pm *WantManager) SendBlock(ctx context.Context, env *engine.Envelope) { msg := bsmsg.New(false) msg.AddBlock(env.Block) + log.Infof("Sending block %s to %s", env.Peer, env.Block) err := pm.network.SendMessage(ctx, env.Peer, msg) if err != nil { log.Error(err) @@ -143,8 +145,9 @@ func (pm *WantManager) runQueue(mq *msgQueue) { err := pm.network.ConnectTo(pm.ctx, mq.p) if err != nil { - log.Error(err) + log.Errorf("cant connect to peer %s: %s", mq.p, err) // TODO: cant connect, what now? + continue } // grab outgoing message From 13fe49031827a06e9e057fa362b9e7b7885e24d9 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 19 May 2015 11:26:50 -0700 Subject: [PATCH 0391/1035] clarify synhronization constructs This commit was moved from ipfs/go-bitswap@c3aed70f3ed0a4f06ea2a62adcfb40629a40d050 --- bitswap/wantmanager.go | 38 +++++++++++++++++--------------------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 74372f7f0..4efd120ef 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -14,23 +14,17 @@ import ( ) type WantManager struct { - receiver bsnet.Receiver - - incoming chan []*bsmsg.Entry - - // notification channel for new peers connecting - connect chan peer.ID - - // notification channel for peers disconnecting - disconnect chan peer.ID + // sync channels for Run loop + incoming chan []*bsmsg.Entry + connect chan peer.ID // notification channel for new peers connecting + disconnect chan peer.ID // 
notification channel for peers disconnecting + // synchronized by Run loop, only touch inside there peers map[peer.ID]*msgQueue - - wl *wantlist.Wantlist + wl *wantlist.Wantlist network bsnet.BitSwapNetwork - - ctx context.Context + ctx context.Context } func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantManager { @@ -58,8 +52,9 @@ type cancellation struct { type msgQueue struct { p peer.ID - outlk sync.Mutex - out bsmsg.BitSwapMessage + outlk sync.Mutex + out bsmsg.BitSwapMessage + network bsnet.BitSwapNetwork work chan struct{} done chan struct{} @@ -112,7 +107,7 @@ func (pm *WantManager) startPeerHandler(p peer.ID) *msgQueue { return nil } - mq := newMsgQueue(p) + mq := pm.newMsgQueue(p) // new peer, we will want to give them our full wantlist fullwantlist := bsmsg.New(true) @@ -123,7 +118,7 @@ func (pm *WantManager) startPeerHandler(p peer.ID) *msgQueue { mq.work <- struct{}{} pm.peers[p] = mq - go pm.runQueue(mq) + go mq.runQueue(pm.ctx) return mq } @@ -138,12 +133,12 @@ func (pm *WantManager) stopPeerHandler(p peer.ID) { delete(pm.peers, p) } -func (pm *WantManager) runQueue(mq *msgQueue) { +func (mq *msgQueue) runQueue(ctx context.Context) { for { select { case <-mq.work: // there is work to be done - err := pm.network.ConnectTo(pm.ctx, mq.p) + err := mq.network.ConnectTo(ctx, mq.p) if err != nil { log.Errorf("cant connect to peer %s: %s", mq.p, err) // TODO: cant connect, what now? @@ -161,7 +156,7 @@ func (pm *WantManager) runQueue(mq *msgQueue) { mq.outlk.Unlock() // send wantlist updates - err = pm.network.SendMessage(pm.ctx, mq.p, wlm) + err = mq.network.SendMessage(ctx, mq.p, wlm) if err != nil { log.Error("bitswap send error: ", err) // TODO: what do we do if this fails? 
@@ -224,10 +219,11 @@ func (pm *WantManager) Run() { } } -func newMsgQueue(p peer.ID) *msgQueue { +func (wm *WantManager) newMsgQueue(p peer.ID) *msgQueue { mq := new(msgQueue) mq.done = make(chan struct{}) mq.work = make(chan struct{}, 1) + mq.network = wm.network mq.p = p return mq From 27ac828577b8b1c62fe5fde427e5cd775806160d Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 19 May 2015 13:13:38 -0700 Subject: [PATCH 0392/1035] warning -> notice This commit was moved from ipfs/go-bitswap@67699f24717ba5c93ebede2c9bca67dd4bbaa600 --- bitswap/wantmanager.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 4efd120ef..a1ab8a022 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -96,7 +96,7 @@ func (pm *WantManager) SendBlock(ctx context.Context, env *engine.Envelope) { log.Infof("Sending block %s to %s", env.Peer, env.Block) err := pm.network.SendMessage(ctx, env.Peer, msg) if err != nil { - log.Error(err) + log.Noticef("sendblock error: %s", err) } } @@ -158,7 +158,7 @@ func (mq *msgQueue) runQueue(ctx context.Context) { // send wantlist updates err = mq.network.SendMessage(ctx, mq.p, wlm) if err != nil { - log.Error("bitswap send error: ", err) + log.Noticef("bitswap send error: %s", err) // TODO: what do we do if this fails? 
} case <-mq.done: From 51514a9e591d2aa872d7bd1afdf310a483780e82 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 19 May 2015 15:48:12 -0700 Subject: [PATCH 0393/1035] defer tock.Stop() This commit was moved from ipfs/go-bitswap@321604e0c5b5b968af96c6cb722946a55b062c0d --- bitswap/wantmanager.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index a1ab8a022..29706710f 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -178,6 +178,7 @@ func (pm *WantManager) Disconnected(p peer.ID) { // TODO: use goprocess here once i trust it func (pm *WantManager) Run() { tock := time.NewTicker(rebroadcastDelay.Get()) + defer tock.Stop() for { select { case entries := <-pm.incoming: From c8c52a9f10cfb20eb660c7b8ba64ec4021c7fd11 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 21 May 2015 21:24:42 -0700 Subject: [PATCH 0394/1035] error -> notice, bitswap This commit was moved from ipfs/go-bitswap@a499bbac1f02cc539ecb7696cb89e43276024b6f --- bitswap/wantmanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 29706710f..5405f5074 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -140,7 +140,7 @@ func (mq *msgQueue) runQueue(ctx context.Context) { err := mq.network.ConnectTo(ctx, mq.p) if err != nil { - log.Errorf("cant connect to peer %s: %s", mq.p, err) + log.Noticef("cant connect to peer %s: %s", mq.p, err) // TODO: cant connect, what now? 
continue } From 4db9d606cb852cd8c49a43e20c31c5aea21733ff Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 22 May 2015 09:08:40 -0700 Subject: [PATCH 0395/1035] fix minor data race in bitswap This commit was moved from ipfs/go-bitswap@bf637bcafe5395f58dd0865f2b45503a553cb4ad --- bitswap/bitswap.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index db7bc033f..27be53967 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -287,8 +287,10 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg if has, err := bs.blockstore.Has(block.Key()); err == nil && has { bs.dupBlocksRecvd++ } + brecvd := bs.blocksRecvd + bdup := bs.dupBlocksRecvd bs.counterLk.Unlock() - log.Infof("got block %s from %s (%d,%d)", block, p, bs.blocksRecvd, bs.dupBlocksRecvd) + log.Infof("got block %s from %s (%d,%d)", block, p, brecvd, bdup) hasBlockCtx, cancel := context.WithTimeout(ctx, hasBlockTimeout) if err := bs.HasBlock(hasBlockCtx, block); err != nil { From 4a44b1a9b3bb5781ad611c4946ae7947f5b6f37a Mon Sep 17 00:00:00 2001 From: rht Date: Tue, 26 May 2015 23:18:04 +0700 Subject: [PATCH 0396/1035] Replace 'var * bytes.Buffer' with '\1 := new(bytes.Buffer)' This commit was moved from ipfs/go-bitswap@a4f12ffcf7f042c9537245c10bee53fbf8ba7b69 --- bitswap/message/message_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 7a6a28a04..15fb7a22e 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -100,12 +100,12 @@ func TestToNetFromNetPreservesWantList(t *testing.T) { original.AddEntry(u.Key("T"), 1) original.AddEntry(u.Key("F"), 1) - var buf bytes.Buffer - if err := original.ToNet(&buf); err != nil { + buf := new(bytes.Buffer) + if err := original.ToNet(buf); err != nil { t.Fatal(err) } - copied, err := FromNet(&buf) + copied, err := FromNet(buf) if err != nil { 
t.Fatal(err) } @@ -130,12 +130,12 @@ func TestToAndFromNetMessage(t *testing.T) { original.AddBlock(blocks.NewBlock([]byte("F"))) original.AddBlock(blocks.NewBlock([]byte("M"))) - var buf bytes.Buffer - if err := original.ToNet(&buf); err != nil { + buf := new(bytes.Buffer) + if err := original.ToNet(buf); err != nil { t.Fatal(err) } - m2, err := FromNet(&buf) + m2, err := FromNet(buf) if err != nil { t.Fatal(err) } From c3f97e4eb1c46e0dc94f2580a76e337276e36ba3 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 24 May 2015 23:10:04 -0700 Subject: [PATCH 0397/1035] Move findproviders out of main block request path This PR moves the addition of new blocks to our wantlist (and their subsequent broadcast to the network) outside of the clientWorker loop. This allows blocks to more quickly propogate to peers we are already connected to, where before we had to wait for the previous findProviders call in clientworker to complete before we could notify our partners of the next blocks that we want. I then changed the naming of the clientWorker and related variables to be a bit more appropriate to the model. Although the clientWorker (now named providerConnector) feels a bit awkward and should probably be changed. fix test assumption This commit was moved from ipfs/go-bitswap@e5aa2accf070e0af26fb2275b60e141426bc658e --- bitswap/bitswap.go | 2 ++ bitswap/workers.go | 4 +--- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 27be53967..f849c1ed9 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -202,6 +202,8 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks. } promise := bs.notifications.Subscribe(ctx, keys...) 
+ bs.wm.WantBlocks(keys) + req := &blockRequest{ keys: keys, ctx: ctx, diff --git a/bitswap/workers.go b/bitswap/workers.go index 1083566a1..b41f0dd30 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -134,7 +134,7 @@ func (bs *Bitswap) provideCollector(ctx context.Context) { } } -// TODO ensure only one active request per key +// TODO: figure out clientWorkers purpose in life func (bs *Bitswap) clientWorker(parent context.Context) { defer log.Info("bitswap client worker shutting down...") @@ -147,8 +147,6 @@ func (bs *Bitswap) clientWorker(parent context.Context) { continue } - bs.wm.WantBlocks(keys) - // NB: Optimization. Assumes that providers of key[0] are likely to // be able to provide for all keys. This currently holds true in most // every situation. Later, this assumption may not hold as true. From d4c62562c4dec8c8dc22c5fd090af2307ecec4f4 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 25 May 2015 18:00:34 -0700 Subject: [PATCH 0398/1035] adjust naming This commit was moved from ipfs/go-bitswap@5056a8378468663f2439c34e384321b0f8b61ca3 --- bitswap/bitswap.go | 10 ++++------ bitswap/workers.go | 8 ++++---- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index f849c1ed9..58243e888 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -82,7 +82,7 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, notifications: notif, engine: decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method network: network, - batchRequests: make(chan *blockRequest, sizeBatchRequestChan), + findKeys: make(chan *blockRequest, sizeBatchRequestChan), process: px, newBlocks: make(chan *blocks.Block, HasBlockBufferSize), provideKeys: make(chan u.Key), @@ -115,10 +115,8 @@ type Bitswap struct { notifications notifications.PubSub - // Requests for a set of related blocks - // the assumption is made that the same peer is likely to - // have more than a single block in the set 
- batchRequests chan *blockRequest + // send keys to a worker to find and connect to providers for them + findKeys chan *blockRequest engine *decision.Engine @@ -209,7 +207,7 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks. ctx: ctx, } select { - case bs.batchRequests <- req: + case bs.findKeys <- req: return promise, nil case <-ctx.Done(): return nil, ctx.Err() diff --git a/bitswap/workers.go b/bitswap/workers.go index b41f0dd30..7852cf93e 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -31,7 +31,7 @@ func init() { func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { // Start up a worker to handle block requests this node is making px.Go(func(px process.Process) { - bs.clientWorker(ctx) + bs.providerConnector(ctx) }) // Start up workers to handle requests from other nodes for the data on this node @@ -134,13 +134,13 @@ func (bs *Bitswap) provideCollector(ctx context.Context) { } } -// TODO: figure out clientWorkers purpose in life -func (bs *Bitswap) clientWorker(parent context.Context) { +// connects to providers for the given keys +func (bs *Bitswap) providerConnector(parent context.Context) { defer log.Info("bitswap client worker shutting down...") for { select { - case req := <-bs.batchRequests: + case req := <-bs.findKeys: keys := req.keys if len(keys) == 0 { log.Warning("Received batch request for zero blocks") From 66be3f1679c023e771ec9a2d6f6a80b9d26f37a0 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 26 May 2015 11:14:44 -0700 Subject: [PATCH 0399/1035] clean up organization of receivemessage and fix race This commit was moved from ipfs/go-bitswap@89c950aa90fbefdad73a948657cfb2247e295126 --- bitswap/bitswap.go | 25 +++++++++++++++++++------ bitswap/wantmanager.go | 4 ++-- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 58243e888..d103687d2 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -270,26 +270,40 @@ func (bs 
*Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg // TODO: this is bad, and could be easily abused. // Should only track *useful* messages in ledger - if len(incoming.Blocks()) == 0 { + iblocks := incoming.Blocks() + + if len(iblocks) == 0 { return } // quickly send out cancels, reduces chances of duplicate block receives var keys []u.Key - for _, block := range incoming.Blocks() { + for _, block := range iblocks { keys = append(keys, block.Key()) } bs.wm.CancelWants(keys) - for _, block := range incoming.Blocks() { + for _, block := range iblocks { bs.counterLk.Lock() bs.blocksRecvd++ - if has, err := bs.blockstore.Has(block.Key()); err == nil && has { + has, err := bs.blockstore.Has(block.Key()) + if err == nil && has { bs.dupBlocksRecvd++ } brecvd := bs.blocksRecvd bdup := bs.dupBlocksRecvd bs.counterLk.Unlock() + if has { + continue + } + + // put this after the duplicate check as a block not on our wantlist may + // have already been received. + if _, found := bs.wm.wl.Contains(block.Key()); !found { + log.Notice("received un-asked-for block: %s", block) + continue + } + log.Infof("got block %s from %s (%d,%d)", block, p, brecvd, bdup) hasBlockCtx, cancel := context.WithTimeout(ctx, hasBlockTimeout) @@ -302,7 +316,6 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg // Connected/Disconnected warns bitswap about peer connections func (bs *Bitswap) PeerConnected(p peer.ID) { - // TODO: add to clientWorker?? 
bs.wm.Connected(p) } @@ -313,7 +326,7 @@ func (bs *Bitswap) PeerDisconnected(p peer.ID) { } func (bs *Bitswap) ReceiveError(err error) { - log.Debugf("Bitswap ReceiveError: %s", err) + log.Infof("Bitswap ReceiveError: %s", err) // TODO log the network error // TODO bubble the network error up to the parent context/error logger } diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 5405f5074..e87453920 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -21,7 +21,7 @@ type WantManager struct { // synchronized by Run loop, only touch inside there peers map[peer.ID]*msgQueue - wl *wantlist.Wantlist + wl *wantlist.ThreadSafe network bsnet.BitSwapNetwork ctx context.Context @@ -33,7 +33,7 @@ func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantMana connect: make(chan peer.ID, 10), disconnect: make(chan peer.ID, 10), peers: make(map[peer.ID]*msgQueue), - wl: wantlist.New(), + wl: wantlist.NewThreadSafe(), network: network, ctx: ctx, } From 9fd9f325c55d835ae519eba3754c8c93ac2cfe7f Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 27 May 2015 19:03:39 -0700 Subject: [PATCH 0400/1035] parallelize block processing This commit was moved from ipfs/go-bitswap@bc186b260d76d361c50b02b44ebeac34c08e6c8f --- bitswap/bitswap.go | 54 ++++++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d103687d2..7e8a0f7af 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -279,39 +279,41 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg // quickly send out cancels, reduces chances of duplicate block receives var keys []u.Key for _, block := range iblocks { - keys = append(keys, block.Key()) - } - bs.wm.CancelWants(keys) - - for _, block := range iblocks { - bs.counterLk.Lock() - bs.blocksRecvd++ - has, err := bs.blockstore.Has(block.Key()) - if err == nil && has { - bs.dupBlocksRecvd++ - } - brecvd := 
bs.blocksRecvd - bdup := bs.dupBlocksRecvd - bs.counterLk.Unlock() - if has { - continue - } - - // put this after the duplicate check as a block not on our wantlist may - // have already been received. if _, found := bs.wm.wl.Contains(block.Key()); !found { log.Notice("received un-asked-for block: %s", block) continue } + keys = append(keys, block.Key()) + } + bs.wm.CancelWants(keys) - log.Infof("got block %s from %s (%d,%d)", block, p, brecvd, bdup) + wg := sync.WaitGroup{} + for _, block := range iblocks { + wg.Add(1) + go func(b *blocks.Block) { + defer wg.Done() + bs.counterLk.Lock() + bs.blocksRecvd++ + has, err := bs.blockstore.Has(b.Key()) + if err == nil && has { + bs.dupBlocksRecvd++ + } + brecvd := bs.blocksRecvd + bdup := bs.dupBlocksRecvd + bs.counterLk.Unlock() + if has { + return + } - hasBlockCtx, cancel := context.WithTimeout(ctx, hasBlockTimeout) - if err := bs.HasBlock(hasBlockCtx, block); err != nil { - log.Warningf("ReceiveMessage HasBlock error: %s", err) - } - cancel() + log.Debugf("got block %s from %s (%d,%d)", b, p, brecvd, bdup) + hasBlockCtx, cancel := context.WithTimeout(ctx, hasBlockTimeout) + if err := bs.HasBlock(hasBlockCtx, b); err != nil { + log.Warningf("ReceiveMessage HasBlock error: %s", err) + } + cancel() + }(block) } + wg.Wait() } // Connected/Disconnected warns bitswap about peer connections From 0450cca6f6d3c9f1e7bc2da33bc77e22bf10fc56 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 27 May 2015 21:19:07 -0700 Subject: [PATCH 0401/1035] handle error This commit was moved from ipfs/go-bitswap@8cd12955e2aea1203136af0c928cf94024210479 --- bitswap/bitswap.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 7e8a0f7af..020c8d16a 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -295,6 +295,11 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg bs.counterLk.Lock() bs.blocksRecvd++ has, err := bs.blockstore.Has(b.Key()) + if err != nil { 
+ bs.counterLk.Unlock() + log.Noticef("blockstore.Has error: %s", err) + return + } if err == nil && has { bs.dupBlocksRecvd++ } From 29b1fa0bcc4e9eb508a8564db7f7c2000cec4f21 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 1 Jun 2015 16:10:08 -0700 Subject: [PATCH 0402/1035] move util.Key into its own package under blocks This commit was moved from ipfs/go-bitswap@8cb5013401769dc3e29dc84fd1f2e002b26e07c1 --- bitswap/bitswap.go | 26 ++++++++++----------- bitswap/bitswap_test.go | 6 ++--- bitswap/decision/bench_test.go | 4 ++-- bitswap/decision/ledger.go | 14 +++++------ bitswap/decision/peer_request_queue.go | 16 ++++++------- bitswap/decision/peer_request_queue_test.go | 14 +++++------ bitswap/message/message.go | 22 ++++++++--------- bitswap/message/message_test.go | 22 ++++++++--------- bitswap/network/interface.go | 6 ++--- bitswap/network/ipfs_impl.go | 6 ++--- bitswap/notifications/notifications.go | 8 +++---- bitswap/notifications/notifications_test.go | 6 ++--- bitswap/stat.go | 4 ++-- bitswap/testnet/virtual.go | 6 ++--- bitswap/wantlist/wantlist.go | 20 ++++++++-------- bitswap/wantmanager.go | 10 ++++---- bitswap/workers.go | 8 +++---- 17 files changed, 99 insertions(+), 99 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 020c8d16a..bed1d3a47 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -12,6 +12,7 @@ import ( context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" + key "github.com/ipfs/go-ipfs/blocks/key" exchange "github.com/ipfs/go-ipfs/exchange" decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" @@ -21,7 +22,6 @@ import ( peer "github.com/ipfs/go-ipfs/p2p/peer" "github.com/ipfs/go-ipfs/thirdparty/delay" eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog" - u "github.com/ipfs/go-ipfs/util" ) var log = 
eventlog.Logger("bitswap") @@ -85,7 +85,7 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, findKeys: make(chan *blockRequest, sizeBatchRequestChan), process: px, newBlocks: make(chan *blocks.Block, HasBlockBufferSize), - provideKeys: make(chan u.Key), + provideKeys: make(chan key.Key), wm: NewWantManager(ctx, network), } go bs.wm.Run() @@ -124,7 +124,7 @@ type Bitswap struct { newBlocks chan *blocks.Block - provideKeys chan u.Key + provideKeys chan key.Key counterLk sync.Mutex blocksRecvd int @@ -132,13 +132,13 @@ type Bitswap struct { } type blockRequest struct { - keys []u.Key + keys []key.Key ctx context.Context } // GetBlock attempts to retrieve a particular block from peers within the // deadline enforced by the context. -func (bs *Bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, error) { +func (bs *Bitswap) GetBlock(parent context.Context, k key.Key) (*blocks.Block, error) { // Any async work initiated by this function must end when this function // returns. To ensure this, derive a new context. Note that it is okay to @@ -156,7 +156,7 @@ func (bs *Bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err cancelFunc() }() - promise, err := bs.GetBlocks(ctx, []u.Key{k}) + promise, err := bs.GetBlocks(ctx, []key.Key{k}) if err != nil { return nil, err } @@ -177,8 +177,8 @@ func (bs *Bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err } } -func (bs *Bitswap) WantlistForPeer(p peer.ID) []u.Key { - var out []u.Key +func (bs *Bitswap) WantlistForPeer(p peer.ID) []key.Key { + var out []key.Key for _, e := range bs.engine.WantlistForPeer(p) { out = append(out, e.Key) } @@ -192,7 +192,7 @@ func (bs *Bitswap) WantlistForPeer(p peer.ID) []u.Key { // NB: Your request remains open until the context expires. To conserve // resources, provide a context with a reasonably short deadline (ie. 
not one // that lasts throughout the lifetime of the server) -func (bs *Bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks.Block, error) { +func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan *blocks.Block, error) { select { case <-bs.process.Closing(): return nil, errors.New("bitswap is closed") @@ -246,7 +246,7 @@ func (bs *Bitswap) connectToProviders(ctx context.Context, entries []wantlist.En wg := sync.WaitGroup{} for _, e := range entries { wg.Add(1) - go func(k u.Key) { + go func(k key.Key) { defer wg.Done() child, cancel := context.WithTimeout(ctx, providerRequestTimeout) @@ -277,7 +277,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg } // quickly send out cancels, reduces chances of duplicate block receives - var keys []u.Key + var keys []key.Key for _, block := range iblocks { if _, found := bs.wm.wl.Contains(block.Key()); !found { log.Notice("received un-asked-for block: %s", block) @@ -342,8 +342,8 @@ func (bs *Bitswap) Close() error { return bs.process.Close() } -func (bs *Bitswap) GetWantlist() []u.Key { - var out []u.Key +func (bs *Bitswap) GetWantlist() []key.Key { + var out []key.Key for _, e := range bs.wm.wl.Entries() { out = append(out, e.Key) } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 803bcd223..e70b3885a 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -12,11 +12,11 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" + key "github.com/ipfs/go-ipfs/blocks/key" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" p2ptestutil "github.com/ipfs/go-ipfs/p2p/test/util" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - u "github.com/ipfs/go-ipfs/util" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work @@ -155,7 +155,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { t.Log("Give the blocks to the first instance") - var blkeys []u.Key + var blkeys []key.Key first := instances[0] for _, b := range blocks { blkeys = append(blkeys, b.Key()) @@ -227,7 +227,7 @@ func TestSendToWantingPeer(t *testing.T) { alpha := bg.Next() // peerA requests and waits for block alpha ctx, _ := context.WithTimeout(context.TODO(), waitTime) - alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []u.Key{alpha.Key()}) + alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []key.Key{alpha.Key()}) if err != nil { t.Fatal(err) } diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 0a1e53ce1..e64815338 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -4,9 +4,9 @@ import ( "math" "testing" + key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/p2p/peer" - "github.com/ipfs/go-ipfs/util" "github.com/ipfs/go-ipfs/util/testutil" ) @@ -21,6 +21,6 @@ func BenchmarkTaskQueuePush(b *testing.B) { } b.ResetTimer() for i := 0; i < b.N; i++ { - q.Push(wantlist.Entry{Key: util.Key(i), Priority: math.MaxInt32}, peers[i%len(peers)]) + q.Push(wantlist.Entry{Key: key.Key(i), Priority: math.MaxInt32}, peers[i%len(peers)]) } } diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 51b1bc914..c0d1af8a5 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -3,20 +3,20 @@ package decision import ( "time" + key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "github.com/ipfs/go-ipfs/p2p/peer" - u "github.com/ipfs/go-ipfs/util" ) // keySet is just a convenient alias for maps of keys, where we only care // access/lookups. 
-type keySet map[u.Key]struct{} +type keySet map[key.Key]struct{} func newLedger(p peer.ID) *ledger { return &ledger{ wantList: wl.New(), Partner: p, - sentToPeer: make(map[u.Key]time.Time), + sentToPeer: make(map[key.Key]time.Time), } } @@ -43,7 +43,7 @@ type ledger struct { // sentToPeer is a set of keys to ensure we dont send duplicate blocks // to a given peer - sentToPeer map[u.Key]time.Time + sentToPeer map[key.Key]time.Time } type debtRatio struct { @@ -68,16 +68,16 @@ func (l *ledger) ReceivedBytes(n int) { } // TODO: this needs to be different. We need timeouts. -func (l *ledger) Wants(k u.Key, priority int) { +func (l *ledger) Wants(k key.Key, priority int) { log.Debugf("peer %s wants %s", l.Partner, k) l.wantList.Add(k, priority) } -func (l *ledger) CancelWant(k u.Key) { +func (l *ledger) CancelWant(k key.Key) { l.wantList.Remove(k) } -func (l *ledger) WantListContains(k u.Key) (wl.Entry, bool) { +func (l *ledger) WantListContains(k key.Key) (wl.Entry, bool) { return l.wantList.Contains(k) } diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 397a16223..0ba74edaf 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -4,17 +4,17 @@ import ( "sync" "time" + key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "github.com/ipfs/go-ipfs/p2p/peer" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - u "github.com/ipfs/go-ipfs/util" ) type peerRequestQueue interface { // Pop returns the next peerRequestTask. Returns nil if the peerRequestQueue is empty. Pop() *peerRequestTask Push(entry wantlist.Entry, to peer.ID) - Remove(k u.Key, p peer.ID) + Remove(k key.Key, p peer.ID) // NB: cannot expose simply expose taskQueue.Len because trashed elements // may exist. These trashed elements should not contribute to the count. 
} @@ -110,7 +110,7 @@ func (tl *prq) Pop() *peerRequestTask { } // Remove removes a task from the queue -func (tl *prq) Remove(k u.Key, p peer.ID) { +func (tl *prq) Remove(k key.Key, p peer.ID) { tl.lock.Lock() t, ok := tl.taskMap[taskKey(p, k)] if ok { @@ -155,7 +155,7 @@ func (t *peerRequestTask) SetIndex(i int) { } // taskKey returns a key that uniquely identifies a task. -func taskKey(p peer.ID, k u.Key) string { +func taskKey(p peer.ID, k key.Key) string { return string(p) + string(k) } @@ -186,7 +186,7 @@ type activePartner struct { activelk sync.Mutex active int - activeBlocks map[u.Key]struct{} + activeBlocks map[key.Key]struct{} // requests is the number of blocks this peer is currently requesting // request need not be locked around as it will only be modified under @@ -203,7 +203,7 @@ type activePartner struct { func newActivePartner() *activePartner { return &activePartner{ taskQueue: pq.New(wrapCmp(V1)), - activeBlocks: make(map[u.Key]struct{}), + activeBlocks: make(map[key.Key]struct{}), } } @@ -230,7 +230,7 @@ func partnerCompare(a, b pq.Elem) bool { } // StartTask signals that a task was started for this partner -func (p *activePartner) StartTask(k u.Key) { +func (p *activePartner) StartTask(k key.Key) { p.activelk.Lock() p.activeBlocks[k] = struct{}{} p.active++ @@ -238,7 +238,7 @@ func (p *activePartner) StartTask(k u.Key) { } // TaskDone signals that a task was completed for this partner -func (p *activePartner) TaskDone(k u.Key) { +func (p *activePartner) TaskDone(k key.Key) { p.activelk.Lock() delete(p.activeBlocks, k) p.active-- diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 96c136d6f..e71782f07 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -7,8 +7,8 @@ import ( "strings" "testing" + key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - "github.com/ipfs/go-ipfs/util" 
"github.com/ipfs/go-ipfs/util/testutil" ) @@ -41,10 +41,10 @@ func TestPushPop(t *testing.T) { for _, index := range rand.Perm(len(alphabet)) { // add blocks for all letters letter := alphabet[index] t.Log(partner.String()) - prq.Push(wantlist.Entry{Key: util.Key(letter), Priority: math.MaxInt32 - index}, partner) + prq.Push(wantlist.Entry{Key: key.Key(letter), Priority: math.MaxInt32 - index}, partner) } for _, consonant := range consonants { - prq.Remove(util.Key(consonant), partner) + prq.Remove(key.Key(consonant), partner) } var out []string @@ -76,10 +76,10 @@ func TestPeerRepeats(t *testing.T) { // Have each push some blocks for i := 0; i < 5; i++ { - prq.Push(wantlist.Entry{Key: util.Key(i)}, a) - prq.Push(wantlist.Entry{Key: util.Key(i)}, b) - prq.Push(wantlist.Entry{Key: util.Key(i)}, c) - prq.Push(wantlist.Entry{Key: util.Key(i)}, d) + prq.Push(wantlist.Entry{Key: key.Key(i)}, a) + prq.Push(wantlist.Entry{Key: key.Key(i)}, b) + prq.Push(wantlist.Entry{Key: key.Key(i)}, c) + prq.Push(wantlist.Entry{Key: key.Key(i)}, d) } // now, pop off four entries, there should be one from each diff --git a/bitswap/message/message.go b/bitswap/message/message.go index d885bb373..6e4979939 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -4,10 +4,10 @@ import ( "io" blocks "github.com/ipfs/go-ipfs/blocks" + key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/internal/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" inet "github.com/ipfs/go-ipfs/p2p/net" - u "github.com/ipfs/go-ipfs/util" ggio "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/io" proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto" @@ -25,9 +25,9 @@ type BitSwapMessage interface { Blocks() []*blocks.Block // AddEntry adds an entry to the Wantlist. 
- AddEntry(key u.Key, priority int) + AddEntry(key key.Key, priority int) - Cancel(key u.Key) + Cancel(key key.Key) Empty() bool @@ -47,8 +47,8 @@ type Exportable interface { type impl struct { full bool - wantlist map[u.Key]Entry - blocks map[u.Key]*blocks.Block + wantlist map[key.Key]Entry + blocks map[key.Key]*blocks.Block } func New(full bool) BitSwapMessage { @@ -57,8 +57,8 @@ func New(full bool) BitSwapMessage { func newMsg(full bool) *impl { return &impl{ - blocks: make(map[u.Key]*blocks.Block), - wantlist: make(map[u.Key]Entry), + blocks: make(map[key.Key]*blocks.Block), + wantlist: make(map[key.Key]Entry), full: full, } } @@ -71,7 +71,7 @@ type Entry struct { func newMessageFromProto(pbm pb.Message) BitSwapMessage { m := newMsg(pbm.GetWantlist().GetFull()) for _, e := range pbm.GetWantlist().GetEntries() { - m.addEntry(u.Key(e.GetBlock()), int(e.GetPriority()), e.GetCancel()) + m.addEntry(key.Key(e.GetBlock()), int(e.GetPriority()), e.GetCancel()) } for _, d := range pbm.GetBlocks() { b := blocks.NewBlock(d) @@ -104,16 +104,16 @@ func (m *impl) Blocks() []*blocks.Block { return bs } -func (m *impl) Cancel(k u.Key) { +func (m *impl) Cancel(k key.Key) { delete(m.wantlist, k) m.addEntry(k, 0, true) } -func (m *impl) AddEntry(k u.Key, priority int) { +func (m *impl) AddEntry(k key.Key, priority int) { m.addEntry(k, priority, false) } -func (m *impl) addEntry(k u.Key, priority int, cancel bool) { +func (m *impl) addEntry(k key.Key, priority int, cancel bool) { e, exists := m.wantlist[k] if exists { e.Priority = priority diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 15fb7a22e..4452b88a0 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -7,14 +7,14 @@ import ( proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto" blocks "github.com/ipfs/go-ipfs/blocks" + key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/internal/pb" 
- u "github.com/ipfs/go-ipfs/util" ) func TestAppendWanted(t *testing.T) { const str = "foo" m := New(true) - m.AddEntry(u.Key(str), 1) + m.AddEntry(key.Key(str), 1) if !wantlistContains(m.ToProto().GetWantlist(), str) { t.Fail() @@ -63,7 +63,7 @@ func TestWantlist(t *testing.T) { keystrs := []string{"foo", "bar", "baz", "bat"} m := New(true) for _, s := range keystrs { - m.AddEntry(u.Key(s), 1) + m.AddEntry(key.Key(s), 1) } exported := m.Wantlist() @@ -86,7 +86,7 @@ func TestCopyProtoByValue(t *testing.T) { const str = "foo" m := New(true) protoBeforeAppend := m.ToProto() - m.AddEntry(u.Key(str), 1) + m.AddEntry(key.Key(str), 1) if wantlistContains(protoBeforeAppend.GetWantlist(), str) { t.Fail() } @@ -94,11 +94,11 @@ func TestCopyProtoByValue(t *testing.T) { func TestToNetFromNetPreservesWantList(t *testing.T) { original := New(true) - original.AddEntry(u.Key("M"), 1) - original.AddEntry(u.Key("B"), 1) - original.AddEntry(u.Key("D"), 1) - original.AddEntry(u.Key("T"), 1) - original.AddEntry(u.Key("F"), 1) + original.AddEntry(key.Key("M"), 1) + original.AddEntry(key.Key("B"), 1) + original.AddEntry(key.Key("D"), 1) + original.AddEntry(key.Key("T"), 1) + original.AddEntry(key.Key("F"), 1) buf := new(bytes.Buffer) if err := original.ToNet(buf); err != nil { @@ -110,7 +110,7 @@ func TestToNetFromNetPreservesWantList(t *testing.T) { t.Fatal(err) } - keys := make(map[u.Key]bool) + keys := make(map[key.Key]bool) for _, k := range copied.Wantlist() { keys[k.Key] = true } @@ -140,7 +140,7 @@ func TestToAndFromNetMessage(t *testing.T) { t.Fatal(err) } - keys := make(map[u.Key]bool) + keys := make(map[key.Key]bool) for _, b := range m2.Blocks() { keys[b.Key()] = true } diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 83fca0793..35da0f84d 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -2,10 +2,10 @@ package network import ( context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + key 
"github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" peer "github.com/ipfs/go-ipfs/p2p/peer" protocol "github.com/ipfs/go-ipfs/p2p/protocol" - u "github.com/ipfs/go-ipfs/util" ) var ProtocolBitswap protocol.ID = "/ipfs/bitswap" @@ -44,8 +44,8 @@ type Receiver interface { type Routing interface { // FindProvidersAsync returns a channel of providers for the given key - FindProvidersAsync(context.Context, u.Key, int) <-chan peer.ID + FindProvidersAsync(context.Context, key.Key, int) <-chan peer.ID // Provide provides the key to the network - Provide(context.Context, u.Key) error + Provide(context.Context, key.Key) error } diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 4e5a1317f..78d1defd3 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -3,13 +3,13 @@ package network import ( ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" host "github.com/ipfs/go-ipfs/p2p/host" inet "github.com/ipfs/go-ipfs/p2p/net" peer "github.com/ipfs/go-ipfs/p2p/peer" routing "github.com/ipfs/go-ipfs/routing" eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog" - util "github.com/ipfs/go-ipfs/util" ) var log = eventlog.Logger("bitswap_network") @@ -102,7 +102,7 @@ func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { } // FindProvidersAsync returns a channel of providers for the given key -func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int) <-chan peer.ID { +func (bsnet *impl) FindProvidersAsync(ctx context.Context, k key.Key, max int) <-chan peer.ID { // Since routing queries are expensive, give bitswap the peers to which we // have open connections. 
Note that this may cause issues if bitswap starts @@ -138,7 +138,7 @@ func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int) } // Provide provides the key to the network -func (bsnet *impl) Provide(ctx context.Context, k util.Key) error { +func (bsnet *impl) Provide(ctx context.Context, k key.Key) error { return bsnet.routing.Provide(ctx, k) } diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index d1764defc..e9870940e 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -4,14 +4,14 @@ import ( pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" blocks "github.com/ipfs/go-ipfs/blocks" - u "github.com/ipfs/go-ipfs/util" + key "github.com/ipfs/go-ipfs/blocks/key" ) const bufferSize = 16 type PubSub interface { Publish(block *blocks.Block) - Subscribe(ctx context.Context, keys ...u.Key) <-chan *blocks.Block + Subscribe(ctx context.Context, keys ...key.Key) <-chan *blocks.Block Shutdown() } @@ -35,7 +35,7 @@ func (ps *impl) Shutdown() { // Subscribe returns a channel of blocks for the given |keys|. |blockChannel| // is closed if the |ctx| times out or is cancelled, or after sending len(keys) // blocks. 
-func (ps *impl) Subscribe(ctx context.Context, keys ...u.Key) <-chan *blocks.Block { +func (ps *impl) Subscribe(ctx context.Context, keys ...key.Key) <-chan *blocks.Block { blocksCh := make(chan *blocks.Block, len(keys)) valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking @@ -71,7 +71,7 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...u.Key) <-chan *blocks.Blo return blocksCh } -func toStrings(keys []u.Key) []string { +func toStrings(keys []key.Key) []string { strs := make([]string, 0) for _, key := range keys { strs = append(strs, string(key)) diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 8cf89669b..e9be15aa4 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -8,7 +8,7 @@ import ( context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - "github.com/ipfs/go-ipfs/util" + key "github.com/ipfs/go-ipfs/blocks/key" ) func TestDuplicates(t *testing.T) { @@ -131,8 +131,8 @@ func TestDoesNotDeadLockIfContextCancelledBeforePublish(t *testing.T) { t.Log("generate a large number of blocks. 
exceed default buffer") bs := g.Blocks(1000) - ks := func() []util.Key { - var keys []util.Key + ks := func() []key.Key { + var keys []key.Key for _, b := range bs { keys = append(keys, b.Key()) } diff --git a/bitswap/stat.go b/bitswap/stat.go index a4db4c9c5..5fa0e285e 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -1,13 +1,13 @@ package bitswap import ( - u "github.com/ipfs/go-ipfs/util" + key "github.com/ipfs/go-ipfs/blocks/key" "sort" ) type Stat struct { ProvideBufLen int - Wantlist []u.Key + Wantlist []key.Key Peers []string BlocksReceived int DupBlksReceived int diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index f8ca0cd55..eb3424366 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -4,13 +4,13 @@ import ( "errors" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" peer "github.com/ipfs/go-ipfs/p2p/peer" routing "github.com/ipfs/go-ipfs/routing" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - util "github.com/ipfs/go-ipfs/util" testutil "github.com/ipfs/go-ipfs/util/testutil" ) @@ -91,7 +91,7 @@ func (nc *networkClient) SendMessage( } // FindProvidersAsync returns a channel of providers for the given key -func (nc *networkClient) FindProvidersAsync(ctx context.Context, k util.Key, max int) <-chan peer.ID { +func (nc *networkClient) FindProvidersAsync(ctx context.Context, k key.Key, max int) <-chan peer.ID { // NB: this function duplicates the PeerInfo -> ID transformation in the // bitswap network adapter. Not to worry. 
This network client will be @@ -113,7 +113,7 @@ func (nc *networkClient) FindProvidersAsync(ctx context.Context, k util.Key, max } // Provide provides the key to the network -func (nc *networkClient) Provide(ctx context.Context, k util.Key) error { +func (nc *networkClient) Provide(ctx context.Context, k key.Key) error { return nc.routing.Provide(ctx, k) } diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 508a7a09b..a82b484a4 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -3,7 +3,7 @@ package wantlist import ( - u "github.com/ipfs/go-ipfs/util" + key "github.com/ipfs/go-ipfs/blocks/key" "sort" "sync" ) @@ -15,14 +15,14 @@ type ThreadSafe struct { // not threadsafe type Wantlist struct { - set map[u.Key]Entry + set map[key.Key]Entry // TODO provide O(1) len accessor if cost becomes an issue } type Entry struct { // TODO consider making entries immutable so they can be shared safely and // slices can be copied efficiently. - Key u.Key + Key key.Key Priority int } @@ -40,25 +40,25 @@ func NewThreadSafe() *ThreadSafe { func New() *Wantlist { return &Wantlist{ - set: make(map[u.Key]Entry), + set: make(map[key.Key]Entry), } } -func (w *ThreadSafe) Add(k u.Key, priority int) { +func (w *ThreadSafe) Add(k key.Key, priority int) { // TODO rm defer for perf w.lk.Lock() defer w.lk.Unlock() w.Wantlist.Add(k, priority) } -func (w *ThreadSafe) Remove(k u.Key) { +func (w *ThreadSafe) Remove(k key.Key) { // TODO rm defer for perf w.lk.Lock() defer w.lk.Unlock() w.Wantlist.Remove(k) } -func (w *ThreadSafe) Contains(k u.Key) (Entry, bool) { +func (w *ThreadSafe) Contains(k key.Key) (Entry, bool) { // TODO rm defer for perf w.lk.RLock() defer w.lk.RUnlock() @@ -87,7 +87,7 @@ func (w *Wantlist) Len() int { return len(w.set) } -func (w *Wantlist) Add(k u.Key, priority int) { +func (w *Wantlist) Add(k key.Key, priority int) { if _, ok := w.set[k]; ok { return } @@ -97,11 +97,11 @@ func (w *Wantlist) Add(k u.Key, priority int) 
{ } } -func (w *Wantlist) Remove(k u.Key) { +func (w *Wantlist) Remove(k key.Key) { delete(w.set, k) } -func (w *Wantlist) Contains(k u.Key) (Entry, bool) { +func (w *Wantlist) Contains(k key.Key) (Entry, bool) { e, ok := w.set[k] return e, ok } diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index e87453920..0091724ff 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -5,12 +5,12 @@ import ( "time" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + key "github.com/ipfs/go-ipfs/blocks/key" engine "github.com/ipfs/go-ipfs/exchange/bitswap/decision" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "github.com/ipfs/go-ipfs/p2p/peer" - u "github.com/ipfs/go-ipfs/util" ) type WantManager struct { @@ -46,7 +46,7 @@ type msgPair struct { type cancellation struct { who peer.ID - blk u.Key + blk key.Key } type msgQueue struct { @@ -60,16 +60,16 @@ type msgQueue struct { done chan struct{} } -func (pm *WantManager) WantBlocks(ks []u.Key) { +func (pm *WantManager) WantBlocks(ks []key.Key) { log.Infof("want blocks: %s", ks) pm.addEntries(ks, false) } -func (pm *WantManager) CancelWants(ks []u.Key) { +func (pm *WantManager) CancelWants(ks []key.Key) { pm.addEntries(ks, true) } -func (pm *WantManager) addEntries(ks []u.Key, cancel bool) { +func (pm *WantManager) addEntries(ks []key.Key, cancel bool) { var entries []*bsmsg.Entry for i, k := range ks { entries = append(entries, &bsmsg.Entry{ diff --git a/bitswap/workers.go b/bitswap/workers.go index 7852cf93e..17c74a879 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -7,7 +7,7 @@ import ( process "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" - u "github.com/ipfs/go-ipfs/util" + key "github.com/ipfs/go-ipfs/blocks/key" ) 
var TaskWorkerCount = 8 @@ -104,9 +104,9 @@ func (bs *Bitswap) provideWorker(ctx context.Context) { func (bs *Bitswap) provideCollector(ctx context.Context) { defer close(bs.provideKeys) - var toProvide []u.Key - var nextKey u.Key - var keysOut chan u.Key + var toProvide []key.Key + var nextKey key.Key + var keysOut chan key.Key for { select { From 75ed0ea47767810eff2d740baa18c30253ff9a91 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 11 Jun 2015 09:22:35 -0700 Subject: [PATCH 0403/1035] prevent wantmanager from leaking goroutines (and memory) License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@73e55bf0797a5b5b14598425b4d8890fe7010b74 --- bitswap/wantmanager.go | 61 +++++++++++++++++++++++++----------------- 1 file changed, 37 insertions(+), 24 deletions(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 0091724ff..29f7b9469 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -137,36 +137,49 @@ func (mq *msgQueue) runQueue(ctx context.Context) { for { select { case <-mq.work: // there is work to be done - - err := mq.network.ConnectTo(ctx, mq.p) - if err != nil { - log.Noticef("cant connect to peer %s: %s", mq.p, err) - // TODO: cant connect, what now? - continue - } - - // grab outgoing message - mq.outlk.Lock() - wlm := mq.out - if wlm == nil || wlm.Empty() { - mq.outlk.Unlock() - continue - } - mq.out = nil - mq.outlk.Unlock() - - // send wantlist updates - err = mq.network.SendMessage(ctx, mq.p, wlm) - if err != nil { - log.Noticef("bitswap send error: %s", err) - // TODO: what do we do if this fails? 
- } + mq.doWork(ctx) case <-mq.done: return } } } +func (mq *msgQueue) doWork(ctx context.Context) { + // allow a minute for connections + // this includes looking them up in the dht + // dialing them, and handshaking + conctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + + err := mq.network.ConnectTo(conctx, mq.p) + if err != nil { + log.Noticef("cant connect to peer %s: %s", mq.p, err) + // TODO: cant connect, what now? + return + } + + // grab outgoing message + mq.outlk.Lock() + wlm := mq.out + mq.out = nil + mq.outlk.Unlock() + + if wlm == nil || wlm.Empty() { + return + } + + sendctx, cancel := context.WithTimeout(ctx, time.Second*30) + defer cancel() + + // send wantlist updates + err = mq.network.SendMessage(sendctx, mq.p, wlm) + if err != nil { + log.Noticef("bitswap send error: %s", err) + // TODO: what do we do if this fails? + return + } +} + func (pm *WantManager) Connected(p peer.ID) { pm.connect <- p } From fb7f9d7d5cc500b097b198e5b1d7a4e78107c656 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 11 Jun 2015 13:34:11 -0700 Subject: [PATCH 0404/1035] comments from CR License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@d06b99b3f325a0d719958381810d7f0e24874489 --- bitswap/wantmanager.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 29f7b9469..996da21eb 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -145,10 +145,10 @@ func (mq *msgQueue) runQueue(ctx context.Context) { } func (mq *msgQueue) doWork(ctx context.Context) { - // allow a minute for connections + // allow ten minutes for connections // this includes looking them up in the dht // dialing them, and handshaking - conctx, cancel := context.WithTimeout(ctx, time.Minute) + conctx, cancel := context.WithTimeout(ctx, time.Minute*10) defer cancel() err := mq.network.ConnectTo(conctx, mq.p) @@ -161,14 +161,14 @@ func (mq *msgQueue) doWork(ctx 
context.Context) { // grab outgoing message mq.outlk.Lock() wlm := mq.out - mq.out = nil - mq.outlk.Unlock() - if wlm == nil || wlm.Empty() { + mq.outlk.Unlock() return } + mq.out = nil + mq.outlk.Unlock() - sendctx, cancel := context.WithTimeout(ctx, time.Second*30) + sendctx, cancel := context.WithTimeout(ctx, time.Minute*5) defer cancel() // send wantlist updates From d940ddb67fcce37ac94196f6cb9df70c1e90f82b Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 12 Jun 2015 11:32:06 -0700 Subject: [PATCH 0405/1035] select with context when sending on channels License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@468e7655ec64a1432b5edb1458be0db5b37cabf5 --- bitswap/wantmanager.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 0091724ff..09b3e328a 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -168,11 +168,17 @@ func (mq *msgQueue) runQueue(ctx context.Context) { } func (pm *WantManager) Connected(p peer.ID) { - pm.connect <- p + select { + case pm.connect <- p: + case <-pm.ctx.Done(): + } } func (pm *WantManager) Disconnected(p peer.ID) { - pm.disconnect <- p + select { + case pm.disconnect <- p: + case <-pm.ctx.Done(): + } } // TODO: use goprocess here once i trust it From 69f13754e1761a7ba8dd30b8d0e437d9e01cf127 Mon Sep 17 00:00:00 2001 From: rht Date: Fri, 12 Jun 2015 04:48:27 +0700 Subject: [PATCH 0406/1035] Remove Notice{,f} logging interface And substitute the lines using Notice{,f} with Info{,f} License: MIT Signed-off-by: rht This commit was moved from ipfs/go-bitswap@71412d5bfb145db20b75038b43da9b5bae91be19 --- bitswap/bitswap.go | 4 ++-- bitswap/wantmanager.go | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index bed1d3a47..53c89a7d9 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -280,7 +280,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p 
peer.ID, incoming bsmsg var keys []key.Key for _, block := range iblocks { if _, found := bs.wm.wl.Contains(block.Key()); !found { - log.Notice("received un-asked-for block: %s", block) + log.Info("received un-asked-for block: %s", block) continue } keys = append(keys, block.Key()) @@ -297,7 +297,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg has, err := bs.blockstore.Has(b.Key()) if err != nil { bs.counterLk.Unlock() - log.Noticef("blockstore.Has error: %s", err) + log.Infof("blockstore.Has error: %s", err) return } if err == nil && has { diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 32c42776c..a8eeb58e2 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -96,7 +96,7 @@ func (pm *WantManager) SendBlock(ctx context.Context, env *engine.Envelope) { log.Infof("Sending block %s to %s", env.Peer, env.Block) err := pm.network.SendMessage(ctx, env.Peer, msg) if err != nil { - log.Noticef("sendblock error: %s", err) + log.Infof("sendblock error: %s", err) } } @@ -153,7 +153,7 @@ func (mq *msgQueue) doWork(ctx context.Context) { err := mq.network.ConnectTo(conctx, mq.p) if err != nil { - log.Noticef("cant connect to peer %s: %s", mq.p, err) + log.Infof("cant connect to peer %s: %s", mq.p, err) // TODO: cant connect, what now? return } @@ -174,7 +174,7 @@ func (mq *msgQueue) doWork(ctx context.Context) { // send wantlist updates err = mq.network.SendMessage(sendctx, mq.p, wlm) if err != nil { - log.Noticef("bitswap send error: %s", err) + log.Infof("bitswap send error: %s", err) // TODO: what do we do if this fails? 
return } From 561bcaa7138203f74e007e100044fea4f2643f44 Mon Sep 17 00:00:00 2001 From: rht Date: Sun, 14 Jun 2015 21:44:32 +0700 Subject: [PATCH 0407/1035] golint util/, thirdparty/ and exchange/bitswap/testutils.go License: MIT Signed-off-by: rht This commit was moved from ipfs/go-bitswap@373bacacc22cafbe0b6b5407d0ddd2de888eca5e --- bitswap/testutils.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 47930de69..91fdece7f 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -50,7 +50,7 @@ func (g *SessionGenerator) Next() Instance { } func (g *SessionGenerator) Instances(n int) []Instance { - instances := make([]Instance, 0) + var instances []Instance for j := 0; j < n; j++ { inst := g.Next() instances = append(instances, inst) @@ -87,12 +87,12 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // just a much better idea. func session(ctx context.Context, net tn.Network, p testutil.Identity) Instance { bsdelay := delay.Fixed(0) - const kWriteCacheElems = 100 + const writeCacheElems = 100 adapter := net.Adapter(p) dstore := ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay)) - bstore, err := blockstore.WriteCached(blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)), kWriteCacheElems) + bstore, err := blockstore.WriteCached(blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)), writeCacheElems) if err != nil { panic(err.Error()) // FIXME perhaps change signature and return error. 
} From 887547f9d438cf6d401d24bc7514b77ffe4b1962 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 7 Jul 2015 12:14:57 -0700 Subject: [PATCH 0408/1035] add in some events to bitswap to emit worker information License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@cafee57f6ff89814bd622dd1686ddc7a786a3beb --- bitswap/bitswap.go | 10 +++++++++- bitswap/workers.go | 22 ++++++++++++++++++---- 2 files changed, 27 insertions(+), 5 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 53c89a7d9..4511e188e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -150,7 +150,8 @@ func (bs *Bitswap) GetBlock(parent context.Context, k key.Key) (*blocks.Block, e ctx, cancelFunc := context.WithCancel(parent) ctx = eventlog.ContextWithLoggable(ctx, eventlog.Uuid("GetBlockRequest")) - defer log.EventBegin(ctx, "GetBlockRequest", &k).Done() + log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k) + defer log.Event(ctx, "Bitswap.GetBlockRequest.End", &k) defer func() { cancelFunc() @@ -200,6 +201,10 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan *block } promise := bs.notifications.Subscribe(ctx, keys...) 
+ for _, k := range keys { + log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k) + } + bs.wm.WantBlocks(keys) req := &blockRequest{ @@ -310,6 +315,9 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg return } + k := b.Key() + log.Event(ctx, "Bitswap.GetBlockRequest.End", &k) + log.Debugf("got block %s from %s (%d,%d)", b, p, brecvd, bdup) hasBlockCtx, cancel := context.WithTimeout(ctx, hasBlockTimeout) if err := bs.HasBlock(hasBlockCtx, b); err != nil { diff --git a/bitswap/workers.go b/bitswap/workers.go index 17c74a879..edd05bfb3 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -7,7 +7,9 @@ import ( process "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + key "github.com/ipfs/go-ipfs/blocks/key" + eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog" ) var TaskWorkerCount = 8 @@ -36,8 +38,9 @@ func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { // Start up workers to handle requests from other nodes for the data on this node for i := 0; i < TaskWorkerCount; i++ { + i := i px.Go(func(px process.Process) { - bs.taskWorker(ctx) + bs.taskWorker(ctx, i) }) } @@ -55,15 +58,18 @@ func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { // consider increasing number if providing blocks bottlenecks // file transfers for i := 0; i < provideWorkers; i++ { + i := i px.Go(func(px process.Process) { - bs.provideWorker(ctx) + bs.provideWorker(ctx, i) }) } } -func (bs *Bitswap) taskWorker(ctx context.Context) { +func (bs *Bitswap) taskWorker(ctx context.Context, id int) { + idmap := eventlog.LoggableMap{"ID": id} defer log.Info("bitswap task worker shutting down...") for { + log.Event(ctx, "Bitswap.TaskWorker.Loop", idmap) select { case nextEnvelope := <-bs.engine.Outbox(): select { @@ -71,6 +77,7 @@ func (bs *Bitswap) taskWorker(ctx context.Context) { if !ok { continue } + 
log.Event(ctx, "Bitswap.TaskWorker.Work", eventlog.LoggableMap{"ID": id, "Target": envelope.Peer.Pretty(), "Block": envelope.Block.Multihash.B58String()}) bs.wm.SendBlock(ctx, envelope) case <-ctx.Done(): @@ -82,10 +89,13 @@ func (bs *Bitswap) taskWorker(ctx context.Context) { } } -func (bs *Bitswap) provideWorker(ctx context.Context) { +func (bs *Bitswap) provideWorker(ctx context.Context, id int) { + idmap := eventlog.LoggableMap{"ID": id} for { + log.Event(ctx, "Bitswap.ProvideWorker.Loop", idmap) select { case k, ok := <-bs.provideKeys: + log.Event(ctx, "Bitswap.ProvideWorker.Work", idmap, &k) if !ok { log.Debug("provideKeys channel closed") return @@ -139,6 +149,7 @@ func (bs *Bitswap) providerConnector(parent context.Context) { defer log.Info("bitswap client worker shutting down...") for { + log.Event(parent, "Bitswap.ProviderConnector.Loop") select { case req := <-bs.findKeys: keys := req.keys @@ -146,6 +157,7 @@ func (bs *Bitswap) providerConnector(parent context.Context) { log.Warning("Received batch request for zero blocks") continue } + log.Event(parent, "Bitswap.ProviderConnector.Work", eventlog.LoggableMap{"Keys": keys}) // NB: Optimization. Assumes that providers of key[0] are likely to // be able to provide for all keys. This currently holds true in most @@ -174,6 +186,7 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) { defer tick.Stop() for { + log.Event(ctx, "Bitswap.Rebroadcast.idle") select { case <-tick.C: n := bs.wm.wl.Len() @@ -181,6 +194,7 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) { log.Debug(n, "keys in bitswap wantlist") } case <-broadcastSignal.C: // resend unfulfilled wantlist keys + log.Event(ctx, "Bitswap.Rebroadcast.active") entries := bs.wm.wl.Entries() if len(entries) > 0 { bs.connectToProviders(ctx, entries) From 832aafcaf826fe9b4fc0b89c4f3bed672713e1f2 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Thu, 9 Jul 2015 16:34:16 -0700 Subject: [PATCH 0409/1035] expose internal/pb packages. 
we shouldn't use internal packages. License: MIT Signed-off-by: Juan Batiz-Benet This commit was moved from ipfs/go-bitswap@7523725638e2cbee08d19f8831fbe80b5f79b603 --- bitswap/message/message.go | 2 +- bitswap/message/message_test.go | 2 +- bitswap/message/pb/Makefile | 8 ++++++++ bitswap/message/{internal => }/pb/message.pb.go | 0 bitswap/message/{internal => }/pb/message.proto | 0 5 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 bitswap/message/pb/Makefile rename bitswap/message/{internal => }/pb/message.pb.go (100%) rename bitswap/message/{internal => }/pb/message.proto (100%) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 6e4979939..090970bd3 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -5,7 +5,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" key "github.com/ipfs/go-ipfs/blocks/key" - pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/internal/pb" + pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" inet "github.com/ipfs/go-ipfs/p2p/net" diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 4452b88a0..70d966e0a 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -8,7 +8,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" key "github.com/ipfs/go-ipfs/blocks/key" - pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/internal/pb" + pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" ) func TestAppendWanted(t *testing.T) { diff --git a/bitswap/message/pb/Makefile b/bitswap/message/pb/Makefile new file mode 100644 index 000000000..5bbebea07 --- /dev/null +++ b/bitswap/message/pb/Makefile @@ -0,0 +1,8 @@ +# TODO(brian): add proto tasks +all: message.pb.go + +message.pb.go: message.proto + protoc --gogo_out=. --proto_path=../../../../../:/usr/local/opt/protobuf/include:. 
$< + +clean: + rm message.pb.go diff --git a/bitswap/message/internal/pb/message.pb.go b/bitswap/message/pb/message.pb.go similarity index 100% rename from bitswap/message/internal/pb/message.pb.go rename to bitswap/message/pb/message.pb.go diff --git a/bitswap/message/internal/pb/message.proto b/bitswap/message/pb/message.proto similarity index 100% rename from bitswap/message/internal/pb/message.proto rename to bitswap/message/pb/message.proto From 37f64947587c198e050ad2724eb302f68c1c261e Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 13 Jul 2015 11:01:01 -0700 Subject: [PATCH 0410/1035] allow bitswap to attempt to write blocks to disk multiple times License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@5c5b77bb63368e148f857453894d7554ff04ad74 --- bitswap/bitswap.go | 60 +++++++++++++++++++++++++++++++--------------- 1 file changed, 41 insertions(+), 19 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 4511e188e..206b44f1e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -228,7 +228,9 @@ func (bs *Bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { default: } - if err := bs.blockstore.Put(blk); err != nil { + err := bs.tryPutBlock(blk, 4) // attempt to store block up to four times + if err != nil { + log.Errorf("Error writing block to datastore: %s", err) return err } @@ -242,6 +244,18 @@ func (bs *Bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { return nil } +func (bs *Bitswap) tryPutBlock(blk *blocks.Block, attempts int) error { + var err error + for i := 0; i < attempts; i++ { + if err = bs.blockstore.Put(blk); err == nil { + break + } + + time.Sleep(time.Millisecond * time.Duration(400*(i+1))) + } + return err +} + func (bs *Bitswap) connectToProviders(ctx context.Context, entries []wantlist.Entry) { ctx, cancel := context.WithCancel(ctx) @@ -297,38 +311,46 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg wg.Add(1) go func(b 
*blocks.Block) { defer wg.Done() - bs.counterLk.Lock() - bs.blocksRecvd++ - has, err := bs.blockstore.Has(b.Key()) - if err != nil { - bs.counterLk.Unlock() - log.Infof("blockstore.Has error: %s", err) - return - } - if err == nil && has { - bs.dupBlocksRecvd++ - } - brecvd := bs.blocksRecvd - bdup := bs.dupBlocksRecvd - bs.counterLk.Unlock() - if has { - return + + if err := bs.updateReceiveCounters(b.Key()); err != nil { + return // ignore error, is either logged previously, or ErrAlreadyHaveBlock } k := b.Key() log.Event(ctx, "Bitswap.GetBlockRequest.End", &k) - log.Debugf("got block %s from %s (%d,%d)", b, p, brecvd, bdup) + log.Debugf("got block %s from %s", b, p) hasBlockCtx, cancel := context.WithTimeout(ctx, hasBlockTimeout) + defer cancel() if err := bs.HasBlock(hasBlockCtx, b); err != nil { log.Warningf("ReceiveMessage HasBlock error: %s", err) } - cancel() }(block) } wg.Wait() } +var ErrAlreadyHaveBlock = errors.New("already have block") + +func (bs *Bitswap) updateReceiveCounters(k key.Key) error { + bs.counterLk.Lock() + defer bs.counterLk.Unlock() + bs.blocksRecvd++ + has, err := bs.blockstore.Has(k) + if err != nil { + log.Infof("blockstore.Has error: %s", err) + return err + } + if err == nil && has { + bs.dupBlocksRecvd++ + } + + if has { + return ErrAlreadyHaveBlock + } + return nil +} + // Connected/Disconnected warns bitswap about peer connections func (bs *Bitswap) PeerConnected(p peer.ID) { bs.wm.Connected(p) From 41b5a8ed8c7d846a4acdf4748a28154b46e85b22 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 13 Jul 2015 11:24:49 -0700 Subject: [PATCH 0411/1035] publish block before writing to disk License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@18c0cefd4c38500f07a6312deb54ef20eeacd54e --- bitswap/bitswap.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 206b44f1e..75c347fd0 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -228,13 +228,14 @@ 
func (bs *Bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { default: } + bs.notifications.Publish(blk) + err := bs.tryPutBlock(blk, 4) // attempt to store block up to four times if err != nil { log.Errorf("Error writing block to datastore: %s", err) return err } - bs.notifications.Publish(blk) select { case bs.newBlocks <- blk: // send block off to be reprovided From 72fa2f8d6aef68dbc2e2afef623825c3611db0a4 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 14 Jul 2015 11:11:16 -0700 Subject: [PATCH 0412/1035] fix race introduced in bitswap License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@10b8d5714844f5fa1216fbe15f3c32fdf3de1303 --- bitswap/bitswap.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 75c347fd0..5234aefc9 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -228,14 +228,14 @@ func (bs *Bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { default: } - bs.notifications.Publish(blk) - err := bs.tryPutBlock(blk, 4) // attempt to store block up to four times if err != nil { log.Errorf("Error writing block to datastore: %s", err) return err } + bs.notifications.Publish(blk) + select { case bs.newBlocks <- blk: // send block off to be reprovided From f7fd4a8102d583c0c36d6d9bace06b96497ff46b Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 14 Jul 2015 14:04:56 -0700 Subject: [PATCH 0413/1035] making the daemon shutdown quicker License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@088143a4f154f10002e006166e23842b72a38f7e --- bitswap/wantmanager.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index a8eeb58e2..3b4626a4d 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -140,6 +140,8 @@ func (mq *msgQueue) runQueue(ctx context.Context) { mq.doWork(ctx) case <-mq.done: return + case <-ctx.Done(): + return } } } From 
ea2fb93a50080ec62922b8b6943959aa146c32e5 Mon Sep 17 00:00:00 2001 From: Karthik Bala Date: Mon, 6 Jul 2015 15:10:13 -0700 Subject: [PATCH 0414/1035] add transport logic to mocknet License: MIT Signed-off-by: Karthik Bala This commit was moved from ipfs/go-bitswap@078db5dee0e322ef425e0aa2ea0bd6eae419f590 --- bitswap/testutils.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 91fdece7f..3dad2afed 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -46,7 +46,7 @@ func (g *SessionGenerator) Next() Instance { if err != nil { panic("FIXME") // TODO change signature } - return session(g.ctx, g.net, p) + return Session(g.ctx, g.net, p) } func (g *SessionGenerator) Instances(n int) []Instance { @@ -85,7 +85,7 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // NB: It's easy make mistakes by providing the same peer ID to two different // sessions. To safeguard, use the SessionGenerator to generate sessions. It's // just a much better idea. -func session(ctx context.Context, net tn.Network, p testutil.Identity) Instance { +func Session(ctx context.Context, net tn.Network, p testutil.Identity) Instance { bsdelay := delay.Fixed(0) const writeCacheElems = 100 From 7ab0dcb8e5f769b20fe522f5fa52465e081be545 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Tue, 4 Aug 2015 19:53:39 +0200 Subject: [PATCH 0415/1035] bitswap/provide: improved rate limiting this PR greatly speeds up providing and add. (1) Instead of idling workers, we move to a ratelimiter-based worker. We put this max at 512, so that means _up to_ 512 goroutines. This is very small load on the node, as each worker is providing to the dht, which means mostly waiting. It DOES put a large load on the DHT. but i want to try this out for a while and see if it's a problem. We can decide later if it is a problem for the network (nothing stops anyone from re-compiling, but the defaults of course matter). 
(2) We add a buffer size for provideKeys, which means that we block the add process much less. this is a very cheap buffer, as it only stores keys (it may be even cheaper with a lock + ring buffer instead of a channel...). This makes add blazing fast-- it was being rate limited by providing. Add should not be ratelimited by providing (much, if any) as the user wants to just store the stuff in the local node's repo. This buffer is initially set to 4096, which means: 4096 * keysize (~258 bytes + go overhead) ~ 1-1.5MB this buffer only last a few sec to mins, and is an ok thing to do for the sake of very fast adds. (this could be a configurable paramter, certainly for low-mem footprint use cases). At the moment this is not much, compared to block sizes. (3) We make the providing EventBegin() + Done(), so that we can track how long a provide takes, and we can remove workers as they finish in bsdash and similar tools. License: MIT Signed-off-by: Juan Batiz-Benet This commit was moved from ipfs/go-bitswap@06b49918b5c07bd54460bb5d71f7239e79667cd7 --- bitswap/bitswap.go | 7 ++-- bitswap/workers.go | 87 +++++++++++++++++++++++----------------------- 2 files changed, 48 insertions(+), 46 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 5234aefc9..cbc9bcf4f 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -39,8 +39,9 @@ const ( // kMaxPriority is the max priority as defined by the bitswap protocol kMaxPriority = math.MaxInt32 - HasBlockBufferSize = 256 - provideWorkers = 4 + HasBlockBufferSize = 256 + provideKeysBufferSize = 2048 + provideWorkerMax = 512 ) var rebroadcastDelay = delay.Fixed(time.Second * 10) @@ -85,7 +86,7 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, findKeys: make(chan *blockRequest, sizeBatchRequestChan), process: px, newBlocks: make(chan *blocks.Block, HasBlockBufferSize), - provideKeys: make(chan key.Key), + provideKeys: make(chan key.Key, provideKeysBufferSize), wm: NewWantManager(ctx, 
network), } go bs.wm.Run() diff --git a/bitswap/workers.go b/bitswap/workers.go index edd05bfb3..e19cf2fbc 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -1,12 +1,12 @@ package bitswap import ( - "os" - "strconv" "time" process "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" + ratelimit "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/ratelimit" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + waitable "github.com/ipfs/go-ipfs/thirdparty/waitable" key "github.com/ipfs/go-ipfs/blocks/key" eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog" @@ -14,22 +14,6 @@ import ( var TaskWorkerCount = 8 -func init() { - twc := os.Getenv("IPFS_BITSWAP_TASK_WORKERS") - if twc != "" { - n, err := strconv.Atoi(twc) - if err != nil { - log.Error(err) - return - } - if n > 0 { - TaskWorkerCount = n - } else { - log.Errorf("Invalid value of '%d' for IPFS_BITSWAP_TASK_WORKERS", n) - } - } -} - func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { // Start up a worker to handle block requests this node is making px.Go(func(px process.Process) { @@ -57,12 +41,7 @@ func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { // Spawn up multiple workers to handle incoming blocks // consider increasing number if providing blocks bottlenecks // file transfers - for i := 0; i < provideWorkers; i++ { - i := i - px.Go(func(px process.Process) { - bs.provideWorker(ctx, i) - }) - } + px.Go(bs.provideWorker) } func (bs *Bitswap) taskWorker(ctx context.Context, id int) { @@ -77,7 +56,11 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { if !ok { continue } - log.Event(ctx, "Bitswap.TaskWorker.Work", eventlog.LoggableMap{"ID": id, "Target": envelope.Peer.Pretty(), "Block": envelope.Block.Multihash.B58String()}) + log.Event(ctx, "Bitswap.TaskWorker.Work", eventlog.LoggableMap{ + "ID": id, + "Target": envelope.Peer.Pretty(), + "Block": 
envelope.Block.Multihash.B58String(), + }) bs.wm.SendBlock(ctx, envelope) case <-ctx.Done(): @@ -89,27 +72,45 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { } } -func (bs *Bitswap) provideWorker(ctx context.Context, id int) { - idmap := eventlog.LoggableMap{"ID": id} - for { - log.Event(ctx, "Bitswap.ProvideWorker.Loop", idmap) - select { - case k, ok := <-bs.provideKeys: - log.Event(ctx, "Bitswap.ProvideWorker.Work", idmap, &k) - if !ok { - log.Debug("provideKeys channel closed") - return - } - ctx, cancel := context.WithTimeout(ctx, provideTimeout) - err := bs.network.Provide(ctx, k) - if err != nil { +func (bs *Bitswap) provideWorker(px process.Process) { + + limiter := ratelimit.NewRateLimiter(px, provideWorkerMax) + + limitedGoProvide := func(k key.Key, wid int) { + ev := eventlog.LoggableMap{"ID": wid} + limiter.LimitedGo(func(px process.Process) { + + ctx := waitable.Context(px) // derive ctx from px + defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, &k).Done() + + ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx + defer cancel() + + if err := bs.network.Provide(ctx, k); err != nil { log.Error(err) } - cancel() - case <-ctx.Done(): - return - } + }) } + + // worker spawner, reads from bs.provideKeys until it closes, spawning a + // _ratelimited_ number of workers to handle each key. 
+ limiter.Go(func(px process.Process) { + for wid := 2; ; wid++ { + ev := eventlog.LoggableMap{"ID": 1} + log.Event(waitable.Context(px), "Bitswap.ProvideWorker.Loop", ev) + + select { + case <-px.Closing(): + return + case k, ok := <-bs.provideKeys: + if !ok { + log.Debug("provideKeys channel closed") + return + } + limitedGoProvide(k, wid) + } + } + }) } func (bs *Bitswap) provideCollector(ctx context.Context) { From 55222499b541d3ce329bad655ee7f4c8f6a653ec Mon Sep 17 00:00:00 2001 From: rht Date: Sun, 23 Aug 2015 19:33:53 +0700 Subject: [PATCH 0416/1035] Fix 'ctx, _' to have explicit cancel License: MIT Signed-off-by: rht This commit was moved from ipfs/go-bitswap@69ff434c5df109855eb29eb46f1d44a0b96e3113 --- bitswap/bitswap_test.go | 15 ++++++++++----- bitswap/notifications/notifications_test.go | 3 ++- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index e70b3885a..41f0e6c08 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -50,7 +50,8 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this solo := g.Next() defer solo.Exchange.Close() - ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) + ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) + defer cancel() _, err := solo.Exchange.GetBlock(ctx, block.Key()) if err != context.DeadlineExceeded { @@ -76,7 +77,8 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { wantsBlock := peers[1] defer wantsBlock.Exchange.Close() - ctx, _ := context.WithTimeout(context.Background(), time.Second) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() received, err := wantsBlock.Exchange.GetBlock(ctx, block.Key()) if err != nil { t.Log(err) @@ -226,14 +228,16 @@ func TestSendToWantingPeer(t *testing.T) { alpha := bg.Next() // peerA requests and waits for block alpha - ctx, _ := context.WithTimeout(context.TODO(), waitTime) + ctx, 
cancel := context.WithTimeout(context.TODO(), waitTime) + defer cancel() alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []key.Key{alpha.Key()}) if err != nil { t.Fatal(err) } // peerB announces to the network that he has block alpha - ctx, _ = context.WithTimeout(context.TODO(), timeout) + ctx, cancel = context.WithTimeout(context.TODO(), timeout) + defer cancel() err = peerB.Exchange.HasBlock(ctx, alpha) if err != nil { t.Fatal(err) @@ -266,7 +270,8 @@ func TestBasicBitswap(t *testing.T) { t.Fatal(err) } - ctx, _ := context.WithTimeout(context.TODO(), time.Second*5) + ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5) + defer cancel() blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Key()) if err != nil { t.Fatal(err) diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index e9be15aa4..8ab9887ff 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -112,7 +112,8 @@ func TestSubscribeIsANoopWhenCalledWithNoKeys(t *testing.T) { func TestCarryOnWhenDeadlineExpires(t *testing.T) { impossibleDeadline := time.Nanosecond - fastExpiringCtx, _ := context.WithTimeout(context.Background(), impossibleDeadline) + fastExpiringCtx, cancel := context.WithTimeout(context.Background(), impossibleDeadline) + defer cancel() n := New() defer n.Shutdown() From a72e72596530965526eb61a0ed9faf339b602aaf Mon Sep 17 00:00:00 2001 From: rht Date: Sun, 23 Aug 2015 19:55:45 +0700 Subject: [PATCH 0417/1035] Replace context.TODO in test files with context.Background License: MIT Signed-off-by: rht This commit was moved from ipfs/go-bitswap@b7de75d3604f5cd43e96e8113ab82a06027f5ad3 --- bitswap/bitswap_test.go | 13 +++++++------ bitswap/notifications/notifications_test.go | 2 +- bitswap/testutils.go | 2 +- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 41f0e6c08..8f4b6f61f 100644 --- 
a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -144,6 +144,7 @@ func TestLargeFileTwoPeers(t *testing.T) { } func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { + ctx := context.Background() if testing.Short() { t.SkipNow() } @@ -161,7 +162,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { first := instances[0] for _, b := range blocks { blkeys = append(blkeys, b.Key()) - first.Exchange.HasBlock(context.Background(), b) + first.Exchange.HasBlock(ctx, b) } t.Log("Distribute!") @@ -171,7 +172,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { wg.Add(1) go func(inst Instance) { defer wg.Done() - outch, err := inst.Exchange.GetBlocks(context.TODO(), blkeys) + outch, err := inst.Exchange.GetBlocks(ctx, blkeys) if err != nil { t.Fatal(err) } @@ -228,7 +229,7 @@ func TestSendToWantingPeer(t *testing.T) { alpha := bg.Next() // peerA requests and waits for block alpha - ctx, cancel := context.WithTimeout(context.TODO(), waitTime) + ctx, cancel := context.WithTimeout(context.Background(), waitTime) defer cancel() alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []key.Key{alpha.Key()}) if err != nil { @@ -236,7 +237,7 @@ func TestSendToWantingPeer(t *testing.T) { } // peerB announces to the network that he has block alpha - ctx, cancel = context.WithTimeout(context.TODO(), timeout) + ctx, cancel = context.WithTimeout(context.Background(), timeout) defer cancel() err = peerB.Exchange.HasBlock(ctx, alpha) if err != nil { @@ -265,12 +266,12 @@ func TestBasicBitswap(t *testing.T) { instances := sg.Instances(2) blocks := bg.Blocks(1) - err := instances[0].Exchange.HasBlock(context.TODO(), blocks[0]) + err := instances[0].Exchange.HasBlock(context.Background(), blocks[0]) if err != nil { t.Fatal(err) } - ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() blk, err := 
instances[1].Exchange.GetBlock(ctx, blocks[0].Key()) if err != nil { diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 8ab9887ff..96ed1c4e3 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -103,7 +103,7 @@ func TestDuplicateSubscribe(t *testing.T) { func TestSubscribeIsANoopWhenCalledWithNoKeys(t *testing.T) { n := New() defer n.Shutdown() - ch := n.Subscribe(context.TODO()) // no keys provided + ch := n.Subscribe(context.Background()) // no keys provided if _, ok := <-ch; ok { t.Fatal("should be closed if no keys provided") } diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 3dad2afed..5bf28036d 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -18,7 +18,7 @@ import ( // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! func NewTestSessionGenerator( net tn.Network) SessionGenerator { - ctx, cancel := context.WithCancel(context.TODO()) + ctx, cancel := context.WithCancel(context.Background()) return SessionGenerator{ net: net, seq: 0, From bce0352252cb3fac173a28d9dc0ace995050e1c1 Mon Sep 17 00:00:00 2001 From: Juan Batiz-Benet Date: Sat, 5 Sep 2015 04:37:58 +0200 Subject: [PATCH 0418/1035] bitswap/workers: fix proc / ctx wiring This commit changes the order of the proc/ctx wiring, to ensure that the proc has been setup correctly before exiting. 
License: MIT Signed-off-by: Juan Batiz-Benet This commit was moved from ipfs/go-bitswap@3fb165284e0f977e368195db96d76ee3945ed99c --- bitswap/bitswap.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index cbc9bcf4f..8bc88481b 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -9,6 +9,7 @@ import ( "time" process "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" + procctx "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" @@ -68,15 +69,6 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, return nil }) - go func() { - <-px.Closing() // process closes first - cancelFunc() - }() - go func() { - <-ctx.Done() // parent cancelled first - px.Close() - }() - bs := &Bitswap{ self: p, blockstore: bstore, @@ -94,6 +86,15 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, // Start up bitswaps async worker routines bs.startWorkers(px, ctx) + + // bind the context and process. + // do it over here to avoid closing before all setup is done. 
+ go func() { + <-px.Closing() // process closes first + cancelFunc() + }() + procctx.CloseAfterContext(px, ctx) // parent cancelled first + return bs } From f83dd92e57ef9a7dc315630f07e88e8b6878afd9 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 8 Sep 2015 21:15:53 -0700 Subject: [PATCH 0419/1035] use new methods from goprocess/context, remove thirdparty/waitable License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@a33537907c9aa1e9a6304ab55751de68dd400f73 --- bitswap/workers.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index e19cf2fbc..b33ea9221 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -4,9 +4,9 @@ import ( "time" process "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" + procctx "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context" ratelimit "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/ratelimit" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" - waitable "github.com/ipfs/go-ipfs/thirdparty/waitable" key "github.com/ipfs/go-ipfs/blocks/key" eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog" @@ -80,7 +80,7 @@ func (bs *Bitswap) provideWorker(px process.Process) { ev := eventlog.LoggableMap{"ID": wid} limiter.LimitedGo(func(px process.Process) { - ctx := waitable.Context(px) // derive ctx from px + ctx := procctx.OnClosingContext(px) // derive ctx from px defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, &k).Done() ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx @@ -97,7 +97,7 @@ func (bs *Bitswap) provideWorker(px process.Process) { limiter.Go(func(px process.Process) { for wid := 2; ; wid++ { ev := eventlog.LoggableMap{"ID": 1} - log.Event(waitable.Context(px), "Bitswap.ProvideWorker.Loop", ev) + log.Event(procctx.OnClosingContext(px), "Bitswap.ProvideWorker.Loop", ev) select { case 
<-px.Closing(): From c2532881dcd62195a3f1d6402f6cec81bf1016fc Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 9 Sep 2015 10:50:56 -0700 Subject: [PATCH 0420/1035] implement unwant command to remove blocks from wantlist License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@2d917bab6e8aab7f7946f765bfada353fbb9d075 --- bitswap/bitswap.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 8bc88481b..28582fe82 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -221,6 +221,11 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan *block } } +// CancelWant removes a given key from the wantlist +func (bs *Bitswap) CancelWants(ks []key.Key) { + bs.wm.CancelWants(ks) +} + // HasBlock announces the existance of a block to this bitswap service. The // service will potentially notify its peers. func (bs *Bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { From 1abe972dfe28f14ebbabcc7c363cdf53850adbfa Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 14 Sep 2015 17:33:03 -0700 Subject: [PATCH 0421/1035] extract logging License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@d7ed6e95ca1a37b67fed440ddb32baaebaeea455 --- bitswap/bitswap.go | 6 +++--- bitswap/decision/engine.go | 4 ++-- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/workers.go | 12 ++++++------ 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 8bc88481b..ad472f327 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -22,10 +22,10 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "github.com/ipfs/go-ipfs/p2p/peer" "github.com/ipfs/go-ipfs/thirdparty/delay" - eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog" + logging "github.com/ipfs/go-ipfs/vendor/go-log-v1.0.0" ) -var log = eventlog.Logger("bitswap") +var log = logging.Logger("bitswap") const ( // maxProvidersPerRequest specifies 
the maximum number of providers desired @@ -151,7 +151,7 @@ func (bs *Bitswap) GetBlock(parent context.Context, k key.Key) (*blocks.Block, e ctx, cancelFunc := context.WithCancel(parent) - ctx = eventlog.ContextWithLoggable(ctx, eventlog.Uuid("GetBlockRequest")) + ctx = logging.ContextWithLoggable(ctx, logging.Uuid("GetBlockRequest")) log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k) defer log.Event(ctx, "Bitswap.GetBlockRequest.End", &k) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index d08636d80..85dde9eb7 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -10,7 +10,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "github.com/ipfs/go-ipfs/p2p/peer" - eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog" + logging "github.com/ipfs/go-ipfs/vendor/go-log-v1.0.0" ) // TODO consider taking responsibility for other types of requests. For @@ -43,7 +43,7 @@ import ( // whatever it sees fit to produce desired outcomes (get wanted keys // quickly, maintain good relationships with peers, etc). 
-var log = eventlog.Logger("engine") +var log = logging.Logger("engine") const ( // outboxChanBuffer must be 0 to prevent stale messages from being sent diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 78d1defd3..c0a4b2d3a 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -9,10 +9,10 @@ import ( inet "github.com/ipfs/go-ipfs/p2p/net" peer "github.com/ipfs/go-ipfs/p2p/peer" routing "github.com/ipfs/go-ipfs/routing" - eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog" + logging "github.com/ipfs/go-ipfs/vendor/go-log-v1.0.0" ) -var log = eventlog.Logger("bitswap_network") +var log = logging.Logger("bitswap_network") // NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host func NewFromIpfsHost(host host.Host, r routing.IpfsRouting) BitSwapNetwork { diff --git a/bitswap/workers.go b/bitswap/workers.go index b33ea9221..41dd94abe 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -9,7 +9,7 @@ import ( context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" key "github.com/ipfs/go-ipfs/blocks/key" - eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog" + logging "github.com/ipfs/go-ipfs/vendor/go-log-v1.0.0" ) var TaskWorkerCount = 8 @@ -45,7 +45,7 @@ func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { } func (bs *Bitswap) taskWorker(ctx context.Context, id int) { - idmap := eventlog.LoggableMap{"ID": id} + idmap := logging.LoggableMap{"ID": id} defer log.Info("bitswap task worker shutting down...") for { log.Event(ctx, "Bitswap.TaskWorker.Loop", idmap) @@ -56,7 +56,7 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { if !ok { continue } - log.Event(ctx, "Bitswap.TaskWorker.Work", eventlog.LoggableMap{ + log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableMap{ "ID": id, "Target": envelope.Peer.Pretty(), "Block": envelope.Block.Multihash.B58String(), @@ -77,7 +77,7 @@ func (bs *Bitswap) provideWorker(px 
process.Process) { limiter := ratelimit.NewRateLimiter(px, provideWorkerMax) limitedGoProvide := func(k key.Key, wid int) { - ev := eventlog.LoggableMap{"ID": wid} + ev := logging.LoggableMap{"ID": wid} limiter.LimitedGo(func(px process.Process) { ctx := procctx.OnClosingContext(px) // derive ctx from px @@ -96,7 +96,7 @@ func (bs *Bitswap) provideWorker(px process.Process) { // _ratelimited_ number of workers to handle each key. limiter.Go(func(px process.Process) { for wid := 2; ; wid++ { - ev := eventlog.LoggableMap{"ID": 1} + ev := logging.LoggableMap{"ID": 1} log.Event(procctx.OnClosingContext(px), "Bitswap.ProvideWorker.Loop", ev) select { @@ -158,7 +158,7 @@ func (bs *Bitswap) providerConnector(parent context.Context) { log.Warning("Received batch request for zero blocks") continue } - log.Event(parent, "Bitswap.ProviderConnector.Work", eventlog.LoggableMap{"Keys": keys}) + log.Event(parent, "Bitswap.ProviderConnector.Work", logging.LoggableMap{"Keys": keys}) // NB: Optimization. Assumes that providers of key[0] are likely to // be able to provide for all keys. This currently holds true in most From dfc1a5a80cd825fa54d48eac201a589c3a2b306b Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 2 Sep 2015 14:44:04 -0700 Subject: [PATCH 0422/1035] remove context from HasBlock, use bitswap process instead License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@afee5e338e008336fca874921f74024b8627ab99 --- bitswap/bitswap.go | 10 ++++------ bitswap/bitswap_test.go | 11 ++++------- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 059e23414..2f2e88ea4 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -228,7 +228,7 @@ func (bs *Bitswap) CancelWants(ks []key.Key) { // HasBlock announces the existance of a block to this bitswap service. The // service will potentially notify its peers. 
-func (bs *Bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { +func (bs *Bitswap) HasBlock(blk *blocks.Block) error { select { case <-bs.process.Closing(): return errors.New("bitswap is closed") @@ -246,8 +246,8 @@ func (bs *Bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error { select { case bs.newBlocks <- blk: // send block off to be reprovided - case <-ctx.Done(): - return ctx.Err() + case <-bs.process.Closing(): + return bs.process.Close() } return nil } @@ -328,9 +328,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg log.Event(ctx, "Bitswap.GetBlockRequest.End", &k) log.Debugf("got block %s from %s", b, p) - hasBlockCtx, cancel := context.WithTimeout(ctx, hasBlockTimeout) - defer cancel() - if err := bs.HasBlock(hasBlockCtx, b); err != nil { + if err := bs.HasBlock(b); err != nil { log.Warningf("ReceiveMessage HasBlock error: %s", err) } }(block) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 8f4b6f61f..c6de90d78 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -70,7 +70,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { hasBlock := peers[0] defer hasBlock.Exchange.Close() - if err := hasBlock.Exchange.HasBlock(context.Background(), block); err != nil { + if err := hasBlock.Exchange.HasBlock(block); err != nil { t.Fatal(err) } @@ -162,7 +162,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { first := instances[0] for _, b := range blocks { blkeys = append(blkeys, b.Key()) - first.Exchange.HasBlock(ctx, b) + first.Exchange.HasBlock(b) } t.Log("Distribute!") @@ -224,7 +224,6 @@ func TestSendToWantingPeer(t *testing.T) { t.Logf("Session %v\n", peerA.Peer) t.Logf("Session %v\n", peerB.Peer) - timeout := time.Second waitTime := time.Second * 5 alpha := bg.Next() @@ -237,9 +236,7 @@ func TestSendToWantingPeer(t *testing.T) { } // peerB announces to the network that he has block alpha - ctx, cancel = 
context.WithTimeout(context.Background(), timeout) - defer cancel() - err = peerB.Exchange.HasBlock(ctx, alpha) + err = peerB.Exchange.HasBlock(alpha) if err != nil { t.Fatal(err) } @@ -266,7 +263,7 @@ func TestBasicBitswap(t *testing.T) { instances := sg.Instances(2) blocks := bg.Blocks(1) - err := instances[0].Exchange.HasBlock(context.Background(), blocks[0]) + err := instances[0].Exchange.HasBlock(blocks[0]) if err != nil { t.Fatal(err) } From ffb845dbc74407a410821d36d9f4d16886563d52 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 25 Sep 2015 14:02:20 -0700 Subject: [PATCH 0423/1035] allow bitswap stat to output wasted bytes bitswap stat can now track bytes that are wasted by receiving duplicate blocks. ps, gitcop smells License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@ead5a9dcb46258134fc83760a8b4c980de74c6fe --- bitswap/bitswap.go | 8 +++++--- bitswap/stat.go | 2 ++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 2f2e88ea4..32d748177 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -131,6 +131,7 @@ type Bitswap struct { counterLk sync.Mutex blocksRecvd int dupBlocksRecvd int + dupDataRecvd uint64 } type blockRequest struct { @@ -320,7 +321,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg go func(b *blocks.Block) { defer wg.Done() - if err := bs.updateReceiveCounters(b.Key()); err != nil { + if err := bs.updateReceiveCounters(b); err != nil { return // ignore error, is either logged previously, or ErrAlreadyHaveBlock } @@ -338,17 +339,18 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg var ErrAlreadyHaveBlock = errors.New("already have block") -func (bs *Bitswap) updateReceiveCounters(k key.Key) error { +func (bs *Bitswap) updateReceiveCounters(b *blocks.Block) error { bs.counterLk.Lock() defer bs.counterLk.Unlock() bs.blocksRecvd++ - has, err := bs.blockstore.Has(k) + has, err := 
bs.blockstore.Has(b.Key()) if err != nil { log.Infof("blockstore.Has error: %s", err) return err } if err == nil && has { bs.dupBlocksRecvd++ + bs.dupDataRecvd += uint64(len(b.Data)) } if has { diff --git a/bitswap/stat.go b/bitswap/stat.go index 5fa0e285e..956a4c5b7 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -11,6 +11,7 @@ type Stat struct { Peers []string BlocksReceived int DupBlksReceived int + DupDataReceived uint64 } func (bs *Bitswap) Stat() (*Stat, error) { @@ -20,6 +21,7 @@ func (bs *Bitswap) Stat() (*Stat, error) { bs.counterLk.Lock() st.BlocksReceived = bs.blocksRecvd st.DupBlksReceived = bs.dupBlocksRecvd + st.DupDataReceived = bs.dupDataRecvd bs.counterLk.Unlock() for _, p := range bs.engine.Peers() { From 400c8a4dda186114f8166fd66bd238f44ccae331 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 1 Oct 2015 11:22:28 -0700 Subject: [PATCH 0424/1035] replace imports with absolute path instead of using symlink License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@a06e4b5d69d3699d1ae94695fd18ba87df170d5f --- bitswap/bitswap.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/network/ipfs_impl.go | 2 +- bitswap/workers.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 32d748177..ffe5f5489 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -22,7 +22,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "github.com/ipfs/go-ipfs/p2p/peer" "github.com/ipfs/go-ipfs/thirdparty/delay" - logging "github.com/ipfs/go-ipfs/vendor/go-log-v1.0.0" + logging "github.com/ipfs/go-ipfs/vendor/QmXJkcEXB6C9h6Ytb6rrUTFU56Ro62zxgrbxTT3dgjQGA8/go-log" ) var log = logging.Logger("bitswap") diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 85dde9eb7..16ebab9eb 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -10,7 +10,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl 
"github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "github.com/ipfs/go-ipfs/p2p/peer" - logging "github.com/ipfs/go-ipfs/vendor/go-log-v1.0.0" + logging "github.com/ipfs/go-ipfs/vendor/QmXJkcEXB6C9h6Ytb6rrUTFU56Ro62zxgrbxTT3dgjQGA8/go-log" ) // TODO consider taking responsibility for other types of requests. For diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index c0a4b2d3a..64fc27ad6 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -9,7 +9,7 @@ import ( inet "github.com/ipfs/go-ipfs/p2p/net" peer "github.com/ipfs/go-ipfs/p2p/peer" routing "github.com/ipfs/go-ipfs/routing" - logging "github.com/ipfs/go-ipfs/vendor/go-log-v1.0.0" + logging "github.com/ipfs/go-ipfs/vendor/QmXJkcEXB6C9h6Ytb6rrUTFU56Ro62zxgrbxTT3dgjQGA8/go-log" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/workers.go b/bitswap/workers.go index 41dd94abe..60f8ffc22 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -9,7 +9,7 @@ import ( context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" key "github.com/ipfs/go-ipfs/blocks/key" - logging "github.com/ipfs/go-ipfs/vendor/go-log-v1.0.0" + logging "github.com/ipfs/go-ipfs/vendor/QmXJkcEXB6C9h6Ytb6rrUTFU56Ro62zxgrbxTT3dgjQGA8/go-log" ) var TaskWorkerCount = 8 From 7031ce9dff54ecc95a73ca83aea6189b446369ed Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 11 Oct 2015 21:22:57 -0700 Subject: [PATCH 0425/1035] fix random bitswap hangs License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@ff038ecf4e5ebe53998f92e6c89ecf6b80237cea --- bitswap/wantmanager.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 3b4626a4d..2fae23515 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -56,6 +56,8 @@ type msgQueue struct { out bsmsg.BitSwapMessage network bsnet.BitSwapNetwork + refcnt int + work chan struct{} done chan struct{} } 
@@ -101,13 +103,13 @@ func (pm *WantManager) SendBlock(ctx context.Context, env *engine.Envelope) { } func (pm *WantManager) startPeerHandler(p peer.ID) *msgQueue { - _, ok := pm.peers[p] + mq, ok := pm.peers[p] if ok { - // TODO: log an error? + mq.refcnt++ return nil } - mq := pm.newMsgQueue(p) + mq = pm.newMsgQueue(p) // new peer, we will want to give them our full wantlist fullwantlist := bsmsg.New(true) @@ -129,6 +131,11 @@ func (pm *WantManager) stopPeerHandler(p peer.ID) { return } + pq.refcnt-- + if pq.refcnt > 0 { + return + } + close(pq.done) delete(pm.peers, p) } @@ -247,6 +254,7 @@ func (wm *WantManager) newMsgQueue(p peer.ID) *msgQueue { mq.work = make(chan struct{}, 1) mq.network = wm.network mq.p = p + mq.refcnt = 1 return mq } From 750f7cb498334607aa6854065766844a51aa44ea Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 18 Oct 2015 12:25:53 -0700 Subject: [PATCH 0426/1035] fix panic in bitswap working limit spawning License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@b01706f03ec9a76667885ef694648e3203e3106e --- bitswap/workers.go | 48 +++++++++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 22 deletions(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index 60f8ffc22..2873f8c67 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -5,7 +5,6 @@ import ( process "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" procctx "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context" - ratelimit "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/ratelimit" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" key "github.com/ipfs/go-ipfs/blocks/key" @@ -74,43 +73,48 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { func (bs *Bitswap) provideWorker(px process.Process) { - limiter := ratelimit.NewRateLimiter(px, provideWorkerMax) + limit := make(chan struct{}, provideWorkerMax) 
limitedGoProvide := func(k key.Key, wid int) { + defer func() { + // replace token when done + <-limit + }() ev := logging.LoggableMap{"ID": wid} - limiter.LimitedGo(func(px process.Process) { - ctx := procctx.OnClosingContext(px) // derive ctx from px - defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, &k).Done() + ctx := procctx.OnClosingContext(px) // derive ctx from px + defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, &k).Done() - ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx - defer cancel() + ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx + defer cancel() - if err := bs.network.Provide(ctx, k); err != nil { - log.Error(err) - } - }) + if err := bs.network.Provide(ctx, k); err != nil { + log.Error(err) + } } // worker spawner, reads from bs.provideKeys until it closes, spawning a // _ratelimited_ number of workers to handle each key. - limiter.Go(func(px process.Process) { - for wid := 2; ; wid++ { - ev := logging.LoggableMap{"ID": 1} - log.Event(procctx.OnClosingContext(px), "Bitswap.ProvideWorker.Loop", ev) + for wid := 2; ; wid++ { + ev := logging.LoggableMap{"ID": 1} + log.Event(procctx.OnClosingContext(px), "Bitswap.ProvideWorker.Loop", ev) + select { + case <-px.Closing(): + return + case k, ok := <-bs.provideKeys: + if !ok { + log.Debug("provideKeys channel closed") + return + } select { case <-px.Closing(): return - case k, ok := <-bs.provideKeys: - if !ok { - log.Debug("provideKeys channel closed") - return - } - limitedGoProvide(k, wid) + case limit <- struct{}{}: + go limitedGoProvide(k, wid) } } - }) + } } func (bs *Bitswap) provideCollector(ctx context.Context) { From ce3520077527bc59e6cb32b237ddb4e5218bea54 Mon Sep 17 00:00:00 2001 From: Henry Date: Fri, 23 Oct 2015 08:26:18 +0200 Subject: [PATCH 0427/1035] bitswap: clean log printf and humanize dup data count License: MIT Signed-off-by: Henry This commit was moved from ipfs/go-bitswap@f6dce1ca5a758d25ebb0c984a87d5ecf9f14d8cb 
--- bitswap/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index ffe5f5489..f3a4ad6fb 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -308,7 +308,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg var keys []key.Key for _, block := range iblocks { if _, found := bs.wm.wl.Contains(block.Key()); !found { - log.Info("received un-asked-for block: %s", block) + log.Infof("received un-asked-for %s from %s", block, p) continue } keys = append(keys, block.Key()) From 30ad7031ca61db9d213093476ded24b17924a3da Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 27 Oct 2015 10:47:32 -0700 Subject: [PATCH 0428/1035] update code to use new logging changes License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@8e4f09e5626029d8d273180a135236d864ca579b --- bitswap/bitswap.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/network/ipfs_impl.go | 2 +- bitswap/workers.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index f3a4ad6fb..630f08f31 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -22,7 +22,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "github.com/ipfs/go-ipfs/p2p/peer" "github.com/ipfs/go-ipfs/thirdparty/delay" - logging "github.com/ipfs/go-ipfs/vendor/QmXJkcEXB6C9h6Ytb6rrUTFU56Ro62zxgrbxTT3dgjQGA8/go-log" + logging "github.com/ipfs/go-ipfs/vendor/QmTBXYb6y2ZcJmoXVKk3pf9rzSEjbCg7tQaJW7RSuH14nv/go-log" ) var log = logging.Logger("bitswap") diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 16ebab9eb..778350903 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -10,7 +10,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "github.com/ipfs/go-ipfs/p2p/peer" - logging 
"github.com/ipfs/go-ipfs/vendor/QmXJkcEXB6C9h6Ytb6rrUTFU56Ro62zxgrbxTT3dgjQGA8/go-log" + logging "github.com/ipfs/go-ipfs/vendor/QmTBXYb6y2ZcJmoXVKk3pf9rzSEjbCg7tQaJW7RSuH14nv/go-log" ) // TODO consider taking responsibility for other types of requests. For diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 64fc27ad6..6b2efe6b8 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -9,7 +9,7 @@ import ( inet "github.com/ipfs/go-ipfs/p2p/net" peer "github.com/ipfs/go-ipfs/p2p/peer" routing "github.com/ipfs/go-ipfs/routing" - logging "github.com/ipfs/go-ipfs/vendor/QmXJkcEXB6C9h6Ytb6rrUTFU56Ro62zxgrbxTT3dgjQGA8/go-log" + logging "github.com/ipfs/go-ipfs/vendor/QmTBXYb6y2ZcJmoXVKk3pf9rzSEjbCg7tQaJW7RSuH14nv/go-log" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/workers.go b/bitswap/workers.go index 2873f8c67..7b791f020 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -8,7 +8,7 @@ import ( context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" key "github.com/ipfs/go-ipfs/blocks/key" - logging "github.com/ipfs/go-ipfs/vendor/QmXJkcEXB6C9h6Ytb6rrUTFU56Ro62zxgrbxTT3dgjQGA8/go-log" + logging "github.com/ipfs/go-ipfs/vendor/QmTBXYb6y2ZcJmoXVKk3pf9rzSEjbCg7tQaJW7RSuH14nv/go-log" ) var TaskWorkerCount = 8 From 0caa6a672a15312af8febd55f691685581ba6a84 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 29 Oct 2015 21:22:53 -0700 Subject: [PATCH 0429/1035] vendor logging lib update License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@34cb3acfb0eabe2be20ddb55897376e845c4956c --- bitswap/bitswap.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/network/ipfs_impl.go | 2 +- bitswap/workers.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 630f08f31..7d7954e47 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -22,7 +22,7 @@ import ( wantlist 
"github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "github.com/ipfs/go-ipfs/p2p/peer" "github.com/ipfs/go-ipfs/thirdparty/delay" - logging "github.com/ipfs/go-ipfs/vendor/QmTBXYb6y2ZcJmoXVKk3pf9rzSEjbCg7tQaJW7RSuH14nv/go-log" + logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" ) var log = logging.Logger("bitswap") diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 778350903..03c13d99e 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -10,7 +10,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "github.com/ipfs/go-ipfs/p2p/peer" - logging "github.com/ipfs/go-ipfs/vendor/QmTBXYb6y2ZcJmoXVKk3pf9rzSEjbCg7tQaJW7RSuH14nv/go-log" + logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" ) // TODO consider taking responsibility for other types of requests. For diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 6b2efe6b8..e97211f48 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -9,7 +9,7 @@ import ( inet "github.com/ipfs/go-ipfs/p2p/net" peer "github.com/ipfs/go-ipfs/p2p/peer" routing "github.com/ipfs/go-ipfs/routing" - logging "github.com/ipfs/go-ipfs/vendor/QmTBXYb6y2ZcJmoXVKk3pf9rzSEjbCg7tQaJW7RSuH14nv/go-log" + logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/workers.go b/bitswap/workers.go index 7b791f020..04d9fc2d2 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -8,7 +8,7 @@ import ( context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" key "github.com/ipfs/go-ipfs/blocks/key" - logging "github.com/ipfs/go-ipfs/vendor/QmTBXYb6y2ZcJmoXVKk3pf9rzSEjbCg7tQaJW7RSuH14nv/go-log" + logging 
"github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" ) var TaskWorkerCount = 8 From e4017c11f8bfafabb320ef31fd3d6822098f291e Mon Sep 17 00:00:00 2001 From: Christian Couder Date: Sat, 12 Dec 2015 21:04:21 +0100 Subject: [PATCH 0430/1035] exchange/bitswap/bitswap_test: fix t.Fatal in a goroutine License: MIT Signed-off-by: Christian Couder This commit was moved from ipfs/go-bitswap@a547c8ee5c715c753c0bb6f39479482cb84d9ac2 --- bitswap/bitswap_test.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index c6de90d78..3a2dba62f 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -168,19 +168,31 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { t.Log("Distribute!") wg := sync.WaitGroup{} + errs := make(chan error) + for _, inst := range instances[1:] { wg.Add(1) go func(inst Instance) { defer wg.Done() outch, err := inst.Exchange.GetBlocks(ctx, blkeys) if err != nil { - t.Fatal(err) + errs <- err } for _ = range outch { } }(inst) } - wg.Wait() + + go func() { + wg.Wait() + close(errs) + }() + + for err := range errs { + if err != nil { + t.Fatal(err) + } + } t.Log("Verify!") From d933fd69340285fbe572dfd9b22792fec1fd3076 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 4 Dec 2015 14:25:13 -0800 Subject: [PATCH 0431/1035] use mfs for adds License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@68d6c7f773f5f1533f75e11df36a37ee055170cb --- bitswap/workers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index 04d9fc2d2..fbf0d20db 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -89,7 +89,7 @@ func (bs *Bitswap) provideWorker(px process.Process) { defer cancel() if err := bs.network.Provide(ctx, k); err != nil { - log.Error(err) + //log.Error(err) } } From 856c7ce33ad74ea0574195576970c71636fae9fc Mon Sep 17 00:00:00 2001 
From: Jeromy Date: Sun, 6 Dec 2015 11:03:50 -0800 Subject: [PATCH 0432/1035] cleanup and more testing License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@b36957198aab9df47b2ad7c30454e24bf41b493d --- bitswap/workers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index fbf0d20db..04d9fc2d2 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -89,7 +89,7 @@ func (bs *Bitswap) provideWorker(px process.Process) { defer cancel() if err := bs.network.Provide(ctx, k); err != nil { - //log.Error(err) + log.Error(err) } } From bedd35e244febf82ff4ce4d85e0628cb2dbf260a Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 5 Dec 2015 19:20:15 -0800 Subject: [PATCH 0433/1035] Flatten multipart file transfers License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@328cd1a88a3880f71dbed14472941c3a7aa8b0d4 --- bitswap/workers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index 04d9fc2d2..0c8b8de5d 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -89,7 +89,7 @@ func (bs *Bitswap) provideWorker(px process.Process) { defer cancel() if err := bs.network.Provide(ctx, k); err != nil { - log.Error(err) + log.Warning(err) } } From 4ed410cfd7c93f23100da1b2d7f72e2b90c22d5d Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 2 Jan 2016 17:56:42 -0800 Subject: [PATCH 0434/1035] vendor in new go-datastore License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@48d92d8b4e532b19308285f2fc5f0bdd813cab79 --- bitswap/decision/engine_test.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 8337c4800..d9e1fc202 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -8,8 +8,8 @@ import ( "sync" "testing" - ds 
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" - dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" + ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 446224b6b..90f3412d2 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -1,7 +1,7 @@ package bitswap import ( - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockpeernet "github.com/ipfs/go-ipfs/p2p/net/mock" diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 5bf28036d..f66a17e50 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -3,8 +3,8 @@ package bitswap import ( "time" - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" - ds_sync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" + ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" From bda181370161b618574b338a1d33bfc08f05e5df Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 27 Jan 2016 14:28:34 -0800 Subject: [PATCH 0435/1035] initial vendoring of libp2p 
outside of the repo with gx License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@fc9de650f50effa9bca00f77ad14a7e592247bc4 --- bitswap/bitswap.go | 6 +++--- bitswap/bitswap_test.go | 4 ++-- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 6 +++--- bitswap/decision/engine_test.go | 4 ++-- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 6 +++--- bitswap/network/ipfs_impl.go | 14 +++++++------- bitswap/notifications/notifications.go | 2 +- bitswap/notifications/notifications_test.go | 2 +- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 4 ++-- bitswap/testnet/peernet.go | 6 +++--- bitswap/testnet/virtual.go | 4 ++-- bitswap/testutils.go | 6 +++--- bitswap/wantmanager.go | 4 ++-- bitswap/workers.go | 4 ++-- 19 files changed, 41 insertions(+), 41 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 7d7954e47..724e3d4a7 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -10,7 +10,7 @@ import ( process "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" procctx "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context" - context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" key "github.com/ipfs/go-ipfs/blocks/key" @@ -20,9 +20,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "github.com/ipfs/go-ipfs/p2p/peer" "github.com/ipfs/go-ipfs/thirdparty/delay" - logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" + logging 
"gx/ipfs/QmaPaGNE2GqnfJjRRpQuQuFHuJn4FZvsrGxdik4kgxCkBi/go-log" + peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" ) var log = logging.Logger("bitswap") diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 3a2dba62f..806b35b2b 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -7,16 +7,16 @@ import ( "time" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" travis "github.com/ipfs/go-ipfs/util/testutil/ci/travis" blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" key "github.com/ipfs/go-ipfs/blocks/key" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" - p2ptestutil "github.com/ipfs/go-ipfs/p2p/test/util" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" + p2ptestutil "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index e64815338..9eaf6225a 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -6,8 +6,8 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - "github.com/ipfs/go-ipfs/p2p/peer" "github.com/ipfs/go-ipfs/util/testutil" + "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 03c13d99e..27e520e4e 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -4,13 +4,13 @@ package decision import ( "sync" - context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blocks "github.com/ipfs/go-ipfs/blocks" bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "github.com/ipfs/go-ipfs/p2p/peer" - logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" + logging "gx/ipfs/QmaPaGNE2GqnfJjRRpQuQuFHuJn4FZvsrGxdik4kgxCkBi/go-log" + peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" ) // TODO consider taking responsibility for other types of requests. 
For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index d9e1fc202..78554950e 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -10,12 +10,12 @@ import ( ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync" - context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" - peer "github.com/ipfs/go-ipfs/p2p/peer" testutil "github.com/ipfs/go-ipfs/util/testutil" + peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index c0d1af8a5..e8fa8fe58 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -5,7 +5,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "github.com/ipfs/go-ipfs/p2p/peer" + peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 0ba74edaf..7e22be7fd 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -6,8 +6,8 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "github.com/ipfs/go-ipfs/p2p/peer" pq "github.com/ipfs/go-ipfs/thirdparty/pq" + peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" ) type peerRequestQueue interface { diff --git a/bitswap/message/message.go 
b/bitswap/message/message.go index 090970bd3..2146d3941 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "github.com/ipfs/go-ipfs/p2p/net" + inet "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/net" ggio "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/io" proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 35da0f84d..282647741 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -1,11 +1,11 @@ package network import ( - context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - peer "github.com/ipfs/go-ipfs/p2p/peer" - protocol "github.com/ipfs/go-ipfs/p2p/protocol" + peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" + protocol "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/protocol" ) var ProtocolBitswap protocol.ID = "/ipfs/bitswap" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index e97211f48..3cfcb0e5a 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -1,15 +1,15 @@ package network import ( - ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" - context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - host 
"github.com/ipfs/go-ipfs/p2p/host" - inet "github.com/ipfs/go-ipfs/p2p/net" - peer "github.com/ipfs/go-ipfs/p2p/peer" routing "github.com/ipfs/go-ipfs/routing" - logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" + logging "gx/ipfs/QmaPaGNE2GqnfJjRRpQuQuFHuJn4FZvsrGxdik4kgxCkBi/go-log" + ma "gx/ipfs/QmR3JkmZBKYXgNMNsNZawm914455Qof3PEopwuVSeXG7aV/go-multiaddr" + host "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/host" + inet "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/net" + peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" ) var log = logging.Logger("bitswap_network") @@ -46,7 +46,7 @@ func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (inet.Stream, return nil, err } - return bsnet.host.NewStream(ProtocolBitswap, p) + return bsnet.host.NewStream(ctx, ProtocolBitswap, p) } func (bsnet *impl) SendMessage( diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index e9870940e..79479b84d 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -2,7 +2,7 @@ package notifications import ( pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" - context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blocks "github.com/ipfs/go-ipfs/blocks" key "github.com/ipfs/go-ipfs/blocks/key" ) diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 96ed1c4e3..36b156969 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" 
blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" key "github.com/ipfs/go-ipfs/blocks/key" diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index b0d01b79f..6d49ba5da 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -2,8 +2,8 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - peer "github.com/ipfs/go-ipfs/p2p/peer" "github.com/ipfs/go-ipfs/util/testutil" + peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 9624df5f8..5e99ed55d 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -4,14 +4,14 @@ import ( "sync" "testing" - context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blocks "github.com/ipfs/go-ipfs/blocks" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - peer "github.com/ipfs/go-ipfs/p2p/peer" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/util/testutil" + peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 90f3412d2..b979c208f 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -2,12 +2,12 @@ package bitswap import ( ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" bsnet 
"github.com/ipfs/go-ipfs/exchange/bitswap/network" - mockpeernet "github.com/ipfs/go-ipfs/p2p/net/mock" - peer "github.com/ipfs/go-ipfs/p2p/peer" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/util/testutil" + mockpeernet "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/net/mock" + peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index eb3424366..dd9c1c6a1 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -3,15 +3,15 @@ package bitswap import ( "errors" - context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - peer "github.com/ipfs/go-ipfs/p2p/peer" routing "github.com/ipfs/go-ipfs/routing" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/util/testutil" + peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" ) func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index f66a17e50..b09f69224 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -5,14 +5,14 @@ import ( ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" ds_sync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync" - context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" - peer 
"github.com/ipfs/go-ipfs/p2p/peer" - p2ptestutil "github.com/ipfs/go-ipfs/p2p/test/util" delay "github.com/ipfs/go-ipfs/thirdparty/delay" datastore2 "github.com/ipfs/go-ipfs/util/datastore2" testutil "github.com/ipfs/go-ipfs/util/testutil" + peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" + p2ptestutil "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/test/util" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 2fae23515..f6616b946 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -4,13 +4,13 @@ import ( "sync" "time" - context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "github.com/ipfs/go-ipfs/blocks/key" engine "github.com/ipfs/go-ipfs/exchange/bitswap/decision" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "github.com/ipfs/go-ipfs/p2p/peer" + peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" ) type WantManager struct { diff --git a/bitswap/workers.go b/bitswap/workers.go index 0c8b8de5d..ea066a242 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -5,10 +5,10 @@ import ( process "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" procctx "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context" - context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "github.com/ipfs/go-ipfs/blocks/key" - logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" + logging "gx/ipfs/QmaPaGNE2GqnfJjRRpQuQuFHuJn4FZvsrGxdik4kgxCkBi/go-log" ) var 
TaskWorkerCount = 8 From bca77fbad1a77cc36de1ce39a3f4983638a02a37 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 28 Jan 2016 09:43:06 -0800 Subject: [PATCH 0436/1035] go-keyspace dep from libp2p added License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@cb2f9e8b4740a001f4aaaa491b29be2302f95a5c --- bitswap/bitswap.go | 4 ++-- bitswap/bitswap_test.go | 4 ++-- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 4 ++-- bitswap/decision/engine_test.go | 4 ++-- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 6 +++--- bitswap/network/ipfs_impl.go | 10 +++++----- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 4 ++-- bitswap/testnet/peernet.go | 6 +++--- bitswap/testnet/virtual.go | 4 ++-- bitswap/testutils.go | 6 +++--- bitswap/wantmanager.go | 4 ++-- 16 files changed, 33 insertions(+), 33 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 724e3d4a7..3a0557e90 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -10,7 +10,6 @@ import ( process "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" procctx "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" key "github.com/ipfs/go-ipfs/blocks/key" @@ -21,8 +20,9 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/delay" + peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/QmaPaGNE2GqnfJjRRpQuQuFHuJn4FZvsrGxdik4kgxCkBi/go-log" - peer 
"gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" ) var log = logging.Logger("bitswap") diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 806b35b2b..a6fd5ed00 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -7,8 +7,8 @@ import ( "time" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" travis "github.com/ipfs/go-ipfs/util/testutil/ci/travis" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" @@ -16,7 +16,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 9eaf6225a..27aa4d7e7 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/util/testutil" - "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" + "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 27e520e4e..f303ef64c 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -4,13 +4,13 @@ package decision import ( "sync" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blocks "github.com/ipfs/go-ipfs/blocks" bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/QmaPaGNE2GqnfJjRRpQuQuFHuJn4FZvsrGxdik4kgxCkBi/go-log" - peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" ) // TODO consider taking responsibility for other types of requests. 
For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 78554950e..c9a52ff80 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -10,12 +10,12 @@ import ( ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/util/testutil" - peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index e8fa8fe58..728fc80e3 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -5,7 +5,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 7e22be7fd..b59501792 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" + peer 
"gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" ) type peerRequestQueue interface { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 2146d3941..553dc2155 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/net" + inet "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/net" ggio "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/io" proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 282647741..f5b22e882 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -1,11 +1,11 @@ package network import ( - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" - protocol "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/protocol" + peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + protocol "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/protocol" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) var ProtocolBitswap protocol.ID = "/ipfs/bitswap" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 3cfcb0e5a..179497b0a 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -1,15 +1,15 @@ package network import ( - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key 
"github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" - logging "gx/ipfs/QmaPaGNE2GqnfJjRRpQuQuFHuJn4FZvsrGxdik4kgxCkBi/go-log" ma "gx/ipfs/QmR3JkmZBKYXgNMNsNZawm914455Qof3PEopwuVSeXG7aV/go-multiaddr" - host "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/host" - inet "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/net" - peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" + host "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/host" + inet "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/net" + peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + logging "gx/ipfs/QmaPaGNE2GqnfJjRRpQuQuFHuJn4FZvsrGxdik4kgxCkBi/go-log" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 6d49ba5da..614367e05 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/util/testutil" - peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 5e99ed55d..071e500b8 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -4,14 +4,14 @@ import ( "sync" "testing" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blocks "github.com/ipfs/go-ipfs/blocks" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting 
"github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/util/testutil" - peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index b979c208f..c579d0900 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -2,12 +2,12 @@ package bitswap import ( ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/util/testutil" - mockpeernet "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/net/mock" - peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" + mockpeernet "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/net/mock" + peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index dd9c1c6a1..4f6418f6f 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -3,7 +3,6 @@ package bitswap import ( "errors" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" @@ -11,7 +10,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay 
"github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/util/testutil" - peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index b09f69224..51ac66323 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -5,14 +5,14 @@ import ( ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" ds_sync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" delay "github.com/ipfs/go-ipfs/thirdparty/delay" datastore2 "github.com/ipfs/go-ipfs/util/datastore2" testutil "github.com/ipfs/go-ipfs/util/testutil" - peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" - p2ptestutil "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/test/util" + peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + p2ptestutil "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/test/util" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index f6616b946..8176907f5 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -4,13 +4,13 @@ import ( "sync" "time" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "github.com/ipfs/go-ipfs/blocks/key" engine "github.com/ipfs/go-ipfs/exchange/bitswap/decision" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmY3NAw959vbE1oJooP9HchcRdBsbxhgQsEZTRhKgvoSuC/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) type WantManager struct { From 68a8d45c858e8ec037210489bcd18adb07813c10 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 28 Jan 2016 10:07:26 -0800 Subject: [PATCH 0437/1035] correct go-log dep License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@e0d6a64b161883765788bb7a37a5015c5e471afd --- bitswap/bitswap.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/network/ipfs_impl.go | 2 +- bitswap/notifications/notifications.go | 2 +- bitswap/notifications/notifications_test.go | 2 +- bitswap/workers.go | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 3a0557e90..b1b1187c4 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -22,7 +22,7 @@ import ( "github.com/ipfs/go-ipfs/thirdparty/delay" peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - logging "gx/ipfs/QmaPaGNE2GqnfJjRRpQuQuFHuJn4FZvsrGxdik4kgxCkBi/go-log" + logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) var log = logging.Logger("bitswap") diff --git a/bitswap/decision/engine.go 
b/bitswap/decision/engine.go index f303ef64c..55cc90b96 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -10,7 +10,7 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - logging "gx/ipfs/QmaPaGNE2GqnfJjRRpQuQuFHuJn4FZvsrGxdik4kgxCkBi/go-log" + logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) // TODO consider taking responsibility for other types of requests. For diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 179497b0a..e02c68003 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -9,7 +9,7 @@ import ( inet "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/net" peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - logging "gx/ipfs/QmaPaGNE2GqnfJjRRpQuQuFHuJn4FZvsrGxdik4kgxCkBi/go-log" + logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 79479b84d..8a83bba9b 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -2,9 +2,9 @@ package notifications import ( pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blocks "github.com/ipfs/go-ipfs/blocks" key "github.com/ipfs/go-ipfs/blocks/key" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 36b156969..02acbd13f 100644 --- 
a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" key "github.com/ipfs/go-ipfs/blocks/key" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/workers.go b/bitswap/workers.go index ea066a242..b9dc963be 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -8,7 +8,7 @@ import ( context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "github.com/ipfs/go-ipfs/blocks/key" - logging "gx/ipfs/QmaPaGNE2GqnfJjRRpQuQuFHuJn4FZvsrGxdik4kgxCkBi/go-log" + logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) var TaskWorkerCount = 8 From 0e00c44bd74bab0cec3120b8581b6297eaee53e8 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 31 Jan 2016 10:19:50 -0800 Subject: [PATCH 0438/1035] update libp2p dep License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@5df2fc0661fe010b24ef07577d3f903b686f0dd5 --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 6 +++--- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 4 ++-- bitswap/wantmanager.go | 2 +- 16 files changed, 21 insertions(+), 21 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b1b1187c4..b50dc86a3 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -20,7 +20,7 @@ 
import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/delay" - peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index a6fd5ed00..a84cea5d7 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 27aa4d7e7..3c87bd43e 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/util/testutil" - "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 55cc90b96..c9c879458 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -8,7 +8,7 @@ import ( bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index c9a52ff80..0d6aee7cc 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -14,7 +14,7 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/util/testutil" - peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 728fc80e3..6d3acfc47 
100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -5,7 +5,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index b59501792..55e4f2adc 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" ) type peerRequestQueue interface { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 553dc2155..6152fb3ab 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/net" + inet "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/net" ggio "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/io" proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index f5b22e882..173d4b6ae 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,8 +3,8 @@ package network import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg 
"github.com/ipfs/go-ipfs/exchange/bitswap/message" - peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" - protocol "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/protocol" + peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" + protocol "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index e02c68003..e20ec300d 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -4,10 +4,10 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" + host "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/host" + inet "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/net" + peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" ma "gx/ipfs/QmR3JkmZBKYXgNMNsNZawm914455Qof3PEopwuVSeXG7aV/go-multiaddr" - host "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/host" - inet "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/net" - peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 614367e05..a1371841d 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/util/testutil" - peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + peer 
"gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 071e500b8..7da6510f3 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/util/testutil" - peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index c579d0900..c1782c0e0 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,8 +5,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/util/testutil" - mockpeernet "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/net/mock" - peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + mockpeernet "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/net/mock" + peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 4f6418f6f..422042f99 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/util/testutil" - peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + peer 
"gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 51ac66323..5f3c9c8e5 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,8 +10,8 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" datastore2 "github.com/ipfs/go-ipfs/util/datastore2" testutil "github.com/ipfs/go-ipfs/util/testutil" - peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" - p2ptestutil "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/test/util" + peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" + p2ptestutil "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 8176907f5..8049a0a11 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,7 +9,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmZxtCsPRgCnCXwVPUjcBiFckkG5NMYM4Pthwe6X4C8uQq/go-libp2p/p2p/peer" + peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) From b1a9da3a5a9aa14ead2c50243921891342b0b311 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 31 Jan 2016 15:37:39 -0800 Subject: [PATCH 0439/1035] do that last thing again License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@a6cf027edb644b0af03b8a48e981b76ca9c7fb17 --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- 
bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 6 +++--- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 4 ++-- bitswap/wantmanager.go | 2 +- 16 files changed, 21 insertions(+), 21 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b50dc86a3..17f4f3686 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -20,7 +20,7 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/delay" - peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" + peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index a84cea5d7..04a1fb709 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 3c87bd43e..7a230fa57 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/util/testutil" - "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" + "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index c9c879458..5cf6809d3 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -8,7 +8,7 @@ import ( bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" + peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 0d6aee7cc..53a660c7d 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -14,7 +14,7 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/util/testutil" - peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" + peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 6d3acfc47..0cdd7e37b 
100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -5,7 +5,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" + peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 55e4f2adc..e0fc91989 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" + peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" ) type peerRequestQueue interface { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 6152fb3ab..a0acf8d35 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/net" + inet "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/net" ggio "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/io" proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 173d4b6ae..a81b5fcff 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,8 +3,8 @@ package network import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg 
"github.com/ipfs/go-ipfs/exchange/bitswap/message" - peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" - protocol "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/protocol" + peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" + protocol "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index e20ec300d..b641b5e8f 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -4,10 +4,10 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" - host "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/host" - inet "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/net" - peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" ma "gx/ipfs/QmR3JkmZBKYXgNMNsNZawm914455Qof3PEopwuVSeXG7aV/go-multiaddr" + host "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/host" + inet "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/net" + peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index a1371841d..f79af6d62 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/util/testutil" - peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" + peer 
"gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 7da6510f3..69f1fa73e 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/util/testutil" - peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" + peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index c1782c0e0..8b0d7aabe 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,8 +5,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/util/testutil" - mockpeernet "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/net/mock" - peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" + mockpeernet "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/net/mock" + peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 422042f99..b7b2e7472 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/util/testutil" - peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" + peer 
"gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 5f3c9c8e5..8a8861771 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,8 +10,8 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" datastore2 "github.com/ipfs/go-ipfs/util/datastore2" testutil "github.com/ipfs/go-ipfs/util/testutil" - peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" - p2ptestutil "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/test/util" + peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" + p2ptestutil "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 8049a0a11..243edac37 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,7 +9,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmQQCBoWhMZtStYuAAo2uDNNLit9n7yX5ANBecfjKq4XBn/go-libp2p/p2p/peer" + peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) From 93ce09950afb20363ead9b4c73272cf32e4527f7 Mon Sep 17 00:00:00 2001 From: Thomas Gardner Date: Sun, 24 Jan 2016 14:18:03 +1000 Subject: [PATCH 0440/1035] trivial: various superficial fixes misc/completion/ipfs-completion.bash: add `ipfs stats` to BASH completion core/commands/mount_unix.go: ensure error is not nil before printing it contribute.md: fix bibliography indexing in example core/commands/swarm.go: change tabs to spaces in USAGE message *: 80-column readability 
improvements License: MIT Signed-off-by: Thomas Gardner This commit was moved from ipfs/go-bitswap@1519a59ccbe5448ce70f32b1316a28dee4898e51 --- bitswap/decision/engine.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 03c13d99e..78e02dbd7 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -21,7 +21,8 @@ import ( // batches/combines and takes all of these into consideration. // // Right now, messages go onto the network for four reasons: -// 1. an initial `sendwantlist` message to a provider of the first key in a request +// 1. an initial `sendwantlist` message to a provider of the first key in a +// request // 2. a periodic full sweep of `sendwantlist` messages to all providers // 3. upon receipt of blocks, a `cancel` message to all peers // 4. draining the priority queue of `blockrequests` from peers @@ -34,9 +35,10 @@ import ( // Some examples of what would be possible: // // * when sending out the wantlists, include `cancel` requests -// * when handling `blockrequests`, include `sendwantlist` and `cancel` as appropriate +// * when handling `blockrequests`, include `sendwantlist` and `cancel` as +// appropriate // * when handling `cancel`, if we recently received a wanted block from a -// peer, include a partial wantlist that contains a few other high priority +// peer, include a partial wantlist that contains a few other high priority // blocks // // In a sense, if we treat the decision engine as a black box, it could do From 5ee18a98317f1ad2faf0cc7e3269b42539d3ba2d Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 8 Feb 2016 15:59:22 -0800 Subject: [PATCH 0441/1035] wait for peers in wantmanager to all appear License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@097fdc3d90f79894bd62fedb4bb2afe7768797ba --- bitswap/bitswap_test.go | 13 +++++++++++++ bitswap/wantmanager.go | 18 ++++++++++++++++-- 2 files changed, 29 
insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 04a1fb709..435779fd8 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -158,6 +158,19 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { t.Log("Give the blocks to the first instance") + nump := len(instances) - 1 + // assert we're properly connected + for _, inst := range instances { + peers := inst.Exchange.wm.ConnectedPeers() + for i := 0; i < 10 && len(peers) != nump; i++ { + time.Sleep(time.Millisecond * 50) + peers = inst.Exchange.wm.ConnectedPeers() + } + if len(peers) != nump { + t.Fatal("not enough peers connected to instance") + } + } + var blkeys []key.Key first := instances[0] for _, b := range blocks { diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 243edac37..73bd4b4c8 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -16,8 +16,9 @@ import ( type WantManager struct { // sync channels for Run loop incoming chan []*bsmsg.Entry - connect chan peer.ID // notification channel for new peers connecting - disconnect chan peer.ID // notification channel for peers disconnecting + connect chan peer.ID // notification channel for new peers connecting + disconnect chan peer.ID // notification channel for peers disconnecting + peerReqs chan chan []peer.ID // channel to request connected peers on // synchronized by Run loop, only touch inside there peers map[peer.ID]*msgQueue @@ -32,6 +33,7 @@ func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantMana incoming: make(chan []*bsmsg.Entry, 10), connect: make(chan peer.ID, 10), disconnect: make(chan peer.ID, 10), + peerReqs: make(chan chan []peer.ID), peers: make(map[peer.ID]*msgQueue), wl: wantlist.NewThreadSafe(), network: network, @@ -88,6 +90,12 @@ func (pm *WantManager) addEntries(ks []key.Key, cancel bool) { } } +func (pm *WantManager) ConnectedPeers() []peer.ID { + resp := make(chan []peer.ID) + pm.peerReqs <- 
resp + return <-resp +} + func (pm *WantManager) SendBlock(ctx context.Context, env *engine.Envelope) { // Blocks need to be sent synchronously to maintain proper backpressure // throughout the network stack @@ -242,6 +250,12 @@ func (pm *WantManager) Run() { pm.startPeerHandler(p) case p := <-pm.disconnect: pm.stopPeerHandler(p) + case req := <-pm.peerReqs: + var peers []peer.ID + for p := range pm.peers { + peers = append(peers, p) + } + req <- peers case <-pm.ctx.Done(): return } From 01cc828038b94cd05b5877317486ae3f600b287a Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 8 Feb 2016 16:45:15 -0800 Subject: [PATCH 0442/1035] remove goprocess from godeps, use gx vendored one License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@0ee5bc476a3f24ba2276e22a744af247dcbf2be0 --- bitswap/bitswap.go | 4 ++-- bitswap/workers.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 17f4f3686..3d3add327 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -8,8 +8,6 @@ import ( "sync" "time" - process "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" - procctx "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" key "github.com/ipfs/go-ipfs/blocks/key" @@ -20,6 +18,8 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/delay" + process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" + procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging 
"gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" diff --git a/bitswap/workers.go b/bitswap/workers.go index b9dc963be..46f5693f4 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -3,8 +3,8 @@ package bitswap import ( "time" - process "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" - procctx "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context" + process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" + procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "github.com/ipfs/go-ipfs/blocks/key" From 25fcac3733aa754d14c8acc8ce603f95a414216b Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 9 Feb 2016 10:07:20 -0800 Subject: [PATCH 0443/1035] remove gogo-protobuf from godeps, use gx vendored License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@24e784ae79b5032d86175d1dc7d421f780bb8dc6 --- bitswap/message/message.go | 4 ++-- bitswap/message/message_test.go | 2 +- bitswap/message/pb/message.pb.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index a0acf8d35..d8c7408e0 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -9,8 +9,8 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" inet "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/net" - ggio "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/io" - proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto" + ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" + proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" ) // TODO move message.go into the bitswap package diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go 
index 70d966e0a..db79208d2 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -4,7 +4,7 @@ import ( "bytes" "testing" - proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto" + proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" blocks "github.com/ipfs/go-ipfs/blocks" key "github.com/ipfs/go-ipfs/blocks/key" diff --git a/bitswap/message/pb/message.pb.go b/bitswap/message/pb/message.pb.go index 828d1a225..02f9f2944 100644 --- a/bitswap/message/pb/message.pb.go +++ b/bitswap/message/pb/message.pb.go @@ -13,7 +13,7 @@ It has these top-level messages: */ package bitswap_message_pb -import proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto" +import proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" import math "math" // Reference imports to suppress errors if they are not otherwise used. From 53d56f5e8e3359fc8193165bf0e917dc3c2fc6b8 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 9 Feb 2016 10:56:19 -0800 Subject: [PATCH 0444/1035] Use gx vendored go-ipfs-utils where possible For the rest of the packages in util, move them to thirdparty and update the references. util is gone! 
License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@779ea51bf134da46c53900720530dcf639337c80 --- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/peer_request_queue_test.go | 2 +- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 4 ++-- 9 files changed, 10 insertions(+), 10 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 435779fd8..22ff04606 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -7,7 +7,7 @@ import ( "time" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - travis "github.com/ipfs/go-ipfs/util/testutil/ci/travis" + travis "github.com/ipfs/go-ipfs/thirdparty/testutil/ci/travis" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blocks "github.com/ipfs/go-ipfs/blocks" diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 7a230fa57..a761c5b96 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -6,7 +6,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - "github.com/ipfs/go-ipfs/util/testutil" + "github.com/ipfs/go-ipfs/thirdparty/testutil" "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 53a660c7d..b47d4063a 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -13,7 +13,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" - testutil "github.com/ipfs/go-ipfs/util/testutil" + testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer 
"gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index e71782f07..a2d96a9c6 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -9,7 +9,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - "github.com/ipfs/go-ipfs/util/testutil" + "github.com/ipfs/go-ipfs/thirdparty/testutil" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index f79af6d62..11be6249b 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -2,7 +2,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - "github.com/ipfs/go-ipfs/util/testutil" + "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" ) diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 69f1fa73e..59c912b25 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -9,7 +9,7 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "github.com/ipfs/go-ipfs/util/testutil" + testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 8b0d7aabe..4224ad73d 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -4,7 +4,7 @@ import ( ds 
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" - testutil "github.com/ipfs/go-ipfs/util/testutil" + testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" mockpeernet "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/net/mock" peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index b7b2e7472..1c69337e9 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,7 +9,7 @@ import ( routing "github.com/ipfs/go-ipfs/routing" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "github.com/ipfs/go-ipfs/util/testutil" + testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 8a8861771..19037dafe 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -7,9 +7,9 @@ import ( ds_sync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" + datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - datastore2 "github.com/ipfs/go-ipfs/util/datastore2" - testutil "github.com/ipfs/go-ipfs/util/testutil" + testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" p2ptestutil "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/test/util" context 
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" From 6ced3e1f327601ec47cddccd799750e3ed9e2bde Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 27 Nov 2015 16:03:16 -0800 Subject: [PATCH 0445/1035] introduce low memory flag License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@d6cc96c2ff3b91deece4aaa32c2379e22e736657 --- bitswap/bitswap.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 3d3add327..e4bb1582f 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -17,6 +17,7 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" @@ -39,12 +40,22 @@ const ( sizeBatchRequestChan = 32 // kMaxPriority is the max priority as defined by the bitswap protocol kMaxPriority = math.MaxInt32 +) +var ( HasBlockBufferSize = 256 provideKeysBufferSize = 2048 provideWorkerMax = 512 ) +func init() { + if flags.LowMemMode { + HasBlockBufferSize = 64 + provideKeysBufferSize = 512 + provideWorkerMax = 16 + } +} + var rebroadcastDelay = delay.Fixed(time.Second * 10) // New initializes a BitSwap instance that communicates over the provided From 5a2133d6314b1c983619529bd6e74dcdc3ec4930 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 9 Mar 2016 09:53:19 -0800 Subject: [PATCH 0446/1035] update libp2p dep License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@3534c6d4858952aed5a438074008b402cea5bd73 --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- 
bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 8 ++++---- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 4 ++-- bitswap/wantmanager.go | 2 +- 16 files changed, 22 insertions(+), 22 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index e4bb1582f..dc25dafbd 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -19,9 +19,9 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" + peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" - peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 22ff04606..ea2259cf2 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index a761c5b96..e2b2788d2 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" + "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 77c7f6428..46eb3c112 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -8,7 +8,7 @@ import ( bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" + peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index b47d4063a..65b1c623d 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -14,7 +14,7 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" + peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 
0cdd7e37b..3fdc62e04 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -5,7 +5,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" + peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index e0fc91989..40967d3e1 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" + peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" ) type peerRequestQueue interface { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index d8c7408e0..632bc59f9 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/net" + inet "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index a81b5fcff..a278ca272 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,8 +3,8 @@ package network import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg 
"github.com/ipfs/go-ipfs/exchange/bitswap/message" - peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" - protocol "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/protocol" + peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" + protocol "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index b641b5e8f..7200916c7 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -4,12 +4,12 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" - ma "gx/ipfs/QmR3JkmZBKYXgNMNsNZawm914455Qof3PEopwuVSeXG7aV/go-multiaddr" - host "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/host" - inet "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/net" - peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" + host "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/host" + inet "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/net" + peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" + ma "gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 11be6249b..6b7b0aa0d 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer 
"gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" + peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 59c912b25..74dad02ee 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" + peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 4224ad73d..9d30d8286 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,8 +5,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/net/mock" - peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" + mockpeernet "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/net/mock" + peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 1c69337e9..92270f451 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer 
"gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" + peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 19037dafe..e022e9c94 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,8 +10,8 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" - p2ptestutil "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/test/util" + peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" + p2ptestutil "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 73bd4b4c8..744e1e52a 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,7 +9,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer" + peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) From 0fca403f4e915953261ea2ae225981d3d997131b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20Mur=C3=A9?= Date: Sun, 20 Mar 2016 17:07:25 +0100 Subject: [PATCH 0447/1035] clean deprecated Key.Pretty() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Michael Muré This commit was moved from 
ipfs/go-bitswap@12cdf9443167e350b57f249b4e7e73db9df20f6b --- bitswap/message/message.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 632bc59f9..41496ed91 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -173,7 +173,7 @@ func (m *impl) ToNet(w io.Writer) error { func (m *impl) Loggable() map[string]interface{} { var blocks []string for _, v := range m.blocks { - blocks = append(blocks, v.Key().Pretty()) + blocks = append(blocks, v.Key().B58String()) } return map[string]interface{}{ "blocks": blocks, From 2264327e0dcfa5381cd4e1ebb577546341f4fcca Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 29 Mar 2016 19:18:14 -0700 Subject: [PATCH 0448/1035] update utp and cleanup more godeps along the way License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@9e70ab1289f6a8c4652d0ee53680703d791c62d1 --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 6 +++--- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 4 ++-- bitswap/wantmanager.go | 2 +- 16 files changed, 21 insertions(+), 21 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index dc25dafbd..8c3ae8917 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -19,9 +19,9 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" - peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" process 
"gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" + peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index ea2259cf2..09ed778a7 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index e2b2788d2..c1e7c0c68 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" + "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 46eb3c112..8769f6ad7 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -8,7 +8,7 @@ import ( bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer 
"gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 65b1c623d..1fa45a422 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -14,7 +14,7 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 3fdc62e04..0e63c3e05 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -5,7 +5,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 40967d3e1..f9589de1f 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" + peer 
"gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" ) type peerRequestQueue interface { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 41496ed91..d293034c7 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/net" + inet "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index a278ca272..f509191e4 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,8 +3,8 @@ package network import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" - protocol "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/protocol" + peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" + protocol "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 7200916c7..7f67aaf2a 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -4,9 +4,9 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" - host "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/host" - inet 
"gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/net" - peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" + host "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/host" + inet "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/net" + peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ma "gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr" diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 6b7b0aa0d..12984ece5 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 74dad02ee..bfd1bdcf4 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 9d30d8286..3058d24fa 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,8 +5,8 @@ import ( bsnet 
"github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/net/mock" - peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" + mockpeernet "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/net/mock" + peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 92270f451..15cd7821b 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index e022e9c94..83715eb85 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,8 +10,8 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" - p2ptestutil "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/test/util" + peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" + p2ptestutil "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git 
a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 744e1e52a..c2b6f6b50 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,7 +9,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmNefBbWHR9JEiP3KDVqZsBLQVRmH3GBG2D2Ke24SsFqfW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) From 06a0db1911496993edbfc814792ea1d15030a95d Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 6 Apr 2016 15:42:06 -0700 Subject: [PATCH 0449/1035] switch to new libp2p with mss crypto License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@661dd4bfe82148e1c7578329678611a4c63df3ab --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 6 +++--- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 4 ++-- bitswap/wantmanager.go | 2 +- 16 files changed, 21 insertions(+), 21 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 8c3ae8917..368400c42 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -21,7 +21,7 @@ import ( "github.com/ipfs/go-ipfs/thirdparty/delay" process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" - peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" + peer 
"gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 09ed778a7..b696a1736 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index c1e7c0c68..2a04a1e13 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" + "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 8769f6ad7..064e50d2b 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -8,7 +8,7 @@ import ( bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" context 
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 1fa45a422..573a1eb1f 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -14,7 +14,7 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 0e63c3e05..c1cc2e49f 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -5,7 +5,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index f9589de1f..4eaea55f6 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" ) type peerRequestQueue interface { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index d293034c7..6a564fc8e 
100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/net" + inet "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index f509191e4..042e9cd5c 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,8 +3,8 @@ package network import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" - protocol "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/protocol" + peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" + protocol "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 7f67aaf2a..a56bfb55c 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -4,9 +4,9 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" - host "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/host" - inet "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/net" - peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" + host 
"gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/host" + inet "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/net" + peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ma "gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr" diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 12984ece5..9d52d499b 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index bfd1bdcf4..015e51b38 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 3058d24fa..7ec3ce5a3 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,8 +5,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet 
"gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/net/mock" - peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" + mockpeernet "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/net/mock" + peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 15cd7821b..79fb397a9 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 83715eb85..88052aed8 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,8 +10,8 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" - p2ptestutil "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/test/util" + peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" + p2ptestutil "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index c2b6f6b50..277f3aa82 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,7 +9,7 @@ import ( bsmsg 
"github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmSN2ELGRp4T9kjqiSsSNJRUeR9JKXzQEgwe1HH3tdSGbC/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) From 586d3b462b563222fd8df2223ccd1f79e20a3aed Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 11 Apr 2016 12:52:54 -0700 Subject: [PATCH 0450/1035] update libp2p dep to fix hanging listeners problem License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@c23d8d1666845abaee891d0ca2a761fb3e8da092 --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 6 +++--- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 4 ++-- bitswap/wantmanager.go | 2 +- 16 files changed, 21 insertions(+), 21 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 368400c42..a0a977ed1 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -21,9 +21,9 @@ import ( "github.com/ipfs/go-ipfs/thirdparty/delay" process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" - peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" + peer 
"gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) var log = logging.Logger("bitswap") diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index b696a1736..9eee6f2fd 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 2a04a1e13..74bc38439 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" + "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 064e50d2b..ad631dd56 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -8,9 +8,9 @@ import ( bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" + peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) // TODO 
consider taking responsibility for other types of requests. For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 573a1eb1f..ea8a3b664 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -14,8 +14,8 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index c1cc2e49f..e2fe86bed 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -5,7 +5,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" + peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 4eaea55f6..5f0e6748d 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" + peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) type peerRequestQueue interface { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 6a564fc8e..c91a5b6ec 100644 --- a/bitswap/message/message.go +++ 
b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/net" + inet "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 042e9cd5c..481b9d0e1 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,9 +3,9 @@ package network import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" - protocol "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" + protocol "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/protocol" ) var ProtocolBitswap protocol.ID = "/ipfs/bitswap" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index a56bfb55c..717367eb6 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -4,11 +4,11 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" - host "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/host" - inet "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/net" - peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" context 
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" + host "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/host" + inet "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/net" + peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ma "gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr" ) diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 9d52d499b..bde882a5c 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" + peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 015e51b38..da0af814f 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -10,8 +10,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 7ec3ce5a3..34a39d783 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,9 +5,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil 
"github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/net/mock" - peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + mockpeernet "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/net/mock" + peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 79fb397a9..4932838bb 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,8 +10,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 88052aed8..a4970b34e 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,9 +10,9 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" - p2ptestutil "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" + p2ptestutil "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/test/util" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for 
NON TESTS! diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 277f3aa82..a2be89b1d 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,8 +9,8 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmZMehXD2w81qeVJP6r1mmocxwsD7kqAvuzGm2QWDw1H88/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) type WantManager struct { From 70aa8ac20f2bc5c39f019e4f71fa222879f1e003 Mon Sep 17 00:00:00 2001 From: Lars Gierth Date: Sat, 16 Apr 2016 21:23:47 -0700 Subject: [PATCH 0451/1035] Update go-libp2p License: MIT Signed-off-by: Lars Gierth This commit was moved from ipfs/go-bitswap@e55d995c1137df61db5bd2fa6351bd9a828f3a46 --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 6 +++--- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 4 ++-- bitswap/wantmanager.go | 2 +- 16 files changed, 21 insertions(+), 21 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index a0a977ed1..d5dd95312 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -21,9 +21,9 @@ import ( "github.com/ipfs/go-ipfs/thirdparty/delay" process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" + peer 
"gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" - peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) var log = logging.Logger("bitswap") diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 9eee6f2fd..3852b15a5 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 74bc38439..d030aa5a2 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" + "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index ad631dd56..1d3142520 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -8,9 +8,9 @@ import ( bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" context 
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" - peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) // TODO consider taking responsibility for other types of requests. For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index ea8a3b664..756e78b2f 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -14,8 +14,8 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index e2fe86bed..101feb85a 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -5,7 +5,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" + peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 5f0e6748d..5a669419e 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" + peer 
"gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" ) type peerRequestQueue interface { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index c91a5b6ec..81fd16458 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/net" + inet "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 481b9d0e1..70915af2d 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,9 +3,9 @@ package network import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" + protocol "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" - protocol "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/protocol" ) var ProtocolBitswap protocol.ID = "/ipfs/bitswap" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 717367eb6..a820f95de 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -4,11 +4,11 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" + host 
"gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/host" + inet "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/net" + peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" - host "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/host" - inet "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/net" - peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ma "gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr" ) diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index bde882a5c..492014b6a 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" + peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index da0af814f..47cb5e3d1 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -10,8 +10,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 
34a39d783..7a14143f3 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,9 +5,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + mockpeernet "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/net/mock" + peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - mockpeernet "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/net/mock" - peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 4932838bb..f151d1159 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,8 +10,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index a4970b34e..4b9a6d167 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,9 +10,9 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" + p2ptestutil "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - 
peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" - p2ptestutil "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/test/util" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index a2be89b1d..14da0c86a 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,8 +9,8 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - peer "gx/ipfs/QmccGfZs3rzku8Bv6sTPH3bMUKD1EVod8srgRjt5csdmva/go-libp2p/p2p/peer" ) type WantManager struct { From b8a95cbbf4b1e949591b7ae55e530752f7822722 Mon Sep 17 00:00:00 2001 From: Lars Gierth Date: Sat, 16 Apr 2016 21:38:22 -0700 Subject: [PATCH 0452/1035] Use extracted go-libp2p-crypto, -secio, -peer packages License: MIT Signed-off-by: Lars Gierth This commit was moved from ipfs/go-bitswap@cba821a889efa5fb815de405d85b427adfb27b2d --- bitswap/bitswap.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 2 +- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 2 +- bitswap/wantmanager.go | 2 +- 14 files changed, 14 insertions(+), 14 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d5dd95312..8e7f4df48 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -21,7 +21,7 @@ import ( "github.com/ipfs/go-ipfs/thirdparty/delay" process 
"gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" - peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index d030aa5a2..7b1d26fd9 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" + "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 1d3142520..6d2577b72 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -8,7 +8,7 @@ import ( bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 756e78b2f..d496096bb 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -14,7 +14,7 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message 
"github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 101feb85a..de133524e 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -5,7 +5,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 5a669419e..02535f7a8 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" ) type peerRequestQueue interface { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 70915af2d..d39fe4026 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,8 +3,8 @@ package network import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" protocol "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/protocol" + peer 
"gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index a820f95de..2d1512660 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -6,7 +6,7 @@ import ( routing "github.com/ipfs/go-ipfs/routing" host "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/host" inet "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/net" - peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ma "gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr" diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 492014b6a..73fb8bac7 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 47cb5e3d1..609e51f7e 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" context 
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 7a14143f3..7b2255b8e 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -6,7 +6,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" mockpeernet "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/net/mock" - peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index f151d1159..40cb9e13f 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 4b9a6d167..504fb4f96 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,8 +10,8 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" p2ptestutil "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/test/util" + peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git 
a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 14da0c86a..f80acbfae 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,7 +9,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/peer" + peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) From 377962a6c43de3c3d7ee5e425ebe6ede16c37ea5 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 20 Nov 2015 15:24:14 -0800 Subject: [PATCH 0453/1035] wire contexts into bitswap requests more deeply License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@b3005fb29a5595b2a925db38a353bbd15f47ba1b --- bitswap/bitswap.go | 51 ++++++++--------------- bitswap/decision/engine.go | 2 +- bitswap/decision/ledger.go | 6 ++- bitswap/wantlist/wantlist.go | 30 ++++++++++---- bitswap/wantmanager.go | 19 ++++++--- bitswap/workers.go | 79 ++++++++++++++++++++---------------- 6 files changed, 102 insertions(+), 85 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 8e7f4df48..bf509fc55 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -86,7 +86,7 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, notifications: notif, engine: decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method network: network, - findKeys: make(chan *blockRequest, sizeBatchRequestChan), + findKeys: make(chan *wantlist.Entry, sizeBatchRequestChan), process: px, newBlocks: make(chan *blocks.Block, HasBlockBufferSize), provideKeys: make(chan key.Key, provideKeysBufferSize), @@ -129,7 +129,7 @@ type Bitswap struct { notifications notifications.PubSub // send keys to a worker to find and connect to providers for them - findKeys chan *blockRequest 
+ findKeys chan *wantlist.Entry engine *decision.Engine @@ -146,8 +146,8 @@ type Bitswap struct { } type blockRequest struct { - keys []key.Key - ctx context.Context + key key.Key + ctx context.Context } // GetBlock attempts to retrieve a particular block from peers within the @@ -208,6 +208,12 @@ func (bs *Bitswap) WantlistForPeer(p peer.ID) []key.Key { // resources, provide a context with a reasonably short deadline (ie. not one // that lasts throughout the lifetime of the server) func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan *blocks.Block, error) { + if len(keys) == 0 { + out := make(chan *blocks.Block) + close(out) + return out, nil + } + select { case <-bs.process.Closing(): return nil, errors.New("bitswap is closed") @@ -219,11 +225,14 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan *block log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k) } - bs.wm.WantBlocks(keys) + bs.wm.WantBlocks(ctx, keys) - req := &blockRequest{ - keys: keys, - ctx: ctx, + // NB: Optimization. Assumes that providers of key[0] are likely to + // be able to provide for all keys. This currently holds true in most + // every situation. Later, this assumption may not hold as true. 
+ req := &wantlist.Entry{ + Key: keys[0], + Ctx: ctx, } select { case bs.findKeys <- req: @@ -276,32 +285,6 @@ func (bs *Bitswap) tryPutBlock(blk *blocks.Block, attempts int) error { return err } -func (bs *Bitswap) connectToProviders(ctx context.Context, entries []wantlist.Entry) { - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // Get providers for all entries in wantlist (could take a while) - wg := sync.WaitGroup{} - for _, e := range entries { - wg.Add(1) - go func(k key.Key) { - defer wg.Done() - - child, cancel := context.WithTimeout(ctx, providerRequestTimeout) - defer cancel() - providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest) - for prov := range providers { - go func(p peer.ID) { - bs.network.ConnectTo(ctx, p) - }(prov) - } - }(e.Key) - } - - wg.Wait() // make sure all our children do finish. -} - func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { // This call records changes to wantlists, blocks received, // and number of bytes transfered. 
diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 6d2577b72..6a026858f 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -217,7 +217,7 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { e.peerRequestQueue.Remove(entry.Key, p) } else { log.Debugf("wants %s - %d", entry.Key, entry.Priority) - l.Wants(entry.Key, entry.Priority) + l.Wants(entry.Ctx, entry.Key, entry.Priority) if exists, err := e.bs.Has(entry.Key); err == nil && exists { e.peerRequestQueue.Push(entry.Entry, p) newWorkExists = true diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index de133524e..7b8982e47 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,6 +6,8 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" + + "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) // keySet is just a convenient alias for maps of keys, where we only care @@ -68,9 +70,9 @@ func (l *ledger) ReceivedBytes(n int) { } // TODO: this needs to be different. We need timeouts. 
-func (l *ledger) Wants(k key.Key, priority int) { +func (l *ledger) Wants(ctx context.Context, k key.Key, priority int) { log.Debugf("peer %s wants %s", l.Partner, k) - l.wantList.Add(k, priority) + l.wantList.Add(ctx, k, priority) } func (l *ledger) CancelWant(k key.Key) { diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index a82b484a4..545b98f7c 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -3,9 +3,12 @@ package wantlist import ( - key "github.com/ipfs/go-ipfs/blocks/key" "sort" "sync" + + key "github.com/ipfs/go-ipfs/blocks/key" + + "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) type ThreadSafe struct { @@ -16,7 +19,6 @@ type ThreadSafe struct { // not threadsafe type Wantlist struct { set map[key.Key]Entry - // TODO provide O(1) len accessor if cost becomes an issue } type Entry struct { @@ -24,6 +26,7 @@ type Entry struct { // slices can be copied efficiently. Key key.Key Priority int + Ctx context.Context } type entrySlice []Entry @@ -44,22 +47,25 @@ func New() *Wantlist { } } -func (w *ThreadSafe) Add(k key.Key, priority int) { - // TODO rm defer for perf +func (w *ThreadSafe) Add(ctx context.Context, k key.Key, priority int) { + w.lk.Lock() + defer w.lk.Unlock() + w.Wantlist.Add(ctx, k, priority) +} + +func (w *ThreadSafe) AddEntry(e Entry) { w.lk.Lock() defer w.lk.Unlock() - w.Wantlist.Add(k, priority) + w.Wantlist.AddEntry(e) } func (w *ThreadSafe) Remove(k key.Key) { - // TODO rm defer for perf w.lk.Lock() defer w.lk.Unlock() w.Wantlist.Remove(k) } func (w *ThreadSafe) Contains(k key.Key) (Entry, bool) { - // TODO rm defer for perf w.lk.RLock() defer w.lk.RUnlock() return w.Wantlist.Contains(k) @@ -87,14 +93,22 @@ func (w *Wantlist) Len() int { return len(w.set) } -func (w *Wantlist) Add(k key.Key, priority int) { +func (w *Wantlist) Add(ctx context.Context, k key.Key, priority int) { if _, ok := w.set[k]; ok { return } w.set[k] = Entry{ Key: k, Priority: priority, + 
Ctx: ctx, + } +} + +func (w *Wantlist) AddEntry(e Entry) { + if _, ok := w.set[e.Key]; ok { + return } + w.set[e.Key] = e } func (w *Wantlist) Remove(k key.Key) { diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index f80acbfae..be68b3faa 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -64,16 +64,16 @@ type msgQueue struct { done chan struct{} } -func (pm *WantManager) WantBlocks(ks []key.Key) { +func (pm *WantManager) WantBlocks(ctx context.Context, ks []key.Key) { log.Infof("want blocks: %s", ks) - pm.addEntries(ks, false) + pm.addEntries(ctx, ks, false) } func (pm *WantManager) CancelWants(ks []key.Key) { - pm.addEntries(ks, true) + pm.addEntries(context.TODO(), ks, true) } -func (pm *WantManager) addEntries(ks []key.Key, cancel bool) { +func (pm *WantManager) addEntries(ctx context.Context, ks []key.Key, cancel bool) { var entries []*bsmsg.Entry for i, k := range ks { entries = append(entries, &bsmsg.Entry{ @@ -81,6 +81,7 @@ func (pm *WantManager) addEntries(ks []key.Key, cancel bool) { Entry: wantlist.Entry{ Key: k, Priority: kMaxPriority - i, + Ctx: ctx, }, }) } @@ -224,7 +225,7 @@ func (pm *WantManager) Run() { if e.Cancel { pm.wl.Remove(e.Key) } else { - pm.wl.Add(e.Key, e.Priority) + pm.wl.AddEntry(e.Entry) } } @@ -237,6 +238,14 @@ func (pm *WantManager) Run() { // resend entire wantlist every so often (REALLY SHOULDNT BE NECESSARY) var es []*bsmsg.Entry for _, e := range pm.wl.Entries() { + select { + case <-e.Ctx.Done(): + // entry has been cancelled + // simply continue, the entry will be removed from the + // wantlist soon enough + continue + default: + } es = append(es, &bsmsg.Entry{Entry: e}) } for _, p := range pm.peers { diff --git a/bitswap/workers.go b/bitswap/workers.go index 46f5693f4..1bd9154f5 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -1,6 +1,7 @@ package bitswap import ( + "sync" "time" process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" @@ -8,6 +9,8 @@ import ( context 
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "github.com/ipfs/go-ipfs/blocks/key" + wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) @@ -16,7 +19,7 @@ var TaskWorkerCount = 8 func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { // Start up a worker to handle block requests this node is making px.Go(func(px process.Process) { - bs.providerConnector(ctx) + bs.providerQueryManager(ctx) }) // Start up workers to handle requests from other nodes for the data on this node @@ -149,37 +152,6 @@ func (bs *Bitswap) provideCollector(ctx context.Context) { } } -// connects to providers for the given keys -func (bs *Bitswap) providerConnector(parent context.Context) { - defer log.Info("bitswap client worker shutting down...") - - for { - log.Event(parent, "Bitswap.ProviderConnector.Loop") - select { - case req := <-bs.findKeys: - keys := req.keys - if len(keys) == 0 { - log.Warning("Received batch request for zero blocks") - continue - } - log.Event(parent, "Bitswap.ProviderConnector.Work", logging.LoggableMap{"Keys": keys}) - - // NB: Optimization. Assumes that providers of key[0] are likely to - // be able to provide for all keys. This currently holds true in most - // every situation. Later, this assumption may not hold as true. 
- child, cancel := context.WithTimeout(req.ctx, providerRequestTimeout) - providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest) - for p := range providers { - go bs.network.ConnectTo(req.ctx, p) - } - cancel() - - case <-parent.Done(): - return - } - } -} - func (bs *Bitswap) rebroadcastWorker(parent context.Context) { ctx, cancel := context.WithCancel(parent) defer cancel() @@ -200,12 +172,49 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) { } case <-broadcastSignal.C: // resend unfulfilled wantlist keys log.Event(ctx, "Bitswap.Rebroadcast.active") - entries := bs.wm.wl.Entries() - if len(entries) > 0 { - bs.connectToProviders(ctx, entries) + for _, e := range bs.wm.wl.Entries() { + bs.findKeys <- &e } case <-parent.Done(): return } } } + +func (bs *Bitswap) providerQueryManager(ctx context.Context) { + var activeLk sync.Mutex + active := make(map[key.Key]*wantlist.Entry) + + for { + select { + case e := <-bs.findKeys: + activeLk.Lock() + if _, ok := active[e.Key]; ok { + activeLk.Unlock() + continue + } + active[e.Key] = e + activeLk.Unlock() + + go func(e *wantlist.Entry) { + child, cancel := context.WithTimeout(e.Ctx, providerRequestTimeout) + defer cancel() + providers := bs.network.FindProvidersAsync(child, e.Key, maxProvidersPerRequest) + for p := range providers { + go func(p peer.ID) { + err := bs.network.ConnectTo(child, p) + if err != nil { + log.Debug("failed to connect to provider %s: %s", p, err) + } + }(p) + } + activeLk.Lock() + delete(active, e.Key) + activeLk.Unlock() + }(e) + + case <-ctx.Done(): + return + } + } +} From 6fe8ead28faaef3a0141df4fa6b2be7ca2a9f748 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 27 Apr 2016 10:39:48 -0700 Subject: [PATCH 0454/1035] update libp2p with utp dep License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@c57e98efb70536d6e7684bed74c70ebf6b695301 --- bitswap/bitswap_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go 
| 2 +- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 3852b15a5..a994019ff 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmXDvxcXUYn2DDnGKJwdQPxkJgG83jBTp5UmmNzeHzqbj5/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 81fd16458..6cff5e554 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/net" + inet "gx/ipfs/QmXDvxcXUYn2DDnGKJwdQPxkJgG83jBTp5UmmNzeHzqbj5/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index d39fe4026..018714de0 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,7 +3,7 @@ package network import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - protocol "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/protocol" + protocol "gx/ipfs/QmXDvxcXUYn2DDnGKJwdQPxkJgG83jBTp5UmmNzeHzqbj5/go-libp2p/p2p/protocol" peer 
"gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 2d1512660..f52d949ff 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -4,8 +4,8 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" - host "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/host" - inet "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/net" + host "gx/ipfs/QmXDvxcXUYn2DDnGKJwdQPxkJgG83jBTp5UmmNzeHzqbj5/go-libp2p/p2p/host" + inet "gx/ipfs/QmXDvxcXUYn2DDnGKJwdQPxkJgG83jBTp5UmmNzeHzqbj5/go-libp2p/p2p/net" peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 7b2255b8e..904b4b712 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,7 +5,7 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmXDvxcXUYn2DDnGKJwdQPxkJgG83jBTp5UmmNzeHzqbj5/go-libp2p/p2p/net/mock" peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 504fb4f96..23fc6e74b 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,7 +10,7 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay 
"github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - p2ptestutil "gx/ipfs/QmYgaiNVVL7f2nydijAwpDRunRkmxfu3PoK87Y3pH84uAW/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmXDvxcXUYn2DDnGKJwdQPxkJgG83jBTp5UmmNzeHzqbj5/go-libp2p/p2p/test/util" peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) From d41c0ff8f010d2fa24df7ee64a420ccfabb3d2a5 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 27 Apr 2016 15:16:11 -0700 Subject: [PATCH 0455/1035] add test for double getting a block License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@88fb6cf09dd6f93ee09e88f1ec87ea91a72fb392 --- bitswap/bitswap_test.go | 52 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 3852b15a5..0df1f9b2c 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -308,3 +308,55 @@ func TestBasicBitswap(t *testing.T) { } } } + +func TestDoubleGet(t *testing.T) { + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + sg := NewTestSessionGenerator(net) + defer sg.Close() + bg := blocksutil.NewBlockGenerator() + + t.Log("Test a one node trying to get one block from another") + + instances := sg.Instances(2) + blocks := bg.Blocks(1) + + ctx1, cancel1 := context.WithCancel(context.Background()) + + blkch1, err := instances[1].Exchange.GetBlocks(ctx1, []key.Key{blocks[0].Key()}) + if err != nil { + t.Fatal(err) + } + + ctx2, cancel2 := context.WithCancel(context.Background()) + defer cancel2() + + blkch2, err := instances[1].Exchange.GetBlocks(ctx2, []key.Key{blocks[0].Key()}) + if err != nil { + t.Fatal(err) + } + + cancel1() + + _, ok := <-blkch1 + if ok { + t.Fatal("expected channel to be closed") + } + + err = instances[0].Exchange.HasBlock(blocks[0]) + if err != nil { + t.Fatal(err) + } + + blk, ok 
:= <-blkch2 + if !ok { + t.Fatal("expected to get the block here") + } + t.Log(blk) + + for _, inst := range instances { + err := inst.Exchange.Close() + if err != nil { + t.Fatal(err) + } + } +} From ff341c65f73083cff930ce02ab66af86deab8c3a Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 27 Apr 2016 20:45:06 -0700 Subject: [PATCH 0456/1035] fix doubleGet issue caused by hasblock not announcing License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@710d2509985fd085d4656db5edb06cea879cf677 --- bitswap/bitswap.go | 2 ++ bitswap/bitswap_test.go | 2 ++ bitswap/decision/engine.go | 36 ++++++++++++++++++++++++++---------- bitswap/decision/ledger.go | 7 ++----- bitswap/wantlist/wantlist.go | 33 +++++++++++++++++++++++++-------- 5 files changed, 57 insertions(+), 23 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index bf509fc55..c34dbc89b 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -264,6 +264,8 @@ func (bs *Bitswap) HasBlock(blk *blocks.Block) error { bs.notifications.Publish(blk) + bs.engine.AddBlock(blk) + select { case bs.newBlocks <- blk: // send block off to be reprovided diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 0df1f9b2c..aa367edb1 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -335,6 +335,8 @@ func TestDoubleGet(t *testing.T) { t.Fatal(err) } + // ensure both requests make it into the wantlist at the same time + time.Sleep(time.Millisecond * 100) cancel1() _, ok := <-blkch1 diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 6a026858f..8d738e306 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -83,7 +83,7 @@ type Engine struct { bs bstore.Blockstore - lock sync.RWMutex // protects the fields immediatly below + lock sync.Mutex // protects the fields immediatly below // ledgerMap lists Ledgers by their Partner key. 
ledgerMap map[peer.ID]*ledger } @@ -178,8 +178,8 @@ func (e *Engine) Outbox() <-chan (<-chan *Envelope) { // Returns a slice of Peers with whom the local node has active sessions func (e *Engine) Peers() []peer.ID { - e.lock.RLock() - defer e.lock.RUnlock() + e.lock.Lock() + defer e.lock.Unlock() response := make([]peer.ID, 0) for _, ledger := range e.ledgerMap { @@ -217,7 +217,7 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { e.peerRequestQueue.Remove(entry.Key, p) } else { log.Debugf("wants %s - %d", entry.Key, entry.Priority) - l.Wants(entry.Ctx, entry.Key, entry.Priority) + l.Wants(entry.Key, entry.Priority) if exists, err := e.bs.Has(entry.Key); err == nil && exists { e.peerRequestQueue.Push(entry.Entry, p) newWorkExists = true @@ -228,16 +228,32 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { for _, block := range m.Blocks() { log.Debugf("got block %s %d bytes", block.Key(), len(block.Data)) l.ReceivedBytes(len(block.Data)) - for _, l := range e.ledgerMap { - if entry, ok := l.WantListContains(block.Key()); ok { - e.peerRequestQueue.Push(entry, l.Partner) - newWorkExists = true - } - } } return nil } +func (e *Engine) addBlock(block *blocks.Block) { + work := false + + for _, l := range e.ledgerMap { + if entry, ok := l.WantListContains(block.Key()); ok { + e.peerRequestQueue.Push(entry, l.Partner) + work = true + } + } + + if work { + e.signalNewWork() + } +} + +func (e *Engine) AddBlock(block *blocks.Block) { + e.lock.Lock() + defer e.lock.Unlock() + + e.addBlock(block) +} + // TODO add contents of m.WantList() to my local wantlist? NB: could introduce // race conditions where I send a message, but MessageSent gets handled after // MessageReceived. 
The information in the local wantlist could become diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 7b8982e47..95239de4e 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,8 +6,6 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" - - "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) // keySet is just a convenient alias for maps of keys, where we only care @@ -69,10 +67,9 @@ func (l *ledger) ReceivedBytes(n int) { l.Accounting.BytesRecv += uint64(n) } -// TODO: this needs to be different. We need timeouts. -func (l *ledger) Wants(ctx context.Context, k key.Key, priority int) { +func (l *ledger) Wants(k key.Key, priority int) { log.Debugf("peer %s wants %s", l.Partner, k) - l.wantList.Add(ctx, k, priority) + l.wantList.Add(k, priority) } func (l *ledger) CancelWant(k key.Key) { diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 545b98f7c..77b959a65 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -22,11 +22,12 @@ type Wantlist struct { } type Entry struct { - // TODO consider making entries immutable so they can be shared safely and - // slices can be copied efficiently. 
Key key.Key Priority int - Ctx context.Context + + Ctx context.Context + cancel func() + RefCnt int } type entrySlice []Entry @@ -47,10 +48,10 @@ func New() *Wantlist { } } -func (w *ThreadSafe) Add(ctx context.Context, k key.Key, priority int) { +func (w *ThreadSafe) Add(k key.Key, priority int) { w.lk.Lock() defer w.lk.Unlock() - w.Wantlist.Add(ctx, k, priority) + w.Wantlist.Add(k, priority) } func (w *ThreadSafe) AddEntry(e Entry) { @@ -93,14 +94,19 @@ func (w *Wantlist) Len() int { return len(w.set) } -func (w *Wantlist) Add(ctx context.Context, k key.Key, priority int) { - if _, ok := w.set[k]; ok { +func (w *Wantlist) Add(k key.Key, priority int) { + if e, ok := w.set[k]; ok { + e.RefCnt++ return } + + ctx, cancel := context.WithCancel(context.Background()) w.set[k] = Entry{ Key: k, Priority: priority, Ctx: ctx, + cancel: cancel, + RefCnt: 1, } } @@ -112,7 +118,18 @@ func (w *Wantlist) AddEntry(e Entry) { } func (w *Wantlist) Remove(k key.Key) { - delete(w.set, k) + e, ok := w.set[k] + if !ok { + return + } + + e.RefCnt-- + if e.RefCnt <= 0 { + delete(w.set, k) + if e.cancel != nil { + e.cancel() + } + } } func (w *Wantlist) Contains(k key.Key) (Entry, bool) { From 1f5cc9e9981a3280668e75c54890fc92dd3302b7 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Wed, 4 May 2016 22:56:39 +0200 Subject: [PATCH 0457/1035] Update go-log to 1.1.0 and fix calls to go-log.Uuid License: MIT Signed-off-by: Hector Sanjuan This commit was moved from ipfs/go-bitswap@b09356be60ebd9c091502aacc29a0b0fd4f82211 --- bitswap/bitswap.go | 12 ++++++------ bitswap/decision/engine.go | 2 +- bitswap/network/ipfs_impl.go | 2 +- bitswap/workers.go | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index c34dbc89b..4457dea29 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -8,6 +8,12 @@ import ( "sync" "time" + process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" + procctx 
"gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" + peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" + blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" key "github.com/ipfs/go-ipfs/blocks/key" @@ -19,11 +25,6 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" - process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" - procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" - peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" ) var log = logging.Logger("bitswap") @@ -163,7 +164,6 @@ func (bs *Bitswap) GetBlock(parent context.Context, k key.Key) (*blocks.Block, e ctx, cancelFunc := context.WithCancel(parent) - ctx = logging.ContextWithLoggable(ctx, logging.Uuid("GetBlockRequest")) log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k) defer log.Event(ctx, "Bitswap.GetBlockRequest.End", &k) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 8d738e306..366e8ab23 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -10,7 +10,7 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" + logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" ) // TODO consider taking responsibility for other types of 
requests. For diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index f52d949ff..e46d073a4 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,7 +8,7 @@ import ( inet "gx/ipfs/QmXDvxcXUYn2DDnGKJwdQPxkJgG83jBTp5UmmNzeHzqbj5/go-libp2p/p2p/net" peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" + logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" ma "gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr" ) diff --git a/bitswap/workers.go b/bitswap/workers.go index 1bd9154f5..a9dbaa6f2 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -11,7 +11,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" - logging "gx/ipfs/Qmazh5oNUVsDZTs2g59rq8aYQqwpss8tcUWQzor5sCCEuH/go-log" + logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" ) var TaskWorkerCount = 8 From 7b7f33ce5b7e5c0d3f6eb567f8c587881ce97292 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Thu, 5 May 2016 00:54:20 +0200 Subject: [PATCH 0458/1035] Restore go-log.Uuid() calls as loggables.Uuid() calls License: MIT Signed-off-by: Hector Sanjuan This commit was moved from ipfs/go-bitswap@f4350456774da20c07fd5e0049449a55ccaa6b26 --- bitswap/bitswap.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 4457dea29..59e84d4b0 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -25,6 +25,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" + loggables "github.com/ipfs/go-ipfs/thirdparty/loggables" ) var log = logging.Logger("bitswap") @@ 
-164,6 +165,7 @@ func (bs *Bitswap) GetBlock(parent context.Context, k key.Key) (*blocks.Block, e ctx, cancelFunc := context.WithCancel(parent) + ctx = logging.ContextWithLoggable(ctx, loggables.Uuid("GetBlockRequest")) log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k) defer log.Event(ctx, "Bitswap.GetBlockRequest.End", &k) From a281127669cda560603654f91094883361357534 Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Thu, 5 May 2016 18:00:43 -0400 Subject: [PATCH 0459/1035] Make blocks.Block an interface. License: MIT Signed-off-by: Kevin Atkinson This commit was moved from ipfs/go-bitswap@aa8e4cd74deca2a7c9e92b55fc0f8dd183e1be98 --- bitswap/bitswap.go | 20 ++++++++++---------- bitswap/bitswap_test.go | 4 ++-- bitswap/decision/engine.go | 12 ++++++------ bitswap/decision/engine_test.go | 2 +- bitswap/message/message.go | 16 ++++++++-------- bitswap/notifications/notifications.go | 12 ++++++------ bitswap/notifications/notifications_test.go | 6 +++--- bitswap/testnet/network_test.go | 2 +- bitswap/workers.go | 2 +- 9 files changed, 38 insertions(+), 38 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 59e84d4b0..68f7f3e8d 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -90,7 +90,7 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, network: network, findKeys: make(chan *wantlist.Entry, sizeBatchRequestChan), process: px, - newBlocks: make(chan *blocks.Block, HasBlockBufferSize), + newBlocks: make(chan blocks.Block, HasBlockBufferSize), provideKeys: make(chan key.Key, provideKeysBufferSize), wm: NewWantManager(ctx, network), } @@ -137,7 +137,7 @@ type Bitswap struct { process process.Process - newBlocks chan *blocks.Block + newBlocks chan blocks.Block provideKeys chan key.Key @@ -154,7 +154,7 @@ type blockRequest struct { // GetBlock attempts to retrieve a particular block from peers within the // deadline enforced by the context. 
-func (bs *Bitswap) GetBlock(parent context.Context, k key.Key) (*blocks.Block, error) { +func (bs *Bitswap) GetBlock(parent context.Context, k key.Key) (blocks.Block, error) { // Any async work initiated by this function must end when this function // returns. To ensure this, derive a new context. Note that it is okay to @@ -209,9 +209,9 @@ func (bs *Bitswap) WantlistForPeer(p peer.ID) []key.Key { // NB: Your request remains open until the context expires. To conserve // resources, provide a context with a reasonably short deadline (ie. not one // that lasts throughout the lifetime of the server) -func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan *blocks.Block, error) { +func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan blocks.Block, error) { if len(keys) == 0 { - out := make(chan *blocks.Block) + out := make(chan blocks.Block) close(out) return out, nil } @@ -251,7 +251,7 @@ func (bs *Bitswap) CancelWants(ks []key.Key) { // HasBlock announces the existance of a block to this bitswap service. The // service will potentially notify its peers. 
-func (bs *Bitswap) HasBlock(blk *blocks.Block) error { +func (bs *Bitswap) HasBlock(blk blocks.Block) error { select { case <-bs.process.Closing(): return errors.New("bitswap is closed") @@ -277,7 +277,7 @@ func (bs *Bitswap) HasBlock(blk *blocks.Block) error { return nil } -func (bs *Bitswap) tryPutBlock(blk *blocks.Block, attempts int) error { +func (bs *Bitswap) tryPutBlock(blk blocks.Block, attempts int) error { var err error for i := 0; i < attempts; i++ { if err = bs.blockstore.Put(blk); err == nil { @@ -316,7 +316,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg wg := sync.WaitGroup{} for _, block := range iblocks { wg.Add(1) - go func(b *blocks.Block) { + go func(b blocks.Block) { defer wg.Done() if err := bs.updateReceiveCounters(b); err != nil { @@ -337,7 +337,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg var ErrAlreadyHaveBlock = errors.New("already have block") -func (bs *Bitswap) updateReceiveCounters(b *blocks.Block) error { +func (bs *Bitswap) updateReceiveCounters(b blocks.Block) error { bs.counterLk.Lock() defer bs.counterLk.Unlock() bs.blocksRecvd++ @@ -348,7 +348,7 @@ func (bs *Bitswap) updateReceiveCounters(b *blocks.Block) error { } if err == nil && has { bs.dupBlocksRecvd++ - bs.dupDataRecvd += uint64(len(b.Data)) + bs.dupDataRecvd += uint64(len(b.Data())) } if has { diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index d7fde792b..baab322e2 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -85,7 +85,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { t.Fatal("Expected to succeed") } - if !bytes.Equal(block.Data, received.Data) { + if !bytes.Equal(block.Data(), received.Data()) { t.Fatal("Data doesn't match") } } @@ -218,7 +218,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { } } -func getOrFail(bitswap Instance, b *blocks.Block, t *testing.T, wg *sync.WaitGroup) { +func getOrFail(bitswap 
Instance, b blocks.Block, t *testing.T, wg *sync.WaitGroup) { if _, err := bitswap.Blockstore().Get(b.Key()); err != nil { _, err := bitswap.Exchange.GetBlock(context.Background(), b.Key()) if err != nil { diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 366e8ab23..87a77b086 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -58,7 +58,7 @@ type Envelope struct { Peer peer.ID // Block is the payload - Block *blocks.Block + Block blocks.Block // A callback to notify the decision queue that the task is complete Sent func() @@ -226,13 +226,13 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { } for _, block := range m.Blocks() { - log.Debugf("got block %s %d bytes", block.Key(), len(block.Data)) - l.ReceivedBytes(len(block.Data)) + log.Debugf("got block %s %d bytes", block.Key(), len(block.Data())) + l.ReceivedBytes(len(block.Data())) } return nil } -func (e *Engine) addBlock(block *blocks.Block) { +func (e *Engine) addBlock(block blocks.Block) { work := false for _, l := range e.ledgerMap { @@ -247,7 +247,7 @@ func (e *Engine) addBlock(block *blocks.Block) { } } -func (e *Engine) AddBlock(block *blocks.Block) { +func (e *Engine) AddBlock(block blocks.Block) { e.lock.Lock() defer e.lock.Unlock() @@ -266,7 +266,7 @@ func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) error { l := e.findOrCreate(p) for _, block := range m.Blocks() { - l.SentBytes(len(block.Data)) + l.SentBytes(len(block.Data())) l.wantList.Remove(block.Key()) e.peerRequestQueue.Remove(block.Key(), p) } diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index d496096bb..4d906276b 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -188,7 +188,7 @@ func checkHandledInOrder(t *testing.T, e *Engine, keys []string) error { received := envelope.Block expected := blocks.NewBlock([]byte(k)) if received.Key() != expected.Key() { - return 
errors.New(fmt.Sprintln("received", string(received.Data), "expected", string(expected.Data))) + return errors.New(fmt.Sprintln("received", string(received.Data()), "expected", string(expected.Data()))) } } return nil diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 6cff5e554..76afd0cbf 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -22,7 +22,7 @@ type BitSwapMessage interface { Wantlist() []Entry // Blocks returns a slice of unique blocks - Blocks() []*blocks.Block + Blocks() []blocks.Block // AddEntry adds an entry to the Wantlist. AddEntry(key key.Key, priority int) @@ -34,7 +34,7 @@ type BitSwapMessage interface { // A full wantlist is an authoritative copy, a 'non-full' wantlist is a patch-set Full() bool - AddBlock(*blocks.Block) + AddBlock(blocks.Block) Exportable Loggable() map[string]interface{} @@ -48,7 +48,7 @@ type Exportable interface { type impl struct { full bool wantlist map[key.Key]Entry - blocks map[key.Key]*blocks.Block + blocks map[key.Key]blocks.Block } func New(full bool) BitSwapMessage { @@ -57,7 +57,7 @@ func New(full bool) BitSwapMessage { func newMsg(full bool) *impl { return &impl{ - blocks: make(map[key.Key]*blocks.Block), + blocks: make(map[key.Key]blocks.Block), wantlist: make(map[key.Key]Entry), full: full, } @@ -96,8 +96,8 @@ func (m *impl) Wantlist() []Entry { return out } -func (m *impl) Blocks() []*blocks.Block { - bs := make([]*blocks.Block, 0, len(m.blocks)) +func (m *impl) Blocks() []blocks.Block { + bs := make([]blocks.Block, 0, len(m.blocks)) for _, block := range m.blocks { bs = append(bs, block) } @@ -129,7 +129,7 @@ func (m *impl) addEntry(k key.Key, priority int, cancel bool) { } } -func (m *impl) AddBlock(b *blocks.Block) { +func (m *impl) AddBlock(b blocks.Block) { m.blocks[b.Key()] = b } @@ -156,7 +156,7 @@ func (m *impl) ToProto() *pb.Message { }) } for _, b := range m.Blocks() { - pbm.Blocks = append(pbm.Blocks, b.Data) + pbm.Blocks = append(pbm.Blocks, b.Data()) 
} return pbm } diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 8a83bba9b..0b7f4f33a 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -10,8 +10,8 @@ import ( const bufferSize = 16 type PubSub interface { - Publish(block *blocks.Block) - Subscribe(ctx context.Context, keys ...key.Key) <-chan *blocks.Block + Publish(block blocks.Block) + Subscribe(ctx context.Context, keys ...key.Key) <-chan blocks.Block Shutdown() } @@ -23,7 +23,7 @@ type impl struct { wrapped pubsub.PubSub } -func (ps *impl) Publish(block *blocks.Block) { +func (ps *impl) Publish(block blocks.Block) { topic := string(block.Key()) ps.wrapped.Pub(block, topic) } @@ -35,9 +35,9 @@ func (ps *impl) Shutdown() { // Subscribe returns a channel of blocks for the given |keys|. |blockChannel| // is closed if the |ctx| times out or is cancelled, or after sending len(keys) // blocks. -func (ps *impl) Subscribe(ctx context.Context, keys ...key.Key) <-chan *blocks.Block { +func (ps *impl) Subscribe(ctx context.Context, keys ...key.Key) <-chan blocks.Block { - blocksCh := make(chan *blocks.Block, len(keys)) + blocksCh := make(chan blocks.Block, len(keys)) valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking if len(keys) == 0 { close(blocksCh) @@ -55,7 +55,7 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...key.Key) <-chan *blocks.B if !ok { return } - block, ok := val.(*blocks.Block) + block, ok := val.(blocks.Block) if !ok { return } diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 02acbd13f..3e923b84e 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -151,15 +151,15 @@ func TestDoesNotDeadLockIfContextCancelledBeforePublish(t *testing.T) { t.Log("publishing the large number of blocks to the ignored channel must not deadlock") } 
-func assertBlockChannelNil(t *testing.T, blockChannel <-chan *blocks.Block) { +func assertBlockChannelNil(t *testing.T, blockChannel <-chan blocks.Block) { _, ok := <-blockChannel if ok { t.Fail() } } -func assertBlocksEqual(t *testing.T, a, b *blocks.Block) { - if !bytes.Equal(a.Data, b.Data) { +func assertBlocksEqual(t *testing.T, a, b blocks.Block) { + if !bytes.Equal(a.Data(), b.Data()) { t.Fatal("blocks aren't equal") } if a.Key() != b.Key() { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 609e51f7e..4db57ac8e 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -44,7 +44,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { // TODO assert that this came from the correct peer and that the message contents are as expected ok := false for _, b := range msgFromResponder.Blocks() { - if string(b.Data) == expectedStr { + if string(b.Data()) == expectedStr { wg.Done() ok = true } diff --git a/bitswap/workers.go b/bitswap/workers.go index a9dbaa6f2..2c190d000 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -61,7 +61,7 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableMap{ "ID": id, "Target": envelope.Peer.Pretty(), - "Block": envelope.Block.Multihash.B58String(), + "Block": envelope.Block.Multihash().B58String(), }) bs.wm.SendBlock(ctx, envelope) From 5af2d55b3301f34e3bf4b6df932ae39f9ea2fa01 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 5 May 2016 16:28:40 -0700 Subject: [PATCH 0460/1035] allow bitswap to read multiple messages per stream License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@58d17505d96145c3fe900691bae2408537a66456 --- bitswap/network/ipfs_impl.go | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 2d1512660..e0f2667ce 100644 --- 
a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -150,17 +150,19 @@ func (bsnet *impl) handleNewStream(s inet.Stream) { return } - received, err := bsmsg.FromNet(s) - if err != nil { - go bsnet.receiver.ReceiveError(err) - log.Debugf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err) - return - } + for { + received, err := bsmsg.FromNet(s) + if err != nil { + go bsnet.receiver.ReceiveError(err) + log.Debugf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err) + return + } - p := s.Conn().RemotePeer() - ctx := context.Background() - log.Debugf("bitswap net handleNewStream from %s", s.Conn().RemotePeer()) - bsnet.receiver.ReceiveMessage(ctx, p, received) + p := s.Conn().RemotePeer() + ctx := context.Background() + log.Debugf("bitswap net handleNewStream from %s", s.Conn().RemotePeer()) + bsnet.receiver.ReceiveMessage(ctx, p, received) + } } type netNotifiee impl From 371be5d493314c44d5623c7aac821298ed6e29e7 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 10 May 2016 16:06:28 -0700 Subject: [PATCH 0461/1035] update libp2p with go-multiaddr and go-stream-muxer updates License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@f64b013756b13e753e68e4dfadee5f9d191d4478 --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 8 ++++---- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 4 ++-- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 2 +- 17 files changed, 23 insertions(+), 23 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 68f7f3e8d..9f5c92d04 
100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -10,7 +10,7 @@ import ( process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" - peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" + peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index baab322e2..e752bcf1f 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmXDvxcXUYn2DDnGKJwdQPxkJgG83jBTp5UmmNzeHzqbj5/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmcQTVCQWCN2MYgBHpFXE5S56rcg2mRsxaRgMYmA1UWgA8/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 7b1d26fd9..d9ab28766 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" + "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 87a77b086..2fae95094 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -8,7 +8,7 @@ import ( bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" + peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 4d906276b..3d1dfb8bc 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -14,7 +14,7 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" + peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 95239de4e..2c8ad65b6 100644 --- 
a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -5,7 +5,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" + peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 02535f7a8..4b3313d87 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" + peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" ) type peerRequestQueue interface { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 76afd0cbf..64146ab0b 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmXDvxcXUYn2DDnGKJwdQPxkJgG83jBTp5UmmNzeHzqbj5/go-libp2p/p2p/net" + inet "gx/ipfs/QmcQTVCQWCN2MYgBHpFXE5S56rcg2mRsxaRgMYmA1UWgA8/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 018714de0..e90b4db51 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,9 +3,9 @@ package network import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - 
protocol "gx/ipfs/QmXDvxcXUYn2DDnGKJwdQPxkJgG83jBTp5UmmNzeHzqbj5/go-libp2p/p2p/protocol" - peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" + peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + protocol "gx/ipfs/QmcQTVCQWCN2MYgBHpFXE5S56rcg2mRsxaRgMYmA1UWgA8/go-libp2p/p2p/protocol" ) var ProtocolBitswap protocol.ID = "/ipfs/bitswap" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index e46d073a4..a014b4ac9 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -4,12 +4,12 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" - host "gx/ipfs/QmXDvxcXUYn2DDnGKJwdQPxkJgG83jBTp5UmmNzeHzqbj5/go-libp2p/p2p/host" - inet "gx/ipfs/QmXDvxcXUYn2DDnGKJwdQPxkJgG83jBTp5UmmNzeHzqbj5/go-libp2p/p2p/net" - peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" + ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" + peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" - ma "gx/ipfs/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz/go-multiaddr" + host "gx/ipfs/QmcQTVCQWCN2MYgBHpFXE5S56rcg2mRsxaRgMYmA1UWgA8/go-libp2p/p2p/host" + inet "gx/ipfs/QmcQTVCQWCN2MYgBHpFXE5S56rcg2mRsxaRgMYmA1UWgA8/go-libp2p/p2p/net" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 73fb8bac7..a0cfdf533 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer 
"gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" + peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 4db57ac8e..a1e0703f3 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" + peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 904b4b712..2f55573c3 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,9 +5,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet "gx/ipfs/QmXDvxcXUYn2DDnGKJwdQPxkJgG83jBTp5UmmNzeHzqbj5/go-libp2p/p2p/net/mock" - peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" + peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + mockpeernet "gx/ipfs/QmcQTVCQWCN2MYgBHpFXE5S56rcg2mRsxaRgMYmA1UWgA8/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 40cb9e13f..0de86ecf7 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer 
"gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" + peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 23fc6e74b..2266fde4e 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,9 +10,9 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - p2ptestutil "gx/ipfs/QmXDvxcXUYn2DDnGKJwdQPxkJgG83jBTp5UmmNzeHzqbj5/go-libp2p/p2p/test/util" - peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" + peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + p2ptestutil "gx/ipfs/QmcQTVCQWCN2MYgBHpFXE5S56rcg2mRsxaRgMYmA1UWgA8/go-libp2p/p2p/test/util" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index be68b3faa..44d25ea92 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,7 +9,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" + peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/workers.go b/bitswap/workers.go index 2c190d000..4e18b7bbb 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -10,7 +10,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmZwZjMVGss5rqYsJVGy18gNbkTJffFyq2x1uJ4e4p3ZAt/go-libp2p-peer" + peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" ) From 47329ad97fb55f1c7e10e6cd1cf885d8b270bcf5 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 13 May 2016 13:42:46 -0700 Subject: [PATCH 0462/1035] update deps to introduce yamux hang fix License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@95b8e09e9b23b50b9b8d094d1176a0b0768a781c --- bitswap/bitswap_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index e752bcf1f..4c7e4919d 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - 
p2ptestutil "gx/ipfs/QmcQTVCQWCN2MYgBHpFXE5S56rcg2mRsxaRgMYmA1UWgA8/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmZpVD1kkRwoC67vNknvCrY72pjmVdtZ7txSk8mtCbuwd3/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 64146ab0b..9b977c6c7 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmcQTVCQWCN2MYgBHpFXE5S56rcg2mRsxaRgMYmA1UWgA8/go-libp2p/p2p/net" + inet "gx/ipfs/QmZpVD1kkRwoC67vNknvCrY72pjmVdtZ7txSk8mtCbuwd3/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index e90b4db51..912a9a1c1 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -4,8 +4,8 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" + protocol "gx/ipfs/QmZpVD1kkRwoC67vNknvCrY72pjmVdtZ7txSk8mtCbuwd3/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - protocol "gx/ipfs/QmcQTVCQWCN2MYgBHpFXE5S56rcg2mRsxaRgMYmA1UWgA8/go-libp2p/p2p/protocol" ) var ProtocolBitswap protocol.ID = "/ipfs/bitswap" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index a75e706d0..25c372f78 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -6,10 +6,10 @@ import ( routing "github.com/ipfs/go-ipfs/routing" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" peer 
"gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" + host "gx/ipfs/QmZpVD1kkRwoC67vNknvCrY72pjmVdtZ7txSk8mtCbuwd3/go-libp2p/p2p/host" + inet "gx/ipfs/QmZpVD1kkRwoC67vNknvCrY72pjmVdtZ7txSk8mtCbuwd3/go-libp2p/p2p/net" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" - host "gx/ipfs/QmcQTVCQWCN2MYgBHpFXE5S56rcg2mRsxaRgMYmA1UWgA8/go-libp2p/p2p/host" - inet "gx/ipfs/QmcQTVCQWCN2MYgBHpFXE5S56rcg2mRsxaRgMYmA1UWgA8/go-libp2p/p2p/net" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 2f55573c3..f5ee5f682 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -6,8 +6,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" + mockpeernet "gx/ipfs/QmZpVD1kkRwoC67vNknvCrY72pjmVdtZ7txSk8mtCbuwd3/go-libp2p/p2p/net/mock" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - mockpeernet "gx/ipfs/QmcQTVCQWCN2MYgBHpFXE5S56rcg2mRsxaRgMYmA1UWgA8/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 2266fde4e..4229e5d9d 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -11,8 +11,8 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" + p2ptestutil "gx/ipfs/QmZpVD1kkRwoC67vNknvCrY72pjmVdtZ7txSk8mtCbuwd3/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - p2ptestutil "gx/ipfs/QmcQTVCQWCN2MYgBHpFXE5S56rcg2mRsxaRgMYmA1UWgA8/go-libp2p/p2p/test/util" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
From 910e4201bbe62bdf25ef2064e724875c42418799 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 16 May 2016 11:22:36 -0700 Subject: [PATCH 0463/1035] update libp2p to v3.2.1 License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@263b899d359e209dd33cd9d2379bb4411d47d872 --- bitswap/bitswap_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 4c7e4919d..024de3389 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmZpVD1kkRwoC67vNknvCrY72pjmVdtZ7txSk8mtCbuwd3/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmUHrgorZ1F9yGkgF2His5fsQ9xtCzjdsPGjizmcEW94i5/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 9b977c6c7..5b0b4adbd 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmZpVD1kkRwoC67vNknvCrY72pjmVdtZ7txSk8mtCbuwd3/go-libp2p/p2p/net" + inet "gx/ipfs/QmUHrgorZ1F9yGkgF2His5fsQ9xtCzjdsPGjizmcEW94i5/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 912a9a1c1..4ce7a4004 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,8 +3,8 @@ package network import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + protocol "gx/ipfs/QmUHrgorZ1F9yGkgF2His5fsQ9xtCzjdsPGjizmcEW94i5/go-libp2p/p2p/protocol" peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" - protocol "gx/ipfs/QmZpVD1kkRwoC67vNknvCrY72pjmVdtZ7txSk8mtCbuwd3/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 25c372f78..c9b0404a2 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -4,10 +4,10 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" + host "gx/ipfs/QmUHrgorZ1F9yGkgF2His5fsQ9xtCzjdsPGjizmcEW94i5/go-libp2p/p2p/host" + inet "gx/ipfs/QmUHrgorZ1F9yGkgF2His5fsQ9xtCzjdsPGjizmcEW94i5/go-libp2p/p2p/net" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" peer 
"gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" - host "gx/ipfs/QmZpVD1kkRwoC67vNknvCrY72pjmVdtZ7txSk8mtCbuwd3/go-libp2p/p2p/host" - inet "gx/ipfs/QmZpVD1kkRwoC67vNknvCrY72pjmVdtZ7txSk8mtCbuwd3/go-libp2p/p2p/net" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index f5ee5f682..2d18b1734 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,8 +5,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + mockpeernet "gx/ipfs/QmUHrgorZ1F9yGkgF2His5fsQ9xtCzjdsPGjizmcEW94i5/go-libp2p/p2p/net/mock" peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" - mockpeernet "gx/ipfs/QmZpVD1kkRwoC67vNknvCrY72pjmVdtZ7txSk8mtCbuwd3/go-libp2p/p2p/net/mock" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 4229e5d9d..35248fd86 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,8 +10,8 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + p2ptestutil "gx/ipfs/QmUHrgorZ1F9yGkgF2His5fsQ9xtCzjdsPGjizmcEW94i5/go-libp2p/p2p/test/util" peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" - p2ptestutil "gx/ipfs/QmZpVD1kkRwoC67vNknvCrY72pjmVdtZ7txSk8mtCbuwd3/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) From 16cd25bb4ff3d3653f786ce3b422d6a4e73bfcdf Mon Sep 17 00:00:00 2001 From: jbenet Date: Mon, 16 May 2016 22:39:39 -0700 Subject: [PATCH 0464/1035] add error checking for nil keys Checks in: - blockstore - blockservice 
- dagservice - bitswap Do not anger the pokemans #2715 License: MIT Signed-off-by: Juan Benet This commit was moved from ipfs/go-bitswap@4abba3489960f957c01043b79fa52824a97b6162 --- bitswap/bitswap.go | 3 +++ bitswap/bitswap_test.go | 13 +++++++++++++ 2 files changed, 16 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 9f5c92d04..0afed265e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -155,6 +155,9 @@ type blockRequest struct { // GetBlock attempts to retrieve a particular block from peers within the // deadline enforced by the context. func (bs *Bitswap) GetBlock(parent context.Context, k key.Key) (blocks.Block, error) { + if k == "" { + return nil, blockstore.ErrNotFound + } // Any async work initiated by this function must end when this function // returns. To ensure this, derive a new context. Note that it is okay to diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 024de3389..136fa85d2 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -11,6 +11,7 @@ import ( context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blocks "github.com/ipfs/go-ipfs/blocks" + blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" key "github.com/ipfs/go-ipfs/blocks/key" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" @@ -278,6 +279,18 @@ func TestSendToWantingPeer(t *testing.T) { } +func TestEmptyKey(t *testing.T) { + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + sg := NewTestSessionGenerator(net) + defer sg.Close() + bs := sg.Instances(1)[0].Exchange + + _, err := bs.GetBlock(context.Background(), key.Key("")) + if err != blockstore.ErrNotFound { + t.Error("empty str key should return ErrNotFound") + } +} + func TestBasicBitswap(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) sg := NewTestSessionGenerator(net) From 
2a03dc04408b9085c0a4b10c3b116cbc517b5901 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 17 May 2016 10:23:10 -0700 Subject: [PATCH 0465/1035] update go-libp2p 3.2.2, nil maddr fixes License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@786bcdee6c27379385f3893324c607fbe991288f --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 6 +++--- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 4 ++-- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 2 +- 17 files changed, 22 insertions(+), 22 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 9f5c92d04..d5eb882b8 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -10,9 +10,9 @@ import ( process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" - peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" + peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 024de3389..7ef2a0d96 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay 
"github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmUHrgorZ1F9yGkgF2His5fsQ9xtCzjdsPGjizmcEW94i5/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmVL44QeoQDTYK8RVdpkyja7uYcK3WDNoBNHVLonf9YDtm/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index d9ab28766..465cda486 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" + "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 2fae95094..ff4fa1fa7 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -8,9 +8,9 @@ import ( bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" + peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) // TODO consider taking responsibility for other types of requests. 
For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 3d1dfb8bc..2b1dea072 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -14,8 +14,8 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 2c8ad65b6..6200f5338 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -5,7 +5,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" + peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 4b3313d87..d68579df6 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" + peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) type peerRequestQueue interface { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 5b0b4adbd..95a86ee49 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb 
"github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmUHrgorZ1F9yGkgF2His5fsQ9xtCzjdsPGjizmcEW94i5/go-libp2p/p2p/net" + inet "gx/ipfs/QmVL44QeoQDTYK8RVdpkyja7uYcK3WDNoBNHVLonf9YDtm/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 4ce7a4004..80e345516 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,9 +3,9 @@ package network import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - protocol "gx/ipfs/QmUHrgorZ1F9yGkgF2His5fsQ9xtCzjdsPGjizmcEW94i5/go-libp2p/p2p/protocol" - peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" + protocol "gx/ipfs/QmVL44QeoQDTYK8RVdpkyja7uYcK3WDNoBNHVLonf9YDtm/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) var ProtocolBitswap protocol.ID = "/ipfs/bitswap" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index c9b0404a2..4158b65a1 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -4,12 +4,12 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" - host "gx/ipfs/QmUHrgorZ1F9yGkgF2His5fsQ9xtCzjdsPGjizmcEW94i5/go-libp2p/p2p/host" - inet "gx/ipfs/QmUHrgorZ1F9yGkgF2His5fsQ9xtCzjdsPGjizmcEW94i5/go-libp2p/p2p/net" + host "gx/ipfs/QmVL44QeoQDTYK8RVdpkyja7uYcK3WDNoBNHVLonf9YDtm/go-libp2p/p2p/host" + inet "gx/ipfs/QmVL44QeoQDTYK8RVdpkyja7uYcK3WDNoBNHVLonf9YDtm/go-libp2p/p2p/net" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" - peer 
"gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" + peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index a0cfdf533..c894160e4 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" + peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index a1e0703f3..e45f91692 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -10,8 +10,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 2d18b1734..2e5c5e4a4 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,9 +5,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet "gx/ipfs/QmUHrgorZ1F9yGkgF2His5fsQ9xtCzjdsPGjizmcEW94i5/go-libp2p/p2p/net/mock" - peer 
"gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" + mockpeernet "gx/ipfs/QmVL44QeoQDTYK8RVdpkyja7uYcK3WDNoBNHVLonf9YDtm/go-libp2p/p2p/net/mock" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 0de86ecf7..64013603e 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,8 +10,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 35248fd86..35a789284 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,9 +10,9 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - p2ptestutil "gx/ipfs/QmUHrgorZ1F9yGkgF2His5fsQ9xtCzjdsPGjizmcEW94i5/go-libp2p/p2p/test/util" - peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" + p2ptestutil "gx/ipfs/QmVL44QeoQDTYK8RVdpkyja7uYcK3WDNoBNHVLonf9YDtm/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 44d25ea92..52dc514d3 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,8 +9,8 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) type WantManager struct { diff --git a/bitswap/workers.go b/bitswap/workers.go index 4e18b7bbb..7717a7170 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -10,8 +10,8 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmZpD74pUj6vuxTp1o6LhA3JavC2Bvh9fsWPPVvHnD9sE7/go-libp2p-peer" logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" + peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) var TaskWorkerCount = 8 From 2932cba3b55feb22a442b4180210407268321c82 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 17 May 2016 15:59:20 -0700 Subject: [PATCH 0466/1035] fix receive loop error handling License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@844e5d0d4796aa1783258ef73895bf2dccebb5fa --- bitswap/message/message.go | 3 +++ bitswap/network/ipfs_impl.go | 12 +++++++++--- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 95a86ee49..47ec07ff2 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -135,7 +135,10 @@ func (m *impl) AddBlock(b blocks.Block) { func FromNet(r io.Reader) (BitSwapMessage, error) { pbr := ggio.NewDelimitedReader(r, inet.MessageSizeMax) + return FromPBReader(pbr) +} +func FromPBReader(pbr ggio.Reader) (BitSwapMessage, 
error) { pb := new(pb.Message) if err := pbr.ReadMsg(pb); err != nil { return nil, err diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 4158b65a1..24145eb96 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -1,12 +1,15 @@ package network import ( + "io" + key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" host "gx/ipfs/QmVL44QeoQDTYK8RVdpkyja7uYcK3WDNoBNHVLonf9YDtm/go-libp2p/p2p/host" inet "gx/ipfs/QmVL44QeoQDTYK8RVdpkyja7uYcK3WDNoBNHVLonf9YDtm/go-libp2p/p2p/net" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" + ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" @@ -150,11 +153,14 @@ func (bsnet *impl) handleNewStream(s inet.Stream) { return } + reader := ggio.NewDelimitedReader(s, inet.MessageSizeMax) for { - received, err := bsmsg.FromNet(s) + received, err := bsmsg.FromPBReader(reader) if err != nil { - go bsnet.receiver.ReceiveError(err) - log.Debugf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err) + if err != io.EOF { + go bsnet.receiver.ReceiveError(err) + log.Debugf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err) + } return } From 14b356e0a409316dd668f17eabfe06f7c4d61681 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 30 May 2016 22:14:21 -0700 Subject: [PATCH 0467/1035] update libp2p to v3.2.3 License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@a3c1240a43c2d2b71b3f29bf95c85b5efcba11b0 --- bitswap/bitswap_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/testnet/peernet.go 
| 2 +- bitswap/testutils.go | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 31d4f74ae..e60e0e928 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -17,7 +17,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmVL44QeoQDTYK8RVdpkyja7uYcK3WDNoBNHVLonf9YDtm/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmRW2xiYTpDLWTHb822ZYbPBoh3dGLJwaXLGS9tnPyWZpq/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 47ec07ff2..43a7d2753 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmVL44QeoQDTYK8RVdpkyja7uYcK3WDNoBNHVLonf9YDtm/go-libp2p/p2p/net" + inet "gx/ipfs/QmRW2xiYTpDLWTHb822ZYbPBoh3dGLJwaXLGS9tnPyWZpq/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 80e345516..d0fe8d83a 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,7 +3,7 @@ package network import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - protocol "gx/ipfs/QmVL44QeoQDTYK8RVdpkyja7uYcK3WDNoBNHVLonf9YDtm/go-libp2p/p2p/protocol" + protocol "gx/ipfs/QmRW2xiYTpDLWTHb822ZYbPBoh3dGLJwaXLGS9tnPyWZpq/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" peer 
"gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 24145eb96..e70eeaf0c 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -6,8 +6,8 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" - host "gx/ipfs/QmVL44QeoQDTYK8RVdpkyja7uYcK3WDNoBNHVLonf9YDtm/go-libp2p/p2p/host" - inet "gx/ipfs/QmVL44QeoQDTYK8RVdpkyja7uYcK3WDNoBNHVLonf9YDtm/go-libp2p/p2p/net" + host "gx/ipfs/QmRW2xiYTpDLWTHb822ZYbPBoh3dGLJwaXLGS9tnPyWZpq/go-libp2p/p2p/host" + inet "gx/ipfs/QmRW2xiYTpDLWTHb822ZYbPBoh3dGLJwaXLGS9tnPyWZpq/go-libp2p/p2p/net" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 2e5c5e4a4..9a18a5d8a 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,7 +5,7 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet "gx/ipfs/QmVL44QeoQDTYK8RVdpkyja7uYcK3WDNoBNHVLonf9YDtm/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmRW2xiYTpDLWTHb822ZYbPBoh3dGLJwaXLGS9tnPyWZpq/go-libp2p/p2p/net/mock" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 35a789284..ddfa1a456 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,7 +10,7 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil 
"github.com/ipfs/go-ipfs/thirdparty/testutil" - p2ptestutil "gx/ipfs/QmVL44QeoQDTYK8RVdpkyja7uYcK3WDNoBNHVLonf9YDtm/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmRW2xiYTpDLWTHb822ZYbPBoh3dGLJwaXLGS9tnPyWZpq/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) From d37a3b73d5824ccb28b375454fd209fa4a4e36c8 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 1 Jun 2016 15:51:39 -0700 Subject: [PATCH 0468/1035] update libp2p to v3.3.1 License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@dceae2683938d2eace2d89bdfd6b645ef4e24094 --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 14 ++++++++------ bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 4 ++-- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 2 +- 17 files changed, 27 insertions(+), 25 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 3e060a76c..469b8af96 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -8,11 +8,11 @@ import ( "sync" "time" + peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" - peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" blocks 
"github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index e60e0e928..a3f336dbc 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -17,7 +17,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmRW2xiYTpDLWTHb822ZYbPBoh3dGLJwaXLGS9tnPyWZpq/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmQgQeBQxQmJdeUSaDagc8cr2ompDwGn13Cybjdtzfuaki/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 465cda486..283791ef0 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" + "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index ff4fa1fa7..99b8088cf 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -8,9 +8,9 @@ import ( bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" - peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) // TODO consider taking responsibility for other types of 
requests. For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 2b1dea072..87882c2fa 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -14,8 +14,8 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 6200f5338..479027678 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -5,7 +5,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" + peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index d68579df6..54cd19357 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" + peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" ) type peerRequestQueue interface { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 43a7d2753..e2f136f31 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key 
"github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmRW2xiYTpDLWTHb822ZYbPBoh3dGLJwaXLGS9tnPyWZpq/go-libp2p/p2p/net" + inet "gx/ipfs/QmQgQeBQxQmJdeUSaDagc8cr2ompDwGn13Cybjdtzfuaki/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index d0fe8d83a..7abeca1a3 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,9 +3,9 @@ package network import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - protocol "gx/ipfs/QmRW2xiYTpDLWTHb822ZYbPBoh3dGLJwaXLGS9tnPyWZpq/go-libp2p/p2p/protocol" + peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + protocol "gx/ipfs/QmQgQeBQxQmJdeUSaDagc8cr2ompDwGn13Cybjdtzfuaki/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) var ProtocolBitswap protocol.ID = "/ipfs/bitswap" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index e70eeaf0c..7dbd3ebdd 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -6,13 +6,15 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" - host "gx/ipfs/QmRW2xiYTpDLWTHb822ZYbPBoh3dGLJwaXLGS9tnPyWZpq/go-libp2p/p2p/host" - inet "gx/ipfs/QmRW2xiYTpDLWTHb822ZYbPBoh3dGLJwaXLGS9tnPyWZpq/go-libp2p/p2p/net" + + peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + host "gx/ipfs/QmQgQeBQxQmJdeUSaDagc8cr2ompDwGn13Cybjdtzfuaki/go-libp2p/p2p/host" + inet 
"gx/ipfs/QmQgQeBQxQmJdeUSaDagc8cr2ompDwGn13Cybjdtzfuaki/go-libp2p/p2p/net" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" + pstore "gx/ipfs/QmZ62t46e9p7vMYqCmptwQC1RhRv5cpQ5cwoqYspedaXyq/go-libp2p-peerstore" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" - peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) var log = logging.Logger("bitswap_network") @@ -45,7 +47,7 @@ func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (inet.Stream, // first, make sure we're connected. // if this fails, we cannot connect to given peer. //TODO(jbenet) move this into host.NewStream? - if err := bsnet.host.Connect(ctx, peer.PeerInfo{ID: p}); err != nil { + if err := bsnet.host.Connect(ctx, pstore.PeerInfo{ID: p}); err != nil { return nil, err } @@ -101,7 +103,7 @@ func (bsnet *impl) SetDelegate(r Receiver) { } func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { - return bsnet.host.Connect(ctx, peer.PeerInfo{ID: p}) + return bsnet.host.Connect(ctx, pstore.PeerInfo{ID: p}) } // FindProvidersAsync returns a channel of providers for the given key @@ -129,7 +131,7 @@ func (bsnet *impl) FindProvidersAsync(ctx context.Context, k key.Key, max int) < if info.ID == bsnet.host.ID() { continue // ignore self as provider } - bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, peer.TempAddrTTL) + bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, pstore.TempAddrTTL) select { case <-ctx.Done(): return diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index c894160e4..f9fe5e62f 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer 
"gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" + peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index e45f91692..af6edcad7 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -10,8 +10,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 9a18a5d8a..437af2dca 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,9 +5,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet "gx/ipfs/QmRW2xiYTpDLWTHb822ZYbPBoh3dGLJwaXLGS9tnPyWZpq/go-libp2p/p2p/net/mock" + peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + mockpeernet "gx/ipfs/QmQgQeBQxQmJdeUSaDagc8cr2ompDwGn13Cybjdtzfuaki/go-libp2p/p2p/net/mock" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 64013603e..89833b682 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,8 +10,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil 
"github.com/ipfs/go-ipfs/thirdparty/testutil" + peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index ddfa1a456..b6ccabf97 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,9 +10,9 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - p2ptestutil "gx/ipfs/QmRW2xiYTpDLWTHb822ZYbPBoh3dGLJwaXLGS9tnPyWZpq/go-libp2p/p2p/test/util" + peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + p2ptestutil "gx/ipfs/QmQgQeBQxQmJdeUSaDagc8cr2ompDwGn13Cybjdtzfuaki/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 52dc514d3..50fdb37da 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,8 +9,8 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) type WantManager struct { diff --git a/bitswap/workers.go b/bitswap/workers.go index 7717a7170..8a68698c0 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -10,8 +10,8 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" - peer "gx/ipfs/QmbyvM8zRFDkbFdYyt1MnevUMJ62SiSGbfDFZ3Z8nkrzr4/go-libp2p-peer" ) var TaskWorkerCount = 8 From 61975fedb4ca82d768106dd65e8ded1685fa9f45 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 7 Jun 2016 00:20:06 -0700 Subject: [PATCH 0469/1035] update libp2p to version 3.2.2 License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@64ceafe16dcc6ba42fbfe17a381296412becf3a1 --- bitswap/bitswap_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index a3f336dbc..f39a44eb5 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -17,7 +17,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay 
"github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmQgQeBQxQmJdeUSaDagc8cr2ompDwGn13Cybjdtzfuaki/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmXJBB9U6e6ennAJPzk8E2rSaVGuHVR2jCxE9H9gPDtRrq/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/message/message.go b/bitswap/message/message.go index e2f136f31..3cfc82ae5 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmQgQeBQxQmJdeUSaDagc8cr2ompDwGn13Cybjdtzfuaki/go-libp2p/p2p/net" + inet "gx/ipfs/QmXJBB9U6e6ennAJPzk8E2rSaVGuHVR2jCxE9H9gPDtRrq/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 7abeca1a3..57692551f 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -4,7 +4,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" - protocol "gx/ipfs/QmQgQeBQxQmJdeUSaDagc8cr2ompDwGn13Cybjdtzfuaki/go-libp2p/p2p/protocol" + protocol "gx/ipfs/QmXJBB9U6e6ennAJPzk8E2rSaVGuHVR2jCxE9H9gPDtRrq/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 7dbd3ebdd..d5a168dc0 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,8 +8,8 @@ import ( routing "github.com/ipfs/go-ipfs/routing" peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" - host 
"gx/ipfs/QmQgQeBQxQmJdeUSaDagc8cr2ompDwGn13Cybjdtzfuaki/go-libp2p/p2p/host" - inet "gx/ipfs/QmQgQeBQxQmJdeUSaDagc8cr2ompDwGn13Cybjdtzfuaki/go-libp2p/p2p/net" + host "gx/ipfs/QmXJBB9U6e6ennAJPzk8E2rSaVGuHVR2jCxE9H9gPDtRrq/go-libp2p/p2p/host" + inet "gx/ipfs/QmXJBB9U6e6ennAJPzk8E2rSaVGuHVR2jCxE9H9gPDtRrq/go-libp2p/p2p/net" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" pstore "gx/ipfs/QmZ62t46e9p7vMYqCmptwQC1RhRv5cpQ5cwoqYspedaXyq/go-libp2p-peerstore" diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 437af2dca..f3085e697 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -6,7 +6,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" - mockpeernet "gx/ipfs/QmQgQeBQxQmJdeUSaDagc8cr2ompDwGn13Cybjdtzfuaki/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmXJBB9U6e6ennAJPzk8E2rSaVGuHVR2jCxE9H9gPDtRrq/go-libp2p/p2p/net/mock" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index b6ccabf97..ba9d923b0 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -11,7 +11,7 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" - p2ptestutil "gx/ipfs/QmQgQeBQxQmJdeUSaDagc8cr2ompDwGn13Cybjdtzfuaki/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmXJBB9U6e6ennAJPzk8E2rSaVGuHVR2jCxE9H9gPDtRrq/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) From b4c7de3d880bf4baadaa1a40074465b65aebcaa0 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 19 May 2016 14:32:56 -0700 Subject: [PATCH 0470/1035] Make bitswap better 
License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@4ae16a8f4d6a68e3ef1d4699fecfa37cce0ca9a5 --- bitswap/decision/engine.go | 21 +++++--- bitswap/decision/ledger.go | 3 ++ bitswap/decision/peer_request_queue.go | 57 +++++++++++++++++++-- bitswap/decision/peer_request_queue_test.go | 2 + bitswap/network/interface.go | 7 +++ bitswap/network/ipfs_impl.go | 21 ++++++++ bitswap/testnet/virtual.go | 24 +++++++++ bitswap/wantmanager.go | 42 +++++++++++---- 8 files changed, 156 insertions(+), 21 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 99b8088cf..a31ad6d7a 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -3,6 +3,7 @@ package decision import ( "sync" + "time" blocks "github.com/ipfs/go-ipfs/blocks" bstore "github.com/ipfs/go-ipfs/blocks/blockstore" @@ -68,7 +69,7 @@ type Engine struct { // peerRequestQueue is a priority queue of requests received from peers. // Requests are popped from the queue, packaged up, and placed in the // outbox. - peerRequestQueue peerRequestQueue + peerRequestQueue *prq // FIXME it's a bit odd for the client and the worker to both share memory // (both modify the peerRequestQueue) and also to communicate over the @@ -86,6 +87,8 @@ type Engine struct { lock sync.Mutex // protects the fields immediatly below // ledgerMap lists Ledgers by their Partner key. 
ledgerMap map[peer.ID]*ledger + + ticker *time.Ticker } func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { @@ -95,6 +98,7 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { peerRequestQueue: newPRQ(), outbox: make(chan (<-chan *Envelope), outboxChanBuffer), workSignal: make(chan struct{}, 1), + ticker: time.NewTicker(time.Millisecond * 100), } go e.taskWorker(ctx) return e @@ -142,6 +146,9 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { return nil, ctx.Err() case <-e.workSignal: nextTask = e.peerRequestQueue.Pop() + case <-e.ticker.C: + e.peerRequestQueue.thawRound() + nextTask = e.peerRequestQueue.Pop() } } @@ -191,9 +198,6 @@ func (e *Engine) Peers() []peer.ID { // MessageReceived performs book-keeping. Returns error if passed invalid // arguments. func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { - e.lock.Lock() - defer e.lock.Unlock() - if len(m.Wantlist()) == 0 && len(m.Blocks()) == 0 { log.Debugf("received empty message from %s", p) } @@ -206,6 +210,8 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { }() l := e.findOrCreate(p) + l.lk.Lock() + defer l.lk.Unlock() if m.Full() { l.wantList = wl.New() } @@ -236,10 +242,12 @@ func (e *Engine) addBlock(block blocks.Block) { work := false for _, l := range e.ledgerMap { + l.lk.Lock() if entry, ok := l.WantListContains(block.Key()); ok { e.peerRequestQueue.Push(entry, l.Partner) work = true } + l.lk.Unlock() } if work { @@ -261,9 +269,6 @@ func (e *Engine) AddBlock(block blocks.Block) { // send happen atomically func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) error { - e.lock.Lock() - defer e.lock.Unlock() - l := e.findOrCreate(p) for _, block := range m.Blocks() { l.SentBytes(len(block.Data())) @@ -290,11 +295,13 @@ func (e *Engine) numBytesReceivedFrom(p peer.ID) uint64 { // ledger lazily instantiates a ledger func (e *Engine) findOrCreate(p peer.ID) *ledger { + e.lock.Lock() l, ok 
:= e.ledgerMap[p] if !ok { l = newLedger(p) e.ledgerMap[p] = l } + e.lock.Unlock() return l } diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 479027678..dddefb596 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -1,6 +1,7 @@ package decision import ( + "sync" "time" key "github.com/ipfs/go-ipfs/blocks/key" @@ -44,6 +45,8 @@ type ledger struct { // sentToPeer is a set of keys to ensure we dont send duplicate blocks // to a given peer sentToPeer map[key.Key]time.Time + + lk sync.Mutex } type debtRatio struct { diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 54cd19357..21d219a71 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -15,14 +15,16 @@ type peerRequestQueue interface { Pop() *peerRequestTask Push(entry wantlist.Entry, to peer.ID) Remove(k key.Key, p peer.ID) + // NB: cannot expose simply expose taskQueue.Len because trashed elements // may exist. These trashed elements should not contribute to the count. 
} -func newPRQ() peerRequestQueue { +func newPRQ() *prq { return &prq{ taskMap: make(map[string]*peerRequestTask), partners: make(map[peer.ID]*activePartner), + frozen: make(map[peer.ID]*activePartner), pQueue: pq.New(partnerCompare), } } @@ -38,6 +40,8 @@ type prq struct { pQueue pq.PQ taskMap map[string]*peerRequestTask partners map[peer.ID]*activePartner + + frozen map[peer.ID]*activePartner } // Push currently adds a new peerRequestTask to the end of the list @@ -92,7 +96,7 @@ func (tl *prq) Pop() *peerRequestTask { partner := tl.pQueue.Pop().(*activePartner) var out *peerRequestTask - for partner.taskQueue.Len() > 0 { + for partner.taskQueue.Len() > 0 && partner.freezeVal == 0 { out = partner.taskQueue.Pop().(*peerRequestTask) delete(tl.taskMap, out.Key()) if out.trash { @@ -120,11 +124,47 @@ func (tl *prq) Remove(k key.Key, p peer.ID) { t.trash = true // having canceled a block, we now account for that in the given partner - tl.partners[p].requests-- + partner := tl.partners[p] + partner.requests-- + + // we now also 'freeze' that partner. 
If they sent us a cancel for a + // block we were about to send them, we should wait a short period of time + // to make sure we receive any other in-flight cancels before sending + // them a block they already potentially have + if partner.freezeVal == 0 { + tl.frozen[p] = partner + } + + partner.freezeVal++ + tl.pQueue.Update(partner.index) } tl.lock.Unlock() } +func (tl *prq) fullThaw() { + tl.lock.Lock() + defer tl.lock.Unlock() + + for id, partner := range tl.frozen { + partner.freezeVal = 0 + delete(tl.frozen, id) + tl.pQueue.Update(partner.index) + } +} + +func (tl *prq) thawRound() { + tl.lock.Lock() + defer tl.lock.Unlock() + + for id, partner := range tl.frozen { + partner.freezeVal -= (partner.freezeVal + 1) / 2 + if partner.freezeVal <= 0 { + delete(tl.frozen, id) + } + tl.pQueue.Update(partner.index) + } +} + type peerRequestTask struct { Entry wantlist.Entry Target peer.ID @@ -196,6 +236,8 @@ type activePartner struct { // for the PQ interface index int + freezeVal int + // priority queue of tasks belonging to this peer taskQueue pq.PQ } @@ -208,6 +250,7 @@ func newActivePartner() *activePartner { } // partnerCompare implements pq.ElemComparator +// returns true if peer 'a' has higher priority than peer 'b' func partnerCompare(a, b pq.Elem) bool { pa := a.(*activePartner) pb := b.(*activePartner) @@ -220,6 +263,14 @@ func partnerCompare(a, b pq.Elem) bool { if pb.requests == 0 { return true } + + if pa.freezeVal > pb.freezeVal { + return false + } + if pa.freezeVal < pb.freezeVal { + return true + } + if pa.active == pb.active { // sorting by taskQueue.Len() aids in cleaning out trash entries faster // if we sorted instead by requests, one peer could potentially build up diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index a2d96a9c6..b1091c03c 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -47,6 +47,8 @@ func TestPushPop(t *testing.T) 
{ prq.Remove(key.Key(consonant), partner) } + prq.fullThaw() + var out []string for { received := prq.Pop() diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 57692551f..42d509f63 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -25,9 +25,16 @@ type BitSwapNetwork interface { ConnectTo(context.Context, peer.ID) error + NewMessageSender(context.Context, peer.ID) (MessageSender, error) + Routing } +type MessageSender interface { + SendMsg(bsmsg.BitSwapMessage) error + Close() error +} + // Implement Receiver to receive messages from the BitSwapNetwork type Receiver interface { ReceiveMessage( diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index d5a168dc0..21f7f59f7 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -42,6 +42,27 @@ type impl struct { receiver Receiver } +type streamMessageSender struct { + s inet.Stream +} + +func (s *streamMessageSender) Close() error { + return s.s.Close() +} + +func (s *streamMessageSender) SendMsg(msg bsmsg.BitSwapMessage) error { + return msg.ToNet(s.s) +} + +func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID) (MessageSender, error) { + s, err := bsnet.newStreamToPeer(ctx, p) + if err != nil { + return nil, err + } + + return &streamMessageSender{s: s}, nil +} + func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (inet.Stream, error) { // first, make sure we're connected. 
diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 89833b682..d0555ff37 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -112,6 +112,30 @@ func (nc *networkClient) FindProvidersAsync(ctx context.Context, k key.Key, max return out } +type messagePasser struct { + net *network + target peer.ID + local peer.ID + ctx context.Context +} + +func (mp *messagePasser) SendMsg(m bsmsg.BitSwapMessage) error { + return mp.net.SendMessage(mp.ctx, mp.local, mp.target, m) +} + +func (mp *messagePasser) Close() error { + return nil +} + +func (n *networkClient) NewMessageSender(ctx context.Context, p peer.ID) (bsnet.MessageSender, error) { + return &messagePasser{ + net: n.network, + target: p, + local: n.local, + ctx: ctx, + }, nil +} + // Provide provides the key to the network func (nc *networkClient) Provide(ctx context.Context, k key.Key) error { return nc.routing.Provide(ctx, k) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 50fdb37da..24fd75c1e 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -26,9 +26,11 @@ type WantManager struct { network bsnet.BitSwapNetwork ctx context.Context + cancel func() } func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantManager { + ctx, cancel := context.WithCancel(ctx) return &WantManager{ incoming: make(chan []*bsmsg.Entry, 10), connect: make(chan peer.ID, 10), @@ -38,6 +40,7 @@ func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantMana wl: wantlist.NewThreadSafe(), network: network, ctx: ctx, + cancel: cancel, } } @@ -58,6 +61,8 @@ type msgQueue struct { out bsmsg.BitSwapMessage network bsnet.BitSwapNetwork + sender bsnet.MessageSender + refcnt int work chan struct{} @@ -150,6 +155,11 @@ func (pm *WantManager) stopPeerHandler(p peer.ID) { } func (mq *msgQueue) runQueue(ctx context.Context) { + defer func() { + if mq.sender != nil { + mq.sender.Close() + } + }() for { select { case <-mq.work: // there is 
work to be done @@ -166,14 +176,25 @@ func (mq *msgQueue) doWork(ctx context.Context) { // allow ten minutes for connections // this includes looking them up in the dht // dialing them, and handshaking - conctx, cancel := context.WithTimeout(ctx, time.Minute*10) - defer cancel() + if mq.sender == nil { + conctx, cancel := context.WithTimeout(ctx, time.Minute*10) + defer cancel() + + err := mq.network.ConnectTo(conctx, mq.p) + if err != nil { + log.Infof("cant connect to peer %s: %s", mq.p, err) + // TODO: cant connect, what now? + return + } - err := mq.network.ConnectTo(conctx, mq.p) - if err != nil { - log.Infof("cant connect to peer %s: %s", mq.p, err) - // TODO: cant connect, what now? - return + nsender, err := mq.network.NewMessageSender(ctx, mq.p) + if err != nil { + log.Infof("cant open new stream to peer %s: %s", mq.p, err) + // TODO: cant open stream, what now? + return + } + + mq.sender = nsender } // grab outgoing message @@ -186,13 +207,12 @@ func (mq *msgQueue) doWork(ctx context.Context) { mq.out = nil mq.outlk.Unlock() - sendctx, cancel := context.WithTimeout(ctx, time.Minute*5) - defer cancel() - // send wantlist updates - err = mq.network.SendMessage(sendctx, mq.p, wlm) + err := mq.sender.SendMsg(wlm) if err != nil { log.Infof("bitswap send error: %s", err) + mq.sender.Close() + mq.sender = nil // TODO: what do we do if this fails? 
return } From dafc918446814fe8b4338581a965d27c8d202aaf Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Thu, 9 Jun 2016 22:12:52 +0200 Subject: [PATCH 0471/1035] Update go-log https://github.com/ipfs/go-log/pull/3 License: MIT Signed-off-by: Jakub Sztandera This commit was moved from ipfs/go-bitswap@b76a5bbe3e7d777caf143616e8c486d3f936936f --- bitswap/bitswap.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/network/ipfs_impl.go | 2 +- bitswap/workers.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 469b8af96..4dd488027 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -11,8 +11,8 @@ import ( peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" + logging "gx/ipfs/QmYtB7Qge8cJpXc4irsEp8zRqfnZMBeB7aTrMEkPk67DRv/go-log" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 99b8088cf..ca03377df 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -9,8 +9,8 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + logging "gx/ipfs/QmYtB7Qge8cJpXc4irsEp8zRqfnZMBeB7aTrMEkPk67DRv/go-log" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" ) // TODO consider taking responsibility for other types of requests. 
For diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index d5a168dc0..bd13cd5f1 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -10,11 +10,11 @@ import ( peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" host "gx/ipfs/QmXJBB9U6e6ennAJPzk8E2rSaVGuHVR2jCxE9H9gPDtRrq/go-libp2p/p2p/host" inet "gx/ipfs/QmXJBB9U6e6ennAJPzk8E2rSaVGuHVR2jCxE9H9gPDtRrq/go-libp2p/p2p/net" + logging "gx/ipfs/QmYtB7Qge8cJpXc4irsEp8zRqfnZMBeB7aTrMEkPk67DRv/go-log" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" pstore "gx/ipfs/QmZ62t46e9p7vMYqCmptwQC1RhRv5cpQ5cwoqYspedaXyq/go-libp2p-peerstore" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/workers.go b/bitswap/workers.go index 8a68698c0..6d861649a 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -11,7 +11,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" - logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log" + logging "gx/ipfs/QmYtB7Qge8cJpXc4irsEp8zRqfnZMBeB7aTrMEkPk67DRv/go-log" ) var TaskWorkerCount = 8 From 2614fbcced5f0524e08800c36b7a7d0768bfacd6 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 11 Jun 2016 10:33:44 -0700 Subject: [PATCH 0472/1035] pull in libp2p updates with utp fixes License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@53553e6466d38a0f26c14ab28d79d4c929cd1ee4 --- bitswap/bitswap_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 6 +++--- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 6 files 
changed, 8 insertions(+), 8 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index f39a44eb5..a8ae91bcc 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -17,7 +17,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmXJBB9U6e6ennAJPzk8E2rSaVGuHVR2jCxE9H9gPDtRrq/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 3cfc82ae5..91a52f1ea 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmXJBB9U6e6ennAJPzk8E2rSaVGuHVR2jCxE9H9gPDtRrq/go-libp2p/p2p/net" + inet "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 57692551f..a2d2ea9ce 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -4,7 +4,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" - protocol "gx/ipfs/QmXJBB9U6e6ennAJPzk8E2rSaVGuHVR2jCxE9H9gPDtRrq/go-libp2p/p2p/protocol" + protocol "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git 
a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index bd13cd5f1..ab5d6178e 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,12 +8,12 @@ import ( routing "github.com/ipfs/go-ipfs/routing" peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" - host "gx/ipfs/QmXJBB9U6e6ennAJPzk8E2rSaVGuHVR2jCxE9H9gPDtRrq/go-libp2p/p2p/host" - inet "gx/ipfs/QmXJBB9U6e6ennAJPzk8E2rSaVGuHVR2jCxE9H9gPDtRrq/go-libp2p/p2p/net" + host "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/host" + inet "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/net" + pstore "gx/ipfs/QmXHUpFsnpCmanRnacqYkFoLoFfEq5yS2nUgGkAjJ1Nj9j/go-libp2p-peerstore" logging "gx/ipfs/QmYtB7Qge8cJpXc4irsEp8zRqfnZMBeB7aTrMEkPk67DRv/go-log" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - pstore "gx/ipfs/QmZ62t46e9p7vMYqCmptwQC1RhRv5cpQ5cwoqYspedaXyq/go-libp2p-peerstore" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index f3085e697..00c0fe63a 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -6,7 +6,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" - mockpeernet "gx/ipfs/QmXJBB9U6e6ennAJPzk8E2rSaVGuHVR2jCxE9H9gPDtRrq/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/net/mock" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index ba9d923b0..1eac5effe 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -11,7 +11,7 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil 
"github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" - p2ptestutil "gx/ipfs/QmXJBB9U6e6ennAJPzk8E2rSaVGuHVR2jCxE9H9gPDtRrq/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) From b25658c83060acdfce2e22bc9c3ed79f41e38a80 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Sat, 11 Jun 2016 16:11:56 +0200 Subject: [PATCH 0473/1035] Remove go-datastore from Godeps License: MIT Signed-off-by: Jakub Sztandera This commit was moved from ipfs/go-bitswap@73c52653d4e3da73061fc28276e8f5014cb15308 --- bitswap/decision/engine_test.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 87882c2fa..185f2685c 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -8,8 +8,8 @@ import ( "sync" "testing" - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 00c0fe63a..921d0232a 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -1,7 +1,7 @@ package bitswap import ( - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" + ds "github.com/ipfs/go-datastore" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil 
"github.com/ipfs/go-ipfs/thirdparty/testutil" diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 1eac5effe..a8147016f 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -3,8 +3,8 @@ package bitswap import ( "time" - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/sync" + ds "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" From bf527845d8de4f5c5b1f8d3de7d9629caaf148e6 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Sat, 11 Jun 2016 16:18:44 +0200 Subject: [PATCH 0474/1035] Import go-datastore to gx License: MIT Signed-off-by: Jakub Sztandera This commit was moved from ipfs/go-bitswap@9172ff34691f1ac863c6607b82e32ca585d2229e --- bitswap/decision/engine_test.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 185f2685c..9f0a365f7 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -8,13 +8,13 @@ import ( "sync" "testing" - ds "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore" + dssync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git 
a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 921d0232a..94c92eda0 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -1,12 +1,12 @@ package bitswap import ( - ds "github.com/ipfs/go-datastore" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" mockpeernet "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/net/mock" + ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index a8147016f..345e7c32c 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -3,8 +3,6 @@ package bitswap import ( "time" - ds "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" @@ -12,6 +10,8 @@ import ( testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" p2ptestutil "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/test/util" + ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore" + ds_sync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) From 104cd1c72cd1a3fb44d34768481f0860354a0b0a Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 15 Jun 2016 13:04:49 -0700 Subject: [PATCH 0475/1035] update go-libp2p to 3.3.4 License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@70a585d9e7e62ae3106679d1a37b57b77e1b7ea6 --- bitswap/bitswap_test.go | 2 +- 
bitswap/message/message.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index a8ae91bcc..0469e5ad0 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -17,7 +17,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmdBpVuSYuTGDA8Kn66CbKvEThXqKUh2nTANZEhzSxqrmJ/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 91a52f1ea..bbd85560a 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/net" + inet "gx/ipfs/QmdBpVuSYuTGDA8Kn66CbKvEThXqKUh2nTANZEhzSxqrmJ/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index d0bc7c38f..b27701591 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -4,8 +4,8 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" - protocol "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/protocol" context 
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + protocol "gx/ipfs/QmdBpVuSYuTGDA8Kn66CbKvEThXqKUh2nTANZEhzSxqrmJ/go-libp2p/p2p/protocol" ) var ProtocolBitswap protocol.ID = "/ipfs/bitswap" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 8068d13b9..c01ee409d 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,13 +8,13 @@ import ( routing "github.com/ipfs/go-ipfs/routing" peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" - host "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/host" - inet "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/net" pstore "gx/ipfs/QmXHUpFsnpCmanRnacqYkFoLoFfEq5yS2nUgGkAjJ1Nj9j/go-libp2p-peerstore" logging "gx/ipfs/QmYtB7Qge8cJpXc4irsEp8zRqfnZMBeB7aTrMEkPk67DRv/go-log" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + host "gx/ipfs/QmdBpVuSYuTGDA8Kn66CbKvEThXqKUh2nTANZEhzSxqrmJ/go-libp2p/p2p/host" + inet "gx/ipfs/QmdBpVuSYuTGDA8Kn66CbKvEThXqKUh2nTANZEhzSxqrmJ/go-libp2p/p2p/net" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 94c92eda0..0888a050d 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,9 +5,9 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" - mockpeernet "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + mockpeernet 
"gx/ipfs/QmdBpVuSYuTGDA8Kn66CbKvEThXqKUh2nTANZEhzSxqrmJ/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 345e7c32c..ce2f1c4b7 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -9,10 +9,10 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" - p2ptestutil "gx/ipfs/QmQkQP7WmeT9FRJDsEzAaGYDparttDiB6mCpVBrq2MuWQS/go-libp2p/p2p/test/util" ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore" ds_sync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + p2ptestutil "gx/ipfs/QmdBpVuSYuTGDA8Kn66CbKvEThXqKUh2nTANZEhzSxqrmJ/go-libp2p/p2p/test/util" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! From 8fa24381616ec0fdbf59f0523120e865437819aa Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Fri, 24 Jun 2016 18:38:07 +0200 Subject: [PATCH 0476/1035] Update go-log in whole dependency tree (#2898) * Update golog in go-ipfs License: MIT Signed-off-by: Jakub Sztandera * Update go-libp2p for go-log License: MIT Signed-off-by: Jakub Sztandera * Update go-libp2p-secio for go-log License: MIT Signed-off-by: Jakub Sztandera * Update go-libp2p-crypto for go-log License: MIT Signed-off-by: Jakub Sztandera * Update go-libp2p-peer for go-log License: MIT Signed-off-by: Jakub Sztandera * Import peersore, it wasn't imported License: MIT Signed-off-by: Jakub Sztandera * Update peerstore License: MIT Signed-off-by: Jakub Sztandera * Update peer License: MIT Signed-off-by: Jakub Sztandera * Update secio License: MIT Signed-off-by: Jakub Sztandera * Update go-libp2p License: MIT Signed-off-by: Jakub Sztandera This commit was moved from ipfs/go-bitswap@35d36b0e85e406bd64ffe3251f126ab545343a2c --- bitswap/bitswap.go | 4 ++-- 
bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 4 ++-- bitswap/decision/engine_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 10 +++++----- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 4 ++-- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 4 ++-- 17 files changed, 27 insertions(+), 27 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 4dd488027..f14fe9162 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -8,10 +8,10 @@ import ( "sync" "time" - peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log" process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" - logging "gx/ipfs/QmYtB7Qge8cJpXc4irsEp8zRqfnZMBeB7aTrMEkPk67DRv/go-log" + peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blocks "github.com/ipfs/go-ipfs/blocks" diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 0469e5ad0..b7a4f29df 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -17,7 +17,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmdBpVuSYuTGDA8Kn66CbKvEThXqKUh2nTANZEhzSxqrmJ/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 283791ef0..e0086e3a9 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 465c6cb3f..92f87c27e 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -9,8 +9,8 @@ import ( bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" - logging "gx/ipfs/QmYtB7Qge8cJpXc4irsEp8zRqfnZMBeB7aTrMEkPk67DRv/go-log" + logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log" + peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 9f0a365f7..e5836e464 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -12,7 +12,7 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore" dssync 
"gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index dddefb596..95cd303e2 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,7 +6,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 21d219a71..549de7c50 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" ) type peerRequestQueue interface { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index bbd85560a..56b4bc61e 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmdBpVuSYuTGDA8Kn66CbKvEThXqKUh2nTANZEhzSxqrmJ/go-libp2p/p2p/net" + inet "gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go 
b/bitswap/network/interface.go index b27701591..0888412ec 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,9 +3,9 @@ package network import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" + protocol "gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - protocol "gx/ipfs/QmdBpVuSYuTGDA8Kn66CbKvEThXqKUh2nTANZEhzSxqrmJ/go-libp2p/p2p/protocol" ) var ProtocolBitswap protocol.ID = "/ipfs/bitswap" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index c01ee409d..e73b8fb6e 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -7,14 +7,14 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" - peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" - pstore "gx/ipfs/QmXHUpFsnpCmanRnacqYkFoLoFfEq5yS2nUgGkAjJ1Nj9j/go-libp2p-peerstore" - logging "gx/ipfs/QmYtB7Qge8cJpXc4irsEp8zRqfnZMBeB7aTrMEkPk67DRv/go-log" + logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log" + pstore "gx/ipfs/QmQdnfvZQuhdT93LNc5bos52wAmdr3G2p6G8teLJMEN32P/go-libp2p-peerstore" + peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" + host "gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/host" + inet "gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/net" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - host 
"gx/ipfs/QmdBpVuSYuTGDA8Kn66CbKvEThXqKUh2nTANZEhzSxqrmJ/go-libp2p/p2p/host" - inet "gx/ipfs/QmdBpVuSYuTGDA8Kn66CbKvEThXqKUh2nTANZEhzSxqrmJ/go-libp2p/p2p/net" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index f9fe5e62f..ef79e722e 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index af6edcad7..19e2f2b71 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 0888a050d..6e072b8f7 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -4,10 +4,10 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore" + mockpeernet 
"gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/net/mock" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - mockpeernet "gx/ipfs/QmdBpVuSYuTGDA8Kn66CbKvEThXqKUh2nTANZEhzSxqrmJ/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index d0555ff37..a468de3bb 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index ce2f1c4b7..011ab3f2d 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -8,11 +8,11 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore" ds_sync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync" + p2ptestutil "gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - p2ptestutil "gx/ipfs/QmdBpVuSYuTGDA8Kn66CbKvEThXqKUh2nTANZEhzSxqrmJ/go-libp2p/p2p/test/util" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 24fd75c1e..9796aa499 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,7 +9,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" + peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/workers.go b/bitswap/workers.go index 6d861649a..ec7236543 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -10,8 +10,8 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer" - logging "gx/ipfs/QmYtB7Qge8cJpXc4irsEp8zRqfnZMBeB7aTrMEkPk67DRv/go-log" + logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log" + peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" ) var TaskWorkerCount = 8 From d6e86572c1fac69e97402db170cb094adb77636f Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 24 Jun 2016 16:54:33 -0700 Subject: [PATCH 0477/1035] fix argument placement on log message License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@3f382488e05d1d9444c4fe4eacb35ad2453794ed --- bitswap/wantmanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 9796aa499..f685c7079 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -109,7 +109,7 @@ func (pm *WantManager) SendBlock(ctx context.Context, env *engine.Envelope) { msg := bsmsg.New(false) msg.AddBlock(env.Block) - log.Infof("Sending block %s to %s", env.Peer, env.Block) + log.Infof("Sending block %s to %s", env.Block, env.Peer) err 
:= pm.network.SendMessage(ctx, env.Peer, msg) if err != nil { log.Infof("sendblock error: %s", err) From f0f9bd57629d17ecd260e41d5299b997a666ff52 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 22 Jun 2016 14:28:40 -0700 Subject: [PATCH 0478/1035] encode keys to datastore with base32 standard encoding Fixes #2601 Also bump version to 0.4.3-dev License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@a617ad59175ca160cb74939e5e406abbc86a5cac --- bitswap/decision/engine_test.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index e5836e464..8f0aca059 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -13,9 +13,9 @@ import ( message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" - ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore" - dssync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + ds "gx/ipfs/QmbCg24DeRKaRDLHbzzSVj7xndmWCPanBLkAM7Lx2nbrFs/go-datastore" + dssync "gx/ipfs/QmbCg24DeRKaRDLHbzzSVj7xndmWCPanBLkAM7Lx2nbrFs/go-datastore/sync" ) type peerAndEngine struct { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 6e072b8f7..99a9ef6f3 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,9 +5,9 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" - ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore" mockpeernet 
"gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/net/mock" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + ds "gx/ipfs/QmbCg24DeRKaRDLHbzzSVj7xndmWCPanBLkAM7Lx2nbrFs/go-datastore" ) type peernet struct { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 011ab3f2d..2fcb9e626 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -9,10 +9,10 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" - ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore" - ds_sync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync" p2ptestutil "gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + ds "gx/ipfs/QmbCg24DeRKaRDLHbzzSVj7xndmWCPanBLkAM7Lx2nbrFs/go-datastore" + ds_sync "gx/ipfs/QmbCg24DeRKaRDLHbzzSVj7xndmWCPanBLkAM7Lx2nbrFs/go-datastore/sync" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
From 1036afe316a36f6efcfc939ea26e446a59334720 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 1 Jul 2016 22:40:57 -0700 Subject: [PATCH 0479/1035] update go-datastore changes 0.1.2 License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@4c2d071646912b3cf32b26f0db5af9296a231290 --- bitswap/decision/engine_test.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 8f0aca059..3b8acc05f 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -14,8 +14,8 @@ import ( testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - ds "gx/ipfs/QmbCg24DeRKaRDLHbzzSVj7xndmWCPanBLkAM7Lx2nbrFs/go-datastore" - dssync "gx/ipfs/QmbCg24DeRKaRDLHbzzSVj7xndmWCPanBLkAM7Lx2nbrFs/go-datastore/sync" + ds "gx/ipfs/QmfQzVugPq1w5shWRcLWSeiHF4a2meBX7yVD8Vw7GWJM9o/go-datastore" + dssync "gx/ipfs/QmfQzVugPq1w5shWRcLWSeiHF4a2meBX7yVD8Vw7GWJM9o/go-datastore/sync" ) type peerAndEngine struct { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 99a9ef6f3..1d491a9a4 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -7,7 +7,7 @@ import ( peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" mockpeernet "gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/net/mock" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - ds "gx/ipfs/QmbCg24DeRKaRDLHbzzSVj7xndmWCPanBLkAM7Lx2nbrFs/go-datastore" + ds "gx/ipfs/QmfQzVugPq1w5shWRcLWSeiHF4a2meBX7yVD8Vw7GWJM9o/go-datastore" ) type peernet struct { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 2fcb9e626..6449412ba 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ 
-11,8 +11,8 @@ import ( peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" p2ptestutil "gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - ds "gx/ipfs/QmbCg24DeRKaRDLHbzzSVj7xndmWCPanBLkAM7Lx2nbrFs/go-datastore" - ds_sync "gx/ipfs/QmbCg24DeRKaRDLHbzzSVj7xndmWCPanBLkAM7Lx2nbrFs/go-datastore/sync" + ds "gx/ipfs/QmfQzVugPq1w5shWRcLWSeiHF4a2meBX7yVD8Vw7GWJM9o/go-datastore" + ds_sync "gx/ipfs/QmfQzVugPq1w5shWRcLWSeiHF4a2meBX7yVD8Vw7GWJM9o/go-datastore/sync" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! From 116195ec0fba0e9a1fa904db107218cf1ce1f23d Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Tue, 21 Jun 2016 21:05:59 +0200 Subject: [PATCH 0480/1035] blocks/blockstore: Add bloom filter Replace write_cache with bloom_cache Improve ARC caching Fix small issue in case of AllKeysChan fails deps: Update go-datastore blocks/blockstore: Invalidate ARC cache before deletin block deps: Update go-datastore License: MIT Signed-off-by: Jakub Sztandera This commit was moved from ipfs/go-bitswap@fe749737dfb09c20dedfb8fac53f3e51a03df725 --- bitswap/testutils.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 6449412ba..4df1d4fb6 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -87,12 +87,13 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // just a much better idea. 
func Session(ctx context.Context, net tn.Network, p testutil.Identity) Instance { bsdelay := delay.Fixed(0) + const bloomSize = 512 const writeCacheElems = 100 adapter := net.Adapter(p) dstore := ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay)) - bstore, err := blockstore.WriteCached(blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)), writeCacheElems) + bstore, err := blockstore.BloomCached(blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)), bloomSize, writeCacheElems) if err != nil { panic(err.Error()) // FIXME perhaps change signature and return error. } From 46166f4f44098e633ec36f3a683f5e596ef763e7 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 4 Jul 2016 12:27:26 -0700 Subject: [PATCH 0481/1035] update go-libp2p License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@4d2981d6f339b0e3c8d037f0445f9d3fb080bdaf --- bitswap/bitswap_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index b7a4f29df..6cbfe2b62 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -17,7 +17,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmVCe3SNMjkcPgnpFhZs719dheq6xE7gJwjzV7aWcUM4Ms/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 56b4bc61e..e828e0c25 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/net" + inet "gx/ipfs/QmVCe3SNMjkcPgnpFhZs719dheq6xE7gJwjzV7aWcUM4Ms/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 0888412ec..144d835c1 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -4,7 +4,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" - protocol "gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/protocol" + protocol "gx/ipfs/QmVCe3SNMjkcPgnpFhZs719dheq6xE7gJwjzV7aWcUM4Ms/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index e73b8fb6e..bf1259246 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -10,10 +10,10 @@ import ( logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log" pstore "gx/ipfs/QmQdnfvZQuhdT93LNc5bos52wAmdr3G2p6G8teLJMEN32P/go-libp2p-peerstore" peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" + host "gx/ipfs/QmVCe3SNMjkcPgnpFhZs719dheq6xE7gJwjzV7aWcUM4Ms/go-libp2p/p2p/host" + inet "gx/ipfs/QmVCe3SNMjkcPgnpFhZs719dheq6xE7gJwjzV7aWcUM4Ms/go-libp2p/p2p/net" ma 
"gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - host "gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/host" - inet "gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/net" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 1d491a9a4..c5fa32f09 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,7 +5,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" - mockpeernet "gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmVCe3SNMjkcPgnpFhZs719dheq6xE7gJwjzV7aWcUM4Ms/go-libp2p/p2p/net/mock" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ds "gx/ipfs/QmfQzVugPq1w5shWRcLWSeiHF4a2meBX7yVD8Vw7GWJM9o/go-datastore" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 4df1d4fb6..d42278e71 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -9,7 +9,7 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" - p2ptestutil "gx/ipfs/QmZ8bCZpMWDbFSh6h2zgTYwrhnjrGM5c9WCzw72SU8p63b/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmVCe3SNMjkcPgnpFhZs719dheq6xE7gJwjzV7aWcUM4Ms/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ds "gx/ipfs/QmfQzVugPq1w5shWRcLWSeiHF4a2meBX7yVD8Vw7GWJM9o/go-datastore" ds_sync "gx/ipfs/QmfQzVugPq1w5shWRcLWSeiHF4a2meBX7yVD8Vw7GWJM9o/go-datastore/sync" From da2bb9acb00ee6ba1cd1811e8744660b710ac3ca Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Mon, 4 
Jul 2016 23:02:29 +0200 Subject: [PATCH 0482/1035] blocks/blockstore: introduce context passing to blockstore License: MIT Signed-off-by: Jakub Sztandera This commit was moved from ipfs/go-bitswap@9876376a6b063a9da70d8be327d321706cdf1986 --- bitswap/testutils.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index d42278e71..a4be8d06f 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -93,7 +93,8 @@ func Session(ctx context.Context, net tn.Network, p testutil.Identity) Instance adapter := net.Adapter(p) dstore := ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay)) - bstore, err := blockstore.BloomCached(blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)), bloomSize, writeCacheElems) + bstore, err := blockstore.CachedBlockstore(blockstore.NewBlockstore( + ds_sync.MutexWrap(dstore)), ctx, blockstore.DefaultCacheOpts()) if err != nil { panic(err.Error()) // FIXME perhaps change signature and return error. 
} From 46dbf73ef6591190f70a9777afa7afeeb3f34579 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 26 Jul 2016 10:48:25 -0700 Subject: [PATCH 0483/1035] use batching datastore for providers storage License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@eaefd13e3f1ff78d4041cdd6f5e445c0100c71ee --- bitswap/decision/engine_test.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 3b8acc05f..d2c3190f6 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -13,9 +13,9 @@ import ( message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" + ds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore" + dssync "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore/sync" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - ds "gx/ipfs/QmfQzVugPq1w5shWRcLWSeiHF4a2meBX7yVD8Vw7GWJM9o/go-datastore" - dssync "gx/ipfs/QmfQzVugPq1w5shWRcLWSeiHF4a2meBX7yVD8Vw7GWJM9o/go-datastore/sync" ) type peerAndEngine struct { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index c5fa32f09..551b03382 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,9 +5,9 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" + ds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore" mockpeernet "gx/ipfs/QmVCe3SNMjkcPgnpFhZs719dheq6xE7gJwjzV7aWcUM4Ms/go-libp2p/p2p/net/mock" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - ds 
"gx/ipfs/QmfQzVugPq1w5shWRcLWSeiHF4a2meBX7yVD8Vw7GWJM9o/go-datastore" ) type peernet struct { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index a4be8d06f..b930f7ef5 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -9,10 +9,10 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" + ds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore" + ds_sync "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore/sync" p2ptestutil "gx/ipfs/QmVCe3SNMjkcPgnpFhZs719dheq6xE7gJwjzV7aWcUM4Ms/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - ds "gx/ipfs/QmfQzVugPq1w5shWRcLWSeiHF4a2meBX7yVD8Vw7GWJM9o/go-datastore" - ds_sync "gx/ipfs/QmfQzVugPq1w5shWRcLWSeiHF4a2meBX7yVD8Vw7GWJM9o/go-datastore/sync" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! From 65c6ec9d3fe72a7a2d4cf55d1317fb10ab6bd855 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 3 Aug 2016 17:58:02 -0700 Subject: [PATCH 0484/1035] bitswap: fix a minor data race race detector picked up a minor race condition, Since loop iteration reuses the same local variable, its not safe to take its address and use it concurrently. The fix is to rebind the variable into a controlled scope (creating a new variable) and taking the address of that to pass outwards. 
License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@3fbf157a1de464f696ee0619e4d232d3d857c10b --- bitswap/workers.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bitswap/workers.go b/bitswap/workers.go index ec7236543..4aa457917 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -173,6 +173,7 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) { case <-broadcastSignal.C: // resend unfulfilled wantlist keys log.Event(ctx, "Bitswap.Rebroadcast.active") for _, e := range bs.wm.wl.Entries() { + e := e bs.findKeys <- &e } case <-parent.Done(): From 041f22b7c6c069e7c61f6dcf15097b37ef54c9c8 Mon Sep 17 00:00:00 2001 From: Thomas Gardner Date: Fri, 5 Aug 2016 19:35:34 +1000 Subject: [PATCH 0485/1035] bitswap: add `ledger` subcommand License: MIT Signed-off-by: Thomas Gardner This commit was moved from ipfs/go-bitswap@3d7d133e7bdaf25858045e080bd7e109a413bd4f --- bitswap/bitswap.go | 4 ++++ bitswap/decision/engine.go | 15 +++++++++++++++ bitswap/decision/ledger.go | 8 ++++++++ 3 files changed, 27 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index f14fe9162..53fc9cba1 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -205,6 +205,10 @@ func (bs *Bitswap) WantlistForPeer(p peer.ID) []key.Key { return out } +func (bs *Bitswap) LedgerForPeer(p peer.ID) *decision.Receipt { + return bs.engine.LedgerForPeer(p) +} + // GetBlocks returns a channel where the caller may receive blocks that // correspond to the provided |keys|. Returns an error if BitSwap is unable to // begin this request within the deadline enforced by the context. 
diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 92f87c27e..06d2d03ed 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -114,6 +114,21 @@ func (e *Engine) WantlistForPeer(p peer.ID) (out []wl.Entry) { return out } +func (e *Engine) LedgerForPeer(p peer.ID) *Receipt { + ledger := e.findOrCreate(p) + + ledger.lk.Lock() + defer ledger.lk.Unlock() + + return &Receipt{ + Peer: ledger.Partner.String(), + Value: ledger.Accounting.Value(), + Sent: ledger.Accounting.BytesSent, + Recv: ledger.Accounting.BytesRecv, + Exchanged: ledger.ExchangeCount(), + } +} + func (e *Engine) taskWorker(ctx context.Context) { defer close(e.outbox) // because taskWorker uses the channel exclusively for { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 95cd303e2..3226f57ce 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -49,6 +49,14 @@ type ledger struct { lk sync.Mutex } +type Receipt struct { + Peer string + Value float64 + Sent uint64 + Recv uint64 + Exchanged uint64 +} + type debtRatio struct { BytesSent uint64 BytesRecv uint64 From fcc9392438c50b45ade5dbe37a01e0508399a476 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 16 Aug 2016 11:43:01 -0700 Subject: [PATCH 0486/1035] datastore: blockstore should retry when it encounters temp errors License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@2cfbc2ce8c4edad4e15d4b88d4234e9fce1f1f46 --- bitswap/bitswap.go | 14 +------------- bitswap/bitswap_test.go | 6 +++++- 2 files changed, 6 insertions(+), 14 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 53fc9cba1..576e62c97 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -265,7 +265,7 @@ func (bs *Bitswap) HasBlock(blk blocks.Block) error { default: } - err := bs.tryPutBlock(blk, 4) // attempt to store block up to four times + err := bs.blockstore.Put(blk) if err != nil { log.Errorf("Error writing block to datastore: %s", err) return err 
@@ -284,18 +284,6 @@ func (bs *Bitswap) HasBlock(blk blocks.Block) error { return nil } -func (bs *Bitswap) tryPutBlock(blk blocks.Block, attempts int) error { - var err error - for i := 0; i < attempts; i++ { - if err = bs.blockstore.Put(blk); err == nil { - break - } - - time.Sleep(time.Millisecond * time.Duration(400*(i+1))) - } - return err -} - func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { // This call records changes to wantlists, blocks received, // and number of bytes transfered. diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 6cbfe2b62..1d680aa74 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -24,8 +24,12 @@ import ( // well under varying conditions const kNetworkDelay = 0 * time.Millisecond +func getVirtualNetwork() tn.Network { + return tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) +} + func TestClose(t *testing.T) { - vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + vnet := getVirtualNetwork() sesgen := NewTestSessionGenerator(vnet) defer sesgen.Close() bgen := blocksutil.NewBlockGenerator() From d5f5e18a4f171bda2ca76de13a75f09821106877 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 20 Aug 2016 11:30:15 -0700 Subject: [PATCH 0487/1035] routing: rework interfaces to make separation easier License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@04e4abd87b8ea281a5774206288e9aa579584786 --- bitswap/network/ipfs_impl.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index bf1259246..022b07001 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -20,7 +20,7 @@ import ( var log = logging.Logger("bitswap_network") // NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host -func NewFromIpfsHost(host host.Host, r routing.IpfsRouting) BitSwapNetwork { +func 
NewFromIpfsHost(host host.Host, r routing.ContentRouting) BitSwapNetwork { bitswapNetwork := impl{ host: host, routing: r, @@ -36,7 +36,7 @@ func NewFromIpfsHost(host host.Host, r routing.IpfsRouting) BitSwapNetwork { // NetMessage objects, into the bitswap network interface. type impl struct { host host.Host - routing routing.IpfsRouting + routing routing.ContentRouting // inbound messages from the network are forwarded to the receiver receiver Receiver From ca0d665f2a2c1c23998d956a29846de09a2b9588 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 19 Aug 2016 18:33:44 -0700 Subject: [PATCH 0488/1035] blockservice: don't store blocks we already have License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@25bfef8d62c3c8fe54daf20a85747d48dcb65290 --- bitswap/workers.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bitswap/workers.go b/bitswap/workers.go index 4aa457917..b7e4a4a7c 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -133,6 +133,7 @@ func (bs *Bitswap) provideCollector(ctx context.Context) { log.Debug("newBlocks channel closed") return } + if keysOut == nil { nextKey = blk.Key() keysOut = bs.provideKeys From 12219ee22bb96daa89cd2ab015a16270e2c8a80d Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 22 Aug 2016 22:29:25 -0700 Subject: [PATCH 0489/1035] update deps for libp2p 3.4.0 License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@f3ea01e90b74c9774c5e9a1797e49c5436db86a4 --- bitswap/bitswap.go | 4 ++-- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 4 ++-- bitswap/decision/engine_test.go | 6 +++--- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 13 +++++++------ bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 6 +++--- bitswap/testnet/virtual.go | 2 +- 
bitswap/testutils.go | 8 ++++---- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 4 ++-- 17 files changed, 34 insertions(+), 33 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 576e62c97..c98a98db7 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -8,10 +8,10 @@ import ( "sync" "time" - logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log" process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" - peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" + logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blocks "github.com/ipfs/go-ipfs/blocks" diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 1d680aa74..df2bf9e27 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -17,7 +17,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmVCe3SNMjkcPgnpFhZs719dheq6xE7gJwjzV7aWcUM4Ms/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index e0086e3a9..22d533ea2 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" + "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 06d2d03ed..51a0f0524 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -9,8 +9,8 @@ import ( bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log" - peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" + logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index d2c3190f6..f9cb8aae3 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -12,9 +12,9 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" - ds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore" - dssync "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore/sync" + ds 
"gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore" + dssync "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore/sync" + peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 3226f57ce..225e00f15 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,7 +6,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" + peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 549de7c50..7265ea9e6 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" + peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" ) type peerRequestQueue interface { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index e828e0c25..f3b45e054 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -7,7 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmVCe3SNMjkcPgnpFhZs719dheq6xE7gJwjzV7aWcUM4Ms/go-libp2p/p2p/net" + inet "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/net" ggio 
"gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 144d835c1..16f0dfed2 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,9 +3,9 @@ package network import ( key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" - protocol "gx/ipfs/QmVCe3SNMjkcPgnpFhZs719dheq6xE7gJwjzV7aWcUM4Ms/go-libp2p/p2p/protocol" + peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + protocol "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/protocol" ) var ProtocolBitswap protocol.ID = "/ipfs/bitswap" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 022b07001..fe764641d 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -7,14 +7,14 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" - logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log" - pstore "gx/ipfs/QmQdnfvZQuhdT93LNc5bos52wAmdr3G2p6G8teLJMEN32P/go-libp2p-peerstore" - peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" - host "gx/ipfs/QmVCe3SNMjkcPgnpFhZs719dheq6xE7gJwjzV7aWcUM4Ms/go-libp2p/p2p/host" - inet "gx/ipfs/QmVCe3SNMjkcPgnpFhZs719dheq6xE7gJwjzV7aWcUM4Ms/go-libp2p/p2p/net" + pstore "gx/ipfs/QmSZi9ygLohBUGyHMqE5N6eToPwqcg7bZQTULeVLFu7Q6d/go-libp2p-peerstore" + logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" ggio 
"gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + host "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/host" + inet "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/net" ) var log = logging.Logger("bitswap_network") @@ -26,6 +26,7 @@ func NewFromIpfsHost(host host.Host, r routing.ContentRouting) BitSwapNetwork { routing: r, } host.SetStreamHandler(ProtocolBitswap, bitswapNetwork.handleNewStream) + host.SetStreamHandler("/bitswap/1.0.0", bitswapNetwork.handleNewStream) host.Network().Notify((*netNotifiee)(&bitswapNetwork)) // TODO: StopNotify. @@ -72,7 +73,7 @@ func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (inet.Stream, return nil, err } - return bsnet.host.NewStream(ctx, ProtocolBitswap, p) + return bsnet.host.NewStream(ctx, p, "/bitswap/1.0.0", ProtocolBitswap) } func (bsnet *impl) SendMessage( diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index ef79e722e..0378cc994 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" + peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 19e2f2b71..077c220e0 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" + peer 
"gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 551b03382..eb692fe7a 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -4,10 +4,10 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" - ds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore" - mockpeernet "gx/ipfs/QmVCe3SNMjkcPgnpFhZs719dheq6xE7gJwjzV7aWcUM4Ms/go-libp2p/p2p/net/mock" + ds "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore" + peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + mockpeernet "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index a468de3bb..e44290313 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" + peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index b930f7ef5..16b9d4d20 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -8,11 +8,11 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil 
"github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" - ds "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore" - ds_sync "gx/ipfs/QmTxLSvdhwg68WJimdS6icLPhZi28aTp6b7uihC2Yb47Xk/go-datastore/sync" - p2ptestutil "gx/ipfs/QmVCe3SNMjkcPgnpFhZs719dheq6xE7gJwjzV7aWcUM4Ms/go-libp2p/p2p/test/util" + ds "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore" + ds_sync "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore/sync" + peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + p2ptestutil "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/test/util" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index f685c7079..47ea7ba35 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,7 +9,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" + peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/workers.go b/bitswap/workers.go index b7e4a4a7c..f6d2b912d 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -10,8 +10,8 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log" - peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" + logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + peer 
"gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" ) var TaskWorkerCount = 8 From 0bb6c6da0698cfb9e1396c6d304aafa1b640a377 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 26 Aug 2016 13:56:47 -0700 Subject: [PATCH 0490/1035] use correct protocol names for ipfs services License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@628442e829332e3a509d0da88df8f7cdf136e4b9 --- bitswap/network/interface.go | 3 ++- bitswap/network/ipfs_impl.go | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 16f0dfed2..1c40f0a3e 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -8,7 +8,8 @@ import ( protocol "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/protocol" ) -var ProtocolBitswap protocol.ID = "/ipfs/bitswap" +var ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.0.0" +var ProtocolBitswapOld protocol.ID = "/ipfs/bitswap" // BitSwapNetwork provides network connectivity for BitSwap sessions type BitSwapNetwork interface { diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index fe764641d..055d6e549 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -26,7 +26,7 @@ func NewFromIpfsHost(host host.Host, r routing.ContentRouting) BitSwapNetwork { routing: r, } host.SetStreamHandler(ProtocolBitswap, bitswapNetwork.handleNewStream) - host.SetStreamHandler("/bitswap/1.0.0", bitswapNetwork.handleNewStream) + host.SetStreamHandler(ProtocolBitswapOld, bitswapNetwork.handleNewStream) host.Network().Notify((*netNotifiee)(&bitswapNetwork)) // TODO: StopNotify. 
@@ -73,7 +73,7 @@ func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (inet.Stream, return nil, err } - return bsnet.host.NewStream(ctx, p, "/bitswap/1.0.0", ProtocolBitswap) + return bsnet.host.NewStream(ctx, p, ProtocolBitswap, ProtocolBitswapOld) } func (bsnet *impl) SendMessage( From c970572f8789ce56747d546a2e6cb04cf4bbff37 Mon Sep 17 00:00:00 2001 From: mateon1 Date: Fri, 2 Sep 2016 21:38:59 +0200 Subject: [PATCH 0491/1035] Fix minor typo in bitswap debug logging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Mateusz Naściszewski This commit was moved from ipfs/go-bitswap@03f53fc9b7582edf356d09cbc17e26e0c2bb04c2 --- bitswap/workers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index f6d2b912d..9befad41a 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -169,7 +169,7 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) { case <-tick.C: n := bs.wm.wl.Len() if n > 0 { - log.Debug(n, "keys in bitswap wantlist") + log.Debug(n, " keys in bitswap wantlist") } case <-broadcastSignal.C: // resend unfulfilled wantlist keys log.Event(ctx, "Bitswap.Rebroadcast.active") From 23a3975cb5bc62362cea1da9a25c01d42662b07e Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 2 Sep 2016 15:28:10 -0700 Subject: [PATCH 0492/1035] bitswap: add better tests around wantlist clearing License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@ff2098530c878bc900079e70a5acd4ce3c2136db --- bitswap/bitswap_test.go | 78 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 73 insertions(+), 5 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 6cbfe2b62..c03aa2ef1 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -334,7 +334,6 @@ func TestDoubleGet(t *testing.T) { blocks := bg.Blocks(1) ctx1, cancel1 := context.WithCancel(context.Background()) - blkch1, err 
:= instances[1].Exchange.GetBlocks(ctx1, []key.Key{blocks[0].Key()}) if err != nil { t.Fatal(err) @@ -362,11 +361,15 @@ func TestDoubleGet(t *testing.T) { t.Fatal(err) } - blk, ok := <-blkch2 - if !ok { - t.Fatal("expected to get the block here") + select { + case blk, ok := <-blkch2: + if !ok { + t.Fatal("expected to get the block here") + } + t.Log(blk) + case <-time.After(time.Second * 5): + t.Fatal("timed out waiting on block") } - t.Log(blk) for _, inst := range instances { err := inst.Exchange.Close() @@ -375,3 +378,68 @@ func TestDoubleGet(t *testing.T) { } } } + +func TestWantlistCleanup(t *testing.T) { + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + sg := NewTestSessionGenerator(net) + defer sg.Close() + bg := blocksutil.NewBlockGenerator() + + instances := sg.Instances(1)[0] + bswap := instances.Exchange + blocks := bg.Blocks(20) + + var keys []key.Key + for _, b := range blocks { + keys = append(keys, b.Key()) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) + defer cancel() + _, err := bswap.GetBlock(ctx, keys[0]) + if err != context.DeadlineExceeded { + t.Fatal("shouldnt have fetched any blocks") + } + + time.Sleep(time.Millisecond * 50) + + if len(bswap.GetWantlist()) > 0 { + t.Fatal("should not have anyting in wantlist") + } + + ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50) + defer cancel() + _, err = bswap.GetBlocks(ctx, keys[:10]) + if err != nil { + t.Fatal(err) + } + + <-ctx.Done() + time.Sleep(time.Millisecond * 50) + + if len(bswap.GetWantlist()) > 0 { + t.Fatal("should not have anyting in wantlist") + } + + _, err = bswap.GetBlocks(context.Background(), keys[:1]) + if err != nil { + t.Fatal(err) + } + + ctx, cancel = context.WithCancel(context.Background()) + _, err = bswap.GetBlocks(ctx, keys[10:]) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 50) + if len(bswap.GetWantlist()) != 11 { + t.Fatal("should have 11 keys in 
wantlist") + } + + cancel() + time.Sleep(time.Millisecond * 50) + if !(len(bswap.GetWantlist()) == 1 && bswap.GetWantlist()[0] == keys[0]) { + t.Fatal("should only have keys[0] in wantlist") + } +} From 43e163a438b01bfb0c5f3c2b6438f5e9ae7006b7 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 1 Sep 2016 08:26:59 -0700 Subject: [PATCH 0493/1035] bitswap: Don't clear 'active' until Connect calls are finished License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@1b0996413833edbbe9e605c33f7c6a2af769a5eb --- bitswap/workers.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bitswap/workers.go b/bitswap/workers.go index 4aa457917..a7a218fb5 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -201,14 +201,18 @@ func (bs *Bitswap) providerQueryManager(ctx context.Context) { child, cancel := context.WithTimeout(e.Ctx, providerRequestTimeout) defer cancel() providers := bs.network.FindProvidersAsync(child, e.Key, maxProvidersPerRequest) + wg := &sync.WaitGroup{} for p := range providers { + wg.Add(1) go func(p peer.ID) { + defer wg.Done() err := bs.network.ConnectTo(child, p) if err != nil { log.Debug("failed to connect to provider %s: %s", p, err) } }(p) } + wg.Wait() activeLk.Lock() delete(active, e.Key) activeLk.Unlock() From 7ac4cb5209286191654a96a7a66877f4745ca529 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 2 Sep 2016 15:28:26 -0700 Subject: [PATCH 0494/1035] bitswap: clear wantlists when GetBlocks calls are cancelled License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@a53c0055e28229a7ac57e12079850be878b51f00 --- bitswap/bitswap.go | 50 ++++++++++++++--- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 4 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 6 +- bitswap/decision/peer_request_queue_test.go | 10 ++-- bitswap/message/message.go | 4 +- bitswap/wantlist/wantlist.go | 62 ++++++++++----------- bitswap/wantmanager.go | 26 ++++----- bitswap/workers.go | 
22 +++++--- 10 files changed, 114 insertions(+), 74 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index f14fe9162..13ead3388 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -22,7 +22,6 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" loggables "github.com/ipfs/go-ipfs/thirdparty/loggables" @@ -88,7 +87,7 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, notifications: notif, engine: decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method network: network, - findKeys: make(chan *wantlist.Entry, sizeBatchRequestChan), + findKeys: make(chan *blockRequest, sizeBatchRequestChan), process: px, newBlocks: make(chan blocks.Block, HasBlockBufferSize), provideKeys: make(chan key.Key, provideKeysBufferSize), @@ -131,7 +130,7 @@ type Bitswap struct { notifications notifications.PubSub // send keys to a worker to find and connect to providers for them - findKeys chan *wantlist.Entry + findKeys chan *blockRequest engine *decision.Engine @@ -148,8 +147,8 @@ type Bitswap struct { } type blockRequest struct { - key key.Key - ctx context.Context + Key key.Key + Ctx context.Context } // GetBlock attempts to retrieve a particular block from peers within the @@ -235,13 +234,50 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan blocks // NB: Optimization. Assumes that providers of key[0] are likely to // be able to provide for all keys. This currently holds true in most // every situation. Later, this assumption may not hold as true. 
- req := &wantlist.Entry{ + req := &blockRequest{ Key: keys[0], Ctx: ctx, } + + remaining := make(map[key.Key]struct{}) + for _, k := range keys { + remaining[k] = struct{}{} + } + + out := make(chan blocks.Block) + go func() { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer close(out) + defer func() { + var toCancel []key.Key + for k, _ := range remaining { + toCancel = append(toCancel, k) + } + bs.CancelWants(toCancel) + }() + for { + select { + case blk, ok := <-promise: + if !ok { + return + } + + delete(remaining, blk.Key()) + select { + case out <- blk: + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } + }() + select { case bs.findKeys <- req: - return promise, nil + return out, nil case <-ctx.Done(): return nil, ctx.Err() } diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index e0086e3a9..a87adf455 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -21,6 +21,6 @@ func BenchmarkTaskQueuePush(b *testing.B) { } b.ResetTimer() for i := 0; i < b.N; i++ { - q.Push(wantlist.Entry{Key: key.Key(i), Priority: math.MaxInt32}, peers[i%len(peers)]) + q.Push(&wantlist.Entry{Key: key.Key(i), Priority: math.MaxInt32}, peers[i%len(peers)]) } } diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 92f87c27e..389a37ca3 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -104,7 +104,7 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { return e } -func (e *Engine) WantlistForPeer(p peer.ID) (out []wl.Entry) { +func (e *Engine) WantlistForPeer(p peer.ID) (out []*wl.Entry) { e.lock.Lock() partner, ok := e.ledgerMap[p] if ok { @@ -218,7 +218,7 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { for _, entry := range m.Wantlist() { if entry.Cancel { - log.Debugf("cancel %s", entry.Key) + log.Debugf("%s cancel %s", p, entry.Key) l.CancelWant(entry.Key) e.peerRequestQueue.Remove(entry.Key, p) } 
else { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 95cd303e2..965673d50 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -79,7 +79,7 @@ func (l *ledger) CancelWant(k key.Key) { l.wantList.Remove(k) } -func (l *ledger) WantListContains(k key.Key) (wl.Entry, bool) { +func (l *ledger) WantListContains(k key.Key) (*wl.Entry, bool) { return l.wantList.Contains(k) } diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 549de7c50..05658aab1 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -13,7 +13,7 @@ import ( type peerRequestQueue interface { // Pop returns the next peerRequestTask. Returns nil if the peerRequestQueue is empty. Pop() *peerRequestTask - Push(entry wantlist.Entry, to peer.ID) + Push(entry *wantlist.Entry, to peer.ID) Remove(k key.Key, p peer.ID) // NB: cannot expose simply expose taskQueue.Len because trashed elements @@ -45,7 +45,7 @@ type prq struct { } // Push currently adds a new peerRequestTask to the end of the list -func (tl *prq) Push(entry wantlist.Entry, to peer.ID) { +func (tl *prq) Push(entry *wantlist.Entry, to peer.ID) { tl.lock.Lock() defer tl.lock.Unlock() partner, ok := tl.partners[to] @@ -166,7 +166,7 @@ func (tl *prq) thawRound() { } type peerRequestTask struct { - Entry wantlist.Entry + Entry *wantlist.Entry Target peer.ID // A callback to signal that this task has been completed diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index b1091c03c..a8356ad62 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -41,7 +41,7 @@ func TestPushPop(t *testing.T) { for _, index := range rand.Perm(len(alphabet)) { // add blocks for all letters letter := alphabet[index] t.Log(partner.String()) - prq.Push(wantlist.Entry{Key: key.Key(letter), Priority: math.MaxInt32 - index}, partner) + 
prq.Push(&wantlist.Entry{Key: key.Key(letter), Priority: math.MaxInt32 - index}, partner) } for _, consonant := range consonants { prq.Remove(key.Key(consonant), partner) @@ -78,10 +78,10 @@ func TestPeerRepeats(t *testing.T) { // Have each push some blocks for i := 0; i < 5; i++ { - prq.Push(wantlist.Entry{Key: key.Key(i)}, a) - prq.Push(wantlist.Entry{Key: key.Key(i)}, b) - prq.Push(wantlist.Entry{Key: key.Key(i)}, c) - prq.Push(wantlist.Entry{Key: key.Key(i)}, d) + prq.Push(&wantlist.Entry{Key: key.Key(i)}, a) + prq.Push(&wantlist.Entry{Key: key.Key(i)}, b) + prq.Push(&wantlist.Entry{Key: key.Key(i)}, c) + prq.Push(&wantlist.Entry{Key: key.Key(i)}, d) } // now, pop off four entries, there should be one from each diff --git a/bitswap/message/message.go b/bitswap/message/message.go index e828e0c25..6fcd2bac7 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -64,7 +64,7 @@ func newMsg(full bool) *impl { } type Entry struct { - wantlist.Entry + *wantlist.Entry Cancel bool } @@ -120,7 +120,7 @@ func (m *impl) addEntry(k key.Key, priority int, cancel bool) { e.Cancel = cancel } else { m.wantlist[k] = Entry{ - Entry: wantlist.Entry{ + Entry: &wantlist.Entry{ Key: k, Priority: priority, }, diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 77b959a65..6e4650b65 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -7,8 +7,6 @@ import ( "sync" key "github.com/ipfs/go-ipfs/blocks/key" - - "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) type ThreadSafe struct { @@ -18,19 +16,17 @@ type ThreadSafe struct { // not threadsafe type Wantlist struct { - set map[key.Key]Entry + set map[key.Key]*Entry } type Entry struct { Key key.Key Priority int - Ctx context.Context - cancel func() RefCnt int } -type entrySlice []Entry +type entrySlice []*Entry func (es entrySlice) Len() int { return len(es) } func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] } @@ -44,41 
+40,41 @@ func NewThreadSafe() *ThreadSafe { func New() *Wantlist { return &Wantlist{ - set: make(map[key.Key]Entry), + set: make(map[key.Key]*Entry), } } -func (w *ThreadSafe) Add(k key.Key, priority int) { +func (w *ThreadSafe) Add(k key.Key, priority int) bool { w.lk.Lock() defer w.lk.Unlock() - w.Wantlist.Add(k, priority) + return w.Wantlist.Add(k, priority) } -func (w *ThreadSafe) AddEntry(e Entry) { +func (w *ThreadSafe) AddEntry(e *Entry) bool { w.lk.Lock() defer w.lk.Unlock() - w.Wantlist.AddEntry(e) + return w.Wantlist.AddEntry(e) } -func (w *ThreadSafe) Remove(k key.Key) { +func (w *ThreadSafe) Remove(k key.Key) bool { w.lk.Lock() defer w.lk.Unlock() - w.Wantlist.Remove(k) + return w.Wantlist.Remove(k) } -func (w *ThreadSafe) Contains(k key.Key) (Entry, bool) { +func (w *ThreadSafe) Contains(k key.Key) (*Entry, bool) { w.lk.RLock() defer w.lk.RUnlock() return w.Wantlist.Contains(k) } -func (w *ThreadSafe) Entries() []Entry { +func (w *ThreadSafe) Entries() []*Entry { w.lk.RLock() defer w.lk.RUnlock() return w.Wantlist.Entries() } -func (w *ThreadSafe) SortedEntries() []Entry { +func (w *ThreadSafe) SortedEntries() []*Entry { w.lk.RLock() defer w.lk.RUnlock() return w.Wantlist.SortedEntries() @@ -94,50 +90,50 @@ func (w *Wantlist) Len() int { return len(w.set) } -func (w *Wantlist) Add(k key.Key, priority int) { +func (w *Wantlist) Add(k key.Key, priority int) bool { if e, ok := w.set[k]; ok { e.RefCnt++ - return + return false } - ctx, cancel := context.WithCancel(context.Background()) - w.set[k] = Entry{ + w.set[k] = &Entry{ Key: k, Priority: priority, - Ctx: ctx, - cancel: cancel, RefCnt: 1, } + + return true } -func (w *Wantlist) AddEntry(e Entry) { - if _, ok := w.set[e.Key]; ok { - return +func (w *Wantlist) AddEntry(e *Entry) bool { + if ex, ok := w.set[e.Key]; ok { + ex.RefCnt++ + return false } w.set[e.Key] = e + return true } -func (w *Wantlist) Remove(k key.Key) { +func (w *Wantlist) Remove(k key.Key) bool { e, ok := w.set[k] if !ok { - return + 
return false } e.RefCnt-- if e.RefCnt <= 0 { delete(w.set, k) - if e.cancel != nil { - e.cancel() - } + return true } + return false } -func (w *Wantlist) Contains(k key.Key) (Entry, bool) { +func (w *Wantlist) Contains(k key.Key) (*Entry, bool) { e, ok := w.set[k] return e, ok } -func (w *Wantlist) Entries() []Entry { +func (w *Wantlist) Entries() []*Entry { var es entrySlice for _, e := range w.set { es = append(es, e) @@ -145,7 +141,7 @@ func (w *Wantlist) Entries() []Entry { return es } -func (w *Wantlist) SortedEntries() []Entry { +func (w *Wantlist) SortedEntries() []*Entry { var es entrySlice for _, e := range w.set { es = append(es, e) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index f685c7079..ab8b55510 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -75,6 +75,7 @@ func (pm *WantManager) WantBlocks(ctx context.Context, ks []key.Key) { } func (pm *WantManager) CancelWants(ks []key.Key) { + log.Infof("cancel wants: %s", ks) pm.addEntries(context.TODO(), ks, true) } @@ -83,16 +84,17 @@ func (pm *WantManager) addEntries(ctx context.Context, ks []key.Key, cancel bool for i, k := range ks { entries = append(entries, &bsmsg.Entry{ Cancel: cancel, - Entry: wantlist.Entry{ + Entry: &wantlist.Entry{ Key: k, Priority: kMaxPriority - i, - Ctx: ctx, + RefCnt: 1, }, }) } select { case pm.incoming <- entries: case <-pm.ctx.Done(): + case <-ctx.Done(): } } @@ -241,33 +243,31 @@ func (pm *WantManager) Run() { case entries := <-pm.incoming: // add changes to our wantlist + var filtered []*bsmsg.Entry for _, e := range entries { if e.Cancel { - pm.wl.Remove(e.Key) + if pm.wl.Remove(e.Key) { + filtered = append(filtered, e) + } } else { - pm.wl.AddEntry(e.Entry) + if pm.wl.AddEntry(e.Entry) { + filtered = append(filtered, e) + } } } // broadcast those wantlist changes for _, p := range pm.peers { - p.addMessage(entries) + p.addMessage(filtered) } case <-tock.C: // resend entire wantlist every so often (REALLY SHOULDNT BE NECESSARY) var es 
[]*bsmsg.Entry for _, e := range pm.wl.Entries() { - select { - case <-e.Ctx.Done(): - // entry has been cancelled - // simply continue, the entry will be removed from the - // wantlist soon enough - continue - default: - } es = append(es, &bsmsg.Entry{Entry: e}) } + for _, p := range pm.peers { p.outlk.Lock() p.out = bsmsg.New(true) diff --git a/bitswap/workers.go b/bitswap/workers.go index 4aa457917..c91c22dff 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -9,7 +9,6 @@ import ( context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "github.com/ipfs/go-ipfs/blocks/key" - wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" logging "gx/ipfs/QmNQynaz7qfriSUJkiEZUrm2Wen1u3Kj9goZzWtrPyu7XR/go-log" peer "gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer" ) @@ -172,10 +171,19 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) { } case <-broadcastSignal.C: // resend unfulfilled wantlist keys log.Event(ctx, "Bitswap.Rebroadcast.active") + entries := bs.wm.wl.Entries() + if len(entries) == 0 { + continue + } + tctx, cancel := context.WithTimeout(ctx, providerRequestTimeout) for _, e := range bs.wm.wl.Entries() { e := e - bs.findKeys <- &e + bs.findKeys <- &blockRequest{ + Key: e.Key, + Ctx: tctx, + } } + cancel() case <-parent.Done(): return } @@ -184,20 +192,20 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) { func (bs *Bitswap) providerQueryManager(ctx context.Context) { var activeLk sync.Mutex - active := make(map[key.Key]*wantlist.Entry) + kset := key.NewKeySet() for { select { case e := <-bs.findKeys: activeLk.Lock() - if _, ok := active[e.Key]; ok { + if kset.Has(e.Key) { activeLk.Unlock() continue } - active[e.Key] = e + kset.Add(e.Key) activeLk.Unlock() - go func(e *wantlist.Entry) { + go func(e *blockRequest) { child, cancel := context.WithTimeout(e.Ctx, providerRequestTimeout) defer cancel() providers := bs.network.FindProvidersAsync(child, e.Key, 
maxProvidersPerRequest) @@ -210,7 +218,7 @@ func (bs *Bitswap) providerQueryManager(ctx context.Context) { }(p) } activeLk.Lock() - delete(active, e.Key) + kset.Remove(e.Key) activeLk.Unlock() }(e) From e4c1a6de921af06fea5ef015af31f99f102c7067 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 5 Sep 2016 20:13:10 -0700 Subject: [PATCH 0495/1035] bitswap: search for wantlist providers a little less often License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@5575419b374bc66542071ce97991ccc030350d03 --- bitswap/workers.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index 187157150..6da730a80 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -1,6 +1,7 @@ package bitswap import ( + "math/rand" "sync" "time" @@ -175,15 +176,14 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) { if len(entries) == 0 { continue } - tctx, cancel := context.WithTimeout(ctx, providerRequestTimeout) - for _, e := range bs.wm.wl.Entries() { - e := e - bs.findKeys <- &blockRequest{ - Key: e.Key, - Ctx: tctx, - } + + // TODO: come up with a better strategy for determining when to search + // for new providers for blocks. 
+ i := rand.Intn(len(entries)) + bs.findKeys <- &blockRequest{ + Key: entries[i].Key, + Ctx: ctx, } - cancel() case <-parent.Done(): return } From 5378e7b3b6b32fa5a5c3a1744742c0631a8208a6 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 1 Sep 2016 07:50:27 -0700 Subject: [PATCH 0496/1035] integrate CIDv0 License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@540558d24ea61f7fea5aa083d8c76751a537aaee --- bitswap/bitswap.go | 18 +++++++++--------- bitswap/bitswap_test.go | 7 +++++-- bitswap/decision/engine.go | 6 +++--- bitswap/decision/engine_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/notifications/notifications_test.go | 2 +- bitswap/testnet/network_test.go | 2 +- 7 files changed, 21 insertions(+), 18 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index c98a98db7..27d0a7b60 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -8,12 +8,6 @@ import ( "sync" "time" - process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" - procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" - logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" key "github.com/ipfs/go-ipfs/blocks/key" @@ -26,6 +20,12 @@ import ( flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" loggables "github.com/ipfs/go-ipfs/thirdparty/loggables" + + process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" + procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" + logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + context 
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) var log = logging.Logger("bitswap") @@ -252,8 +252,8 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan blocks } // CancelWant removes a given key from the wantlist -func (bs *Bitswap) CancelWants(ks []key.Key) { - bs.wm.CancelWants(ks) +func (bs *Bitswap) CancelWants(keys []key.Key) { + bs.wm.CancelWants(keys) } // HasBlock announces the existance of a block to this bitswap service. The @@ -343,7 +343,7 @@ func (bs *Bitswap) updateReceiveCounters(b blocks.Block) error { } if err == nil && has { bs.dupBlocksRecvd++ - bs.dupDataRecvd += uint64(len(b.Data())) + bs.dupDataRecvd += uint64(len(b.RawData())) } if has { diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index df2bf9e27..ea512f15d 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -90,7 +90,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { t.Fatal("Expected to succeed") } - if !bytes.Equal(block.Data(), received.Data()) { + if !bytes.Equal(block.RawData(), received.RawData()) { t.Fatal("Data doesn't match") } } @@ -289,7 +289,10 @@ func TestEmptyKey(t *testing.T) { defer sg.Close() bs := sg.Instances(1)[0].Exchange - _, err := bs.GetBlock(context.Background(), key.Key("")) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + _, err := bs.GetBlock(ctx, key.Key("")) if err != blockstore.ErrNotFound { t.Error("empty str key should return ErrNotFound") } diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 51a0f0524..067c87053 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -247,8 +247,8 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { } for _, block := range m.Blocks() { - log.Debugf("got block %s %d bytes", block.Key(), len(block.Data())) - l.ReceivedBytes(len(block.Data())) + log.Debugf("got block %s %d bytes", block, len(block.RawData())) + 
l.ReceivedBytes(len(block.RawData())) } return nil } @@ -286,7 +286,7 @@ func (e *Engine) AddBlock(block blocks.Block) { func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) error { l := e.findOrCreate(p) for _, block := range m.Blocks() { - l.SentBytes(len(block.Data())) + l.SentBytes(len(block.RawData())) l.wantList.Remove(block.Key()) e.peerRequestQueue.Remove(block.Key(), p) } diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index f9cb8aae3..e25575161 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -188,7 +188,7 @@ func checkHandledInOrder(t *testing.T, e *Engine, keys []string) error { received := envelope.Block expected := blocks.NewBlock([]byte(k)) if received.Key() != expected.Key() { - return errors.New(fmt.Sprintln("received", string(received.Data()), "expected", string(expected.Data()))) + return errors.New(fmt.Sprintln("received", string(received.RawData()), "expected", string(expected.RawData()))) } } return nil diff --git a/bitswap/message/message.go b/bitswap/message/message.go index f3b45e054..f73dedf6a 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -159,7 +159,7 @@ func (m *impl) ToProto() *pb.Message { }) } for _, b := range m.Blocks() { - pbm.Blocks = append(pbm.Blocks, b.Data()) + pbm.Blocks = append(pbm.Blocks, b.RawData()) } return pbm } diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 3e923b84e..0880296e5 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -159,7 +159,7 @@ func assertBlockChannelNil(t *testing.T, blockChannel <-chan blocks.Block) { } func assertBlocksEqual(t *testing.T, a, b blocks.Block) { - if !bytes.Equal(a.Data(), b.Data()) { + if !bytes.Equal(a.RawData(), b.RawData()) { t.Fatal("blocks aren't equal") } if a.Key() != b.Key() { diff --git a/bitswap/testnet/network_test.go 
b/bitswap/testnet/network_test.go index 077c220e0..dfbf45c01 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -44,7 +44,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { // TODO assert that this came from the correct peer and that the message contents are as expected ok := false for _, b := range msgFromResponder.Blocks() { - if string(b.Data()) == expectedStr { + if string(b.RawData()) == expectedStr { wg.Done() ok = true } From c62468c27025c37cb016fd3378288a622bd8f212 Mon Sep 17 00:00:00 2001 From: George Antoniadis Date: Fri, 9 Sep 2016 15:41:28 +0100 Subject: [PATCH 0497/1035] Extract key and datastore License: MIT Signed-off-by: George Antoniadis This commit was moved from ipfs/go-bitswap@56c1d0d88d1b62e909203c5fff98fa0c205690f2 --- bitswap/bitswap.go | 6 +++--- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine_test.go | 4 ++-- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/decision/peer_request_queue_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/message/message_test.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 2 +- bitswap/notifications/notifications.go | 2 +- bitswap/notifications/notifications_test.go | 2 +- bitswap/stat.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 4 ++-- bitswap/wantlist/wantlist.go | 2 +- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 6 +++--- 20 files changed, 26 insertions(+), 26 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 27d0a7b60..63a9f914a 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -10,7 +10,6 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" - key "github.com/ipfs/go-ipfs/blocks/key" exchange "github.com/ipfs/go-ipfs/exchange" decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" bsmsg 
"github.com/ipfs/go-ipfs/exchange/bitswap/message" @@ -20,9 +19,10 @@ import ( flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" loggables "github.com/ipfs/go-ipfs/thirdparty/loggables" + key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" - process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" - procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" + process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" + procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index ea512f15d..7e5dfb8f6 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -13,10 +13,10 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - key "github.com/ipfs/go-ipfs/blocks/key" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" + key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" p2ptestutil "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/test/util" ) diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 22d533ea2..881ede31a 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -4,10 +4,10 @@ import ( "math" "testing" - key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + key 
"gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index e25575161..37c1463d0 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -12,10 +12,10 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - ds "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore" - dssync "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore/sync" peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" + dssync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 225e00f15..4046ece5f 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -4,9 +4,9 @@ import ( "sync" "time" - key "github.com/ipfs/go-ipfs/blocks/key" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 7265ea9e6..7367c2a81 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -4,10 +4,10 @@ import ( "sync" "time" - key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" peer 
"gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) type peerRequestQueue interface { diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index b1091c03c..01e07baee 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -7,9 +7,9 @@ import ( "strings" "testing" - key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" + key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index f73dedf6a..6510221ee 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -4,9 +4,9 @@ import ( "io" blocks "github.com/ipfs/go-ipfs/blocks" - key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" inet "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index db79208d2..500b3f6e3 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -7,8 +7,8 @@ import ( proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" blocks "github.com/ipfs/go-ipfs/blocks" - key "github.com/ipfs/go-ipfs/blocks/key" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" + key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) func TestAppendWanted(t *testing.T) { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 16f0dfed2..f43b846c9 100644 --- 
a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -1,10 +1,10 @@ package network import ( - key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" protocol "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/protocol" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index fe764641d..4c18b76b4 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -3,9 +3,9 @@ package network import ( "io" - key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "github.com/ipfs/go-ipfs/routing" + key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" pstore "gx/ipfs/QmSZi9ygLohBUGyHMqE5N6eToPwqcg7bZQTULeVLFu7Q6d/go-libp2p-peerstore" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 0b7f4f33a..4e440b490 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -3,8 +3,8 @@ package notifications import ( pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" blocks "github.com/ipfs/go-ipfs/blocks" - key "github.com/ipfs/go-ipfs/blocks/key" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 0880296e5..c6aaac5ca 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -7,8 +7,8 @@ import ( blocks 
"github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - key "github.com/ipfs/go-ipfs/blocks/key" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/stat.go b/bitswap/stat.go index 956a4c5b7..ff201c3ae 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -1,7 +1,7 @@ package bitswap import ( - key "github.com/ipfs/go-ipfs/blocks/key" + key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" "sort" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index eb692fe7a..6c0cf3b8e 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -4,9 +4,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - ds "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore" peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" mockpeernet "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/net/mock" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index e44290313..7a1966a0a 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -3,7 +3,6 @@ package bitswap import ( "errors" - key "github.com/ipfs/go-ipfs/blocks/key" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" routing "github.com/ipfs/go-ipfs/routing" @@ -12,6 +11,7 @@ import ( testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" context 
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 16b9d4d20..3bccb9e4e 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -8,10 +8,10 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - ds "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore" - ds_sync "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore/sync" peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" + ds_sync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync" p2ptestutil "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/test/util" ) diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 77b959a65..2fcaf0c29 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - key "github.com/ipfs/go-ipfs/blocks/key" + key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 47ea7ba35..e9daae034 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -4,13 +4,13 @@ import ( "sync" "time" - key "github.com/ipfs/go-ipfs/blocks/key" engine "github.com/ipfs/go-ipfs/exchange/bitswap/decision" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" peer 
"gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) type WantManager struct { diff --git a/bitswap/workers.go b/bitswap/workers.go index 9befad41a..bf45bce7d 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -4,14 +4,14 @@ import ( "sync" "time" - process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" - procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" + process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" + procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - key "github.com/ipfs/go-ipfs/blocks/key" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) var TaskWorkerCount = 8 From bf7b7b0efe8ad17c1500421c2682407864928d41 Mon Sep 17 00:00:00 2001 From: George Antoniadis Date: Sat, 10 Sep 2016 23:00:05 +0100 Subject: [PATCH 0498/1035] Extract thirdparty/loggables License: MIT Signed-off-by: George Antoniadis This commit was moved from ipfs/go-bitswap@5d62468fedcb2cf90425aeb8f80a419b04a8c39d --- bitswap/bitswap.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 63a9f914a..8b6511b7e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -8,6 +8,8 @@ import ( "sync" "time" + key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" exchange "github.com/ipfs/go-ipfs/exchange" @@ -18,8 +20,7 @@ import ( 
wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" - loggables "github.com/ipfs/go-ipfs/thirdparty/loggables" - key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + loggables "gx/ipfs/QmYrv4LgCC8FhG2Ab4bwuq5DqBdwMtx3hMb3KKJDZcr2d7/go-libp2p-loggables" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" From b9d6a0dc3d2e29a0d23f10726df2839ba3ebe0d1 Mon Sep 17 00:00:00 2001 From: George Antoniadis Date: Sat, 10 Sep 2016 23:22:17 +0100 Subject: [PATCH 0499/1035] Extract peerset, update peer, peerset, secio, libp2p License: MIT Signed-off-by: George Antoniadis This commit was moved from ipfs/go-bitswap@4ba214f1e8d41042445c3f54c38c7ab48264df9d --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 8 ++++---- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 4 ++-- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 2 +- 17 files changed, 23 insertions(+), 23 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 8b6511b7e..ed914b979 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -25,7 +25,7 @@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + peer 
"gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 7e5dfb8f6..4b9e354fd 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,8 +16,8 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" + p2ptestutil "gx/ipfs/QmXnaDLonE9YBTVDdWBM6Jb5YxxmW1MHMkXzgsnu1jTEmK/go-libp2p/p2p/test/util" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" - p2ptestutil "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 881ede31a..5a5a34587 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -6,7 +6,7 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 067c87053..8f888851f 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -10,7 +10,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git 
a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 37c1463d0..234768577 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -12,7 +12,7 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" dssync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync" diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 4046ece5f..dedbbb8e3 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -5,7 +5,7 @@ import ( "time" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 7367c2a81..c6eb045c1 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -6,7 +6,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 6510221ee..29514958f 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ 
-6,8 +6,8 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + inet "gx/ipfs/QmXnaDLonE9YBTVDdWBM6Jb5YxxmW1MHMkXzgsnu1jTEmK/go-libp2p/p2p/net" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" - inet "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 460bf3a72..a0ffe990f 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -2,10 +2,10 @@ package network import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" + protocol "gx/ipfs/QmXnaDLonE9YBTVDdWBM6Jb5YxxmW1MHMkXzgsnu1jTEmK/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" - protocol "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/protocol" ) var ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.0.0" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index ad40a2860..41674e2bf 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -7,14 +7,14 @@ import ( routing "github.com/ipfs/go-ipfs/routing" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" - pstore "gx/ipfs/QmSZi9ygLohBUGyHMqE5N6eToPwqcg7bZQTULeVLFu7Q6d/go-libp2p-peerstore" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + peer 
"gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" + host "gx/ipfs/QmXnaDLonE9YBTVDdWBM6Jb5YxxmW1MHMkXzgsnu1jTEmK/go-libp2p/p2p/host" + inet "gx/ipfs/QmXnaDLonE9YBTVDdWBM6Jb5YxxmW1MHMkXzgsnu1jTEmK/go-libp2p/p2p/net" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - host "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/host" - inet "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/net" + pstore "gx/ipfs/QmdMfSLMDBDYhtc4oF3NYGCZr5dy4wQb6Ji26N4D4mdxa2/go-libp2p-peerstore" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 0378cc994..077859805 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index dfbf45c01..4fc767acd 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 6c0cf3b8e..5e612e315 100644 --- 
a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -4,10 +4,10 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" + mockpeernet "gx/ipfs/QmXnaDLonE9YBTVDdWBM6Jb5YxxmW1MHMkXzgsnu1jTEmK/go-libp2p/p2p/net/mock" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" - mockpeernet "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 7a1966a0a..2fcc2f82f 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,7 +9,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 3bccb9e4e..73200e1d2 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -8,11 +8,11 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" + p2ptestutil 
"gx/ipfs/QmXnaDLonE9YBTVDdWBM6Jb5YxxmW1MHMkXzgsnu1jTEmK/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" ds_sync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync" - p2ptestutil "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/test/util" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index e9daae034..189c2e38e 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -8,7 +8,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) diff --git a/bitswap/workers.go b/bitswap/workers.go index bf45bce7d..bc8ae1c39 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -10,7 +10,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) From c7f37e69963471aee20cbd72060f0b9b8f644260 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 12 Sep 2016 07:47:04 -0700 Subject: [PATCH 0500/1035] Update libp2p to have fixed spdystream dep License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@1596992d4671c3bbe53b32fc9ab61cc50deafd42 --- 
bitswap/bitswap_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 4b9e354fd..9e59b5a74 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,8 +16,8 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmXnaDLonE9YBTVDdWBM6Jb5YxxmW1MHMkXzgsnu1jTEmK/go-libp2p/p2p/test/util" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + p2ptestutil "gx/ipfs/QmcpZpCmnfjRunzeYtXZdtcy16P2mC65CThjb7aA8sPqNY/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 29514958f..8520592f6 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -6,8 +6,8 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmXnaDLonE9YBTVDdWBM6Jb5YxxmW1MHMkXzgsnu1jTEmK/go-libp2p/p2p/net" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + inet "gx/ipfs/QmcpZpCmnfjRunzeYtXZdtcy16P2mC65CThjb7aA8sPqNY/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index a0ffe990f..9650bb1f5 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,9 +3,9 @@ package network import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" peer 
"gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - protocol "gx/ipfs/QmXnaDLonE9YBTVDdWBM6Jb5YxxmW1MHMkXzgsnu1jTEmK/go-libp2p/p2p/protocol" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + protocol "gx/ipfs/QmcpZpCmnfjRunzeYtXZdtcy16P2mC65CThjb7aA8sPqNY/go-libp2p/p2p/protocol" ) var ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.0.0" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 41674e2bf..1ec3a2778 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -9,11 +9,11 @@ import ( logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - host "gx/ipfs/QmXnaDLonE9YBTVDdWBM6Jb5YxxmW1MHMkXzgsnu1jTEmK/go-libp2p/p2p/host" - inet "gx/ipfs/QmXnaDLonE9YBTVDdWBM6Jb5YxxmW1MHMkXzgsnu1jTEmK/go-libp2p/p2p/net" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + host "gx/ipfs/QmcpZpCmnfjRunzeYtXZdtcy16P2mC65CThjb7aA8sPqNY/go-libp2p/p2p/host" + inet "gx/ipfs/QmcpZpCmnfjRunzeYtXZdtcy16P2mC65CThjb7aA8sPqNY/go-libp2p/p2p/net" pstore "gx/ipfs/QmdMfSLMDBDYhtc4oF3NYGCZr5dy4wQb6Ji26N4D4mdxa2/go-libp2p-peerstore" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 5e612e315..67c488974 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,9 +5,9 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - mockpeernet "gx/ipfs/QmXnaDLonE9YBTVDdWBM6Jb5YxxmW1MHMkXzgsnu1jTEmK/go-libp2p/p2p/net/mock" context 
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" + mockpeernet "gx/ipfs/QmcpZpCmnfjRunzeYtXZdtcy16P2mC65CThjb7aA8sPqNY/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 73200e1d2..1ea0b05c6 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -9,10 +9,10 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - p2ptestutil "gx/ipfs/QmXnaDLonE9YBTVDdWBM6Jb5YxxmW1MHMkXzgsnu1jTEmK/go-libp2p/p2p/test/util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" ds_sync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync" + p2ptestutil "gx/ipfs/QmcpZpCmnfjRunzeYtXZdtcy16P2mC65CThjb7aA8sPqNY/go-libp2p/p2p/test/util" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
From 7bc61d4af99aa32189354f77015339436268e0a7 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 12 Sep 2016 14:26:55 -0700 Subject: [PATCH 0501/1035] Update libp2p to 3.5.2 License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@e35d729b8e336fcabfd2bbf7f0ec73b95e9c587e --- bitswap/bitswap_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 9e59b5a74..5428d221c 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,8 +16,8 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" + p2ptestutil "gx/ipfs/QmUuwQUJmtvC6ReYcu7xaYKEUM3pD46H18dFn3LBhVt2Di/go-libp2p/p2p/test/util" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" - p2ptestutil "gx/ipfs/QmcpZpCmnfjRunzeYtXZdtcy16P2mC65CThjb7aA8sPqNY/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 8520592f6..23a9f14ed 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -6,8 +6,8 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + inet "gx/ipfs/QmUuwQUJmtvC6ReYcu7xaYKEUM3pD46H18dFn3LBhVt2Di/go-libp2p/p2p/net" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" - inet "gx/ipfs/QmcpZpCmnfjRunzeYtXZdtcy16P2mC65CThjb7aA8sPqNY/go-libp2p/p2p/net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 9650bb1f5..85578f637 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -2,10 +2,10 @@ package network import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + protocol "gx/ipfs/QmUuwQUJmtvC6ReYcu7xaYKEUM3pD46H18dFn3LBhVt2Di/go-libp2p/p2p/protocol" peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" - protocol "gx/ipfs/QmcpZpCmnfjRunzeYtXZdtcy16P2mC65CThjb7aA8sPqNY/go-libp2p/p2p/protocol" ) var ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.0.0" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 1ec3a2778..2c6a6db6d 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,12 +8,12 @@ import ( key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + host "gx/ipfs/QmUuwQUJmtvC6ReYcu7xaYKEUM3pD46H18dFn3LBhVt2Di/go-libp2p/p2p/host" + inet 
"gx/ipfs/QmUuwQUJmtvC6ReYcu7xaYKEUM3pD46H18dFn3LBhVt2Di/go-libp2p/p2p/net" peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - host "gx/ipfs/QmcpZpCmnfjRunzeYtXZdtcy16P2mC65CThjb7aA8sPqNY/go-libp2p/p2p/host" - inet "gx/ipfs/QmcpZpCmnfjRunzeYtXZdtcy16P2mC65CThjb7aA8sPqNY/go-libp2p/p2p/net" pstore "gx/ipfs/QmdMfSLMDBDYhtc4oF3NYGCZr5dy4wQb6Ji26N4D4mdxa2/go-libp2p-peerstore" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 67c488974..67d595da5 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -4,10 +4,10 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + mockpeernet "gx/ipfs/QmUuwQUJmtvC6ReYcu7xaYKEUM3pD46H18dFn3LBhVt2Di/go-libp2p/p2p/net/mock" peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" - mockpeernet "gx/ipfs/QmcpZpCmnfjRunzeYtXZdtcy16P2mC65CThjb7aA8sPqNY/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 1ea0b05c6..4493c6646 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -8,11 +8,11 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + p2ptestutil "gx/ipfs/QmUuwQUJmtvC6ReYcu7xaYKEUM3pD46H18dFn3LBhVt2Di/go-libp2p/p2p/test/util" peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" context 
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" ds_sync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync" - p2ptestutil "gx/ipfs/QmcpZpCmnfjRunzeYtXZdtcy16P2mC65CThjb7aA8sPqNY/go-libp2p/p2p/test/util" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! From c9c5d6ab85e09ad064baa10583fa6a75d5fe4f74 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 13 Sep 2016 15:17:07 -0700 Subject: [PATCH 0502/1035] routing: use extracted dht and routing code License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@95b982a46cdef4a371c8a65e1f74a6ea79ee0d52 --- bitswap/network/ipfs_impl.go | 2 +- bitswap/testnet/virtual.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 2c6a6db6d..578145b47 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -4,8 +4,8 @@ import ( "io" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - routing "github.com/ipfs/go-ipfs/routing" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + routing "gx/ipfs/QmcoQiBzRaaVv1DZbbXoDWiEtvDN94Ca1DcwnQKK2tP92s/go-libp2p-routing" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" host "gx/ipfs/QmUuwQUJmtvC6ReYcu7xaYKEUM3pD46H18dFn3LBhVt2Di/go-libp2p/p2p/host" diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 2fcc2f82f..135049ee2 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -5,13 +5,13 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - routing "github.com/ipfs/go-ipfs/routing" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" peer 
"gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + routing "gx/ipfs/QmcoQiBzRaaVv1DZbbXoDWiEtvDN94Ca1DcwnQKK2tP92s/go-libp2p-routing" ) func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { From e2b6b766c828519fbed960049679bb4eb7120c7a Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 25 Sep 2016 23:42:14 -0700 Subject: [PATCH 0503/1035] update libp2p and dht packages License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@7dfc5ccf2a0a33e47ad72cb40847e24de16afbf0 --- bitswap/bitswap.go | 4 ++-- bitswap/bitswap_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 8 ++++---- bitswap/testnet/peernet.go | 2 +- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 2 +- bitswap/workers.go | 4 ++-- 9 files changed, 14 insertions(+), 14 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index de2dce25d..1f99fa4cd 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -21,8 +21,8 @@ import ( "github.com/ipfs/go-ipfs/thirdparty/delay" loggables "gx/ipfs/QmYrv4LgCC8FhG2Ab4bwuq5DqBdwMtx3hMb3KKJDZcr2d7/go-libp2p-loggables" - process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" - procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" + process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" + procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 49785c6ce..c434e2027 100644 --- a/bitswap/bitswap_test.go +++ 
b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmUuwQUJmtvC6ReYcu7xaYKEUM3pD46H18dFn3LBhVt2Di/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmbiRCGZqhfcSjnm9icGz3oNQQdPLAnLWnKHXixaEWXVCN/go-libp2p/p2p/test/util" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 8e65b369b..53da2276d 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -6,7 +6,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmUuwQUJmtvC6ReYcu7xaYKEUM3pD46H18dFn3LBhVt2Di/go-libp2p/p2p/net" + inet "gx/ipfs/QmbiRCGZqhfcSjnm9icGz3oNQQdPLAnLWnKHXixaEWXVCN/go-libp2p/p2p/net" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 85578f637..726698cf9 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -2,9 +2,9 @@ package network import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - protocol "gx/ipfs/QmUuwQUJmtvC6ReYcu7xaYKEUM3pD46H18dFn3LBhVt2Di/go-libp2p/p2p/protocol" peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + protocol "gx/ipfs/QmbiRCGZqhfcSjnm9icGz3oNQQdPLAnLWnKHXixaEWXVCN/go-libp2p/p2p/protocol" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 578145b47..4f3aa8cc9 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -5,16 
+5,16 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" - routing "gx/ipfs/QmcoQiBzRaaVv1DZbbXoDWiEtvDN94Ca1DcwnQKK2tP92s/go-libp2p-routing" + routing "gx/ipfs/QmemZcG8WprPbnVX3AM43GhhSUiA3V6NjcTLAguvWzkdpQ/go-libp2p-routing" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - host "gx/ipfs/QmUuwQUJmtvC6ReYcu7xaYKEUM3pD46H18dFn3LBhVt2Di/go-libp2p/p2p/host" - inet "gx/ipfs/QmUuwQUJmtvC6ReYcu7xaYKEUM3pD46H18dFn3LBhVt2Di/go-libp2p/p2p/net" peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" + pstore "gx/ipfs/QmYkwVGkwoPbMVQEbf6LonZg4SsCxGP3H7PBEtdNCNRyxD/go-libp2p-peerstore" ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - pstore "gx/ipfs/QmdMfSLMDBDYhtc4oF3NYGCZr5dy4wQb6Ji26N4D4mdxa2/go-libp2p-peerstore" + host "gx/ipfs/QmbiRCGZqhfcSjnm9icGz3oNQQdPLAnLWnKHXixaEWXVCN/go-libp2p/p2p/host" + inet "gx/ipfs/QmbiRCGZqhfcSjnm9icGz3oNQQdPLAnLWnKHXixaEWXVCN/go-libp2p/p2p/net" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 67d595da5..46a41ba5b 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -4,9 +4,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet "gx/ipfs/QmUuwQUJmtvC6ReYcu7xaYKEUM3pD46H18dFn3LBhVt2Di/go-libp2p/p2p/net/mock" peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + mockpeernet "gx/ipfs/QmbiRCGZqhfcSjnm9icGz3oNQQdPLAnLWnKHXixaEWXVCN/go-libp2p/p2p/net/mock" ds 
"gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 135049ee2..2bb9773bd 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -11,7 +11,7 @@ import ( peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" - routing "gx/ipfs/QmcoQiBzRaaVv1DZbbXoDWiEtvDN94Ca1DcwnQKK2tP92s/go-libp2p-routing" + routing "gx/ipfs/QmemZcG8WprPbnVX3AM43GhhSUiA3V6NjcTLAguvWzkdpQ/go-libp2p-routing" ) func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 4493c6646..60aa66d9b 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -8,9 +8,9 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - p2ptestutil "gx/ipfs/QmUuwQUJmtvC6ReYcu7xaYKEUM3pD46H18dFn3LBhVt2Di/go-libp2p/p2p/test/util" peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + p2ptestutil "gx/ipfs/QmbiRCGZqhfcSjnm9icGz3oNQQdPLAnLWnKHXixaEWXVCN/go-libp2p/p2p/test/util" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" ds_sync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync" ) diff --git a/bitswap/workers.go b/bitswap/workers.go index 5332a2013..9f5c6c5ea 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -5,8 +5,8 @@ import ( "sync" "time" - process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" - procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" + process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" + procctx 
"gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" From a8a3122cec30a866f2e50dce48d5b472dd63021e Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 28 Sep 2016 17:08:13 -0700 Subject: [PATCH 0504/1035] only pass keys down newBlocks chan in bitswap License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@5d2b9d6cc0d6e0a5e1c663b263becc16bdbc7221 --- bitswap/bitswap.go | 11 ++++++++--- bitswap/workers.go | 6 +++--- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 1f99fa4cd..580d49845 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -90,7 +90,7 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, network: network, findKeys: make(chan *blockRequest, sizeBatchRequestChan), process: px, - newBlocks: make(chan blocks.Block, HasBlockBufferSize), + newBlocks: make(chan key.Key, HasBlockBufferSize), provideKeys: make(chan key.Key, provideKeysBufferSize), wm: NewWantManager(ctx, network), } @@ -137,7 +137,7 @@ type Bitswap struct { process process.Process - newBlocks chan blocks.Block + newBlocks chan key.Key provideKeys chan key.Key @@ -308,12 +308,17 @@ func (bs *Bitswap) HasBlock(blk blocks.Block) error { return err } + // NOTE: There exists the possiblity for a race condition here. If a user + // creates a node, then adds it to the dagservice while another goroutine + // is waiting on a GetBlock for that object, they will receive a reference + // to the same node. We should address this soon, but i'm not going to do + // it now as it requires more thought and isnt causing immediate problems. 
bs.notifications.Publish(blk) bs.engine.AddBlock(blk) select { - case bs.newBlocks <- blk: + case bs.newBlocks <- blk.Key(): // send block off to be reprovided case <-bs.process.Closing(): return bs.process.Close() diff --git a/bitswap/workers.go b/bitswap/workers.go index 9f5c6c5ea..51fc1fde8 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -127,17 +127,17 @@ func (bs *Bitswap) provideCollector(ctx context.Context) { for { select { - case blk, ok := <-bs.newBlocks: + case blkey, ok := <-bs.newBlocks: if !ok { log.Debug("newBlocks channel closed") return } if keysOut == nil { - nextKey = blk.Key() + nextKey = blkey keysOut = bs.provideKeys } else { - toProvide = append(toProvide, blk.Key()) + toProvide = append(toProvide, blkey) } case keysOut <- nextKey: if len(toProvide) > 0 { From 6021aaadca755efa0b7ba22b1ba9a9c52f927424 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 5 Oct 2016 15:49:08 -0700 Subject: [PATCH 0505/1035] update to libp2p 4.0.1 and propogate other changes License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@c343d6bbb20a2853bd1168c680bf35ee90aa10e4 --- bitswap/bitswap.go | 12 +++++----- bitswap/bitswap_test.go | 8 +++---- bitswap/decision/bench_test.go | 4 ++-- bitswap/decision/engine.go | 4 ++-- bitswap/decision/engine_test.go | 4 ++-- bitswap/decision/ledger.go | 4 ++-- bitswap/decision/peer_request_queue.go | 4 ++-- bitswap/decision/peer_request_queue_test.go | 2 +- bitswap/message/message.go | 4 ++-- bitswap/message/message_test.go | 2 +- bitswap/network/interface.go | 8 +++---- bitswap/network/ipfs_impl.go | 25 ++++++++++++--------- bitswap/notifications/notifications.go | 4 ++-- bitswap/notifications/notifications_test.go | 4 ++-- bitswap/stat.go | 2 +- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 4 ++-- bitswap/testnet/peernet.go | 6 ++--- bitswap/testnet/virtual.go | 15 ++++++++----- bitswap/testutils.go | 6 ++--- bitswap/wantlist/wantlist.go | 2 +- bitswap/wantmanager.go | 6 
++--- bitswap/workers.go | 10 ++++----- 23 files changed, 75 insertions(+), 67 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 580d49845..f832e0787 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -8,7 +8,7 @@ import ( "sync" "time" - key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" @@ -19,13 +19,13 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" - loggables "gx/ipfs/QmYrv4LgCC8FhG2Ab4bwuq5DqBdwMtx3hMb3KKJDZcr2d7/go-libp2p-loggables" + loggables "gx/ipfs/QmTMy4hVSY28DdwJ9kBz6y7q6MuioFzPcpM3Ma3aPjo1i3/go-libp2p-loggables" - process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" - procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" + context "context" + process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" + procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) var log = logging.Logger("bitswap") diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index c434e2027..e15e92df0 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" + context "context" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" travis "github.com/ipfs/go-ipfs/thirdparty/testutil/ci/travis" - context 
"gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" @@ -16,8 +16,8 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - p2ptestutil "gx/ipfs/QmbiRCGZqhfcSjnm9icGz3oNQQdPLAnLWnKHXixaEWXVCN/go-libp2p/p2p/test/util" - key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + p2ptestutil "gx/ipfs/QmcRa2qn6iCmap9bjp8jAwkvYAq13AUfxdY3rrYiaJbLum/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. fix them to work @@ -50,7 +50,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this block := blocks.NewBlock([]byte("block")) pinfo := p2ptestutil.RandTestBogusIdentityOrFatal(t) - rs.Client(pinfo).Provide(context.Background(), block.Key()) // but not on network + rs.Client(pinfo).Provide(context.Background(), block.Cid()) // but not on network solo := g.Next() defer solo.Exchange.Close() diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index eabc7cbeb..8a8fd3db1 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -6,8 +6,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 5a0b99c19..3eddeff86 100644 --- a/bitswap/decision/engine.go +++ 
b/bitswap/decision/engine.go @@ -5,13 +5,13 @@ import ( "sync" "time" + context "context" blocks "github.com/ipfs/go-ipfs/blocks" bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) // TODO consider taking responsibility for other types of requests. For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 234768577..91dbc8fcd 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -8,14 +8,14 @@ import ( "sync" "testing" + context "context" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" dssync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync" + peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 887451dd8..b5217cf2b 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -5,8 +5,8 @@ import ( "time" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + key 
"gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) // keySet is just a convenient alias for maps of keys, where we only care diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 6e301869f..732f0d4d4 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -6,8 +6,8 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) type peerRequestQueue interface { diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 59fd9f273..22a5f164d 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -9,7 +9,7 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 53da2276d..2c1947cfe 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -6,8 +6,8 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmbiRCGZqhfcSjnm9icGz3oNQQdPLAnLWnKHXixaEWXVCN/go-libp2p/p2p/net" - key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + key 
"gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + inet "gx/ipfs/QmdXimY9QHaasZmw6hWojWnCJvfgxETjZQfg9g6ZrA9wMX/go-libp2p-net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 500b3f6e3..56609c434 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -8,7 +8,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" - key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" ) func TestAppendWanted(t *testing.T) { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 726698cf9..72cd80a67 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -1,11 +1,11 @@ package network import ( + context "context" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - protocol "gx/ipfs/QmbiRCGZqhfcSjnm9icGz3oNQQdPLAnLWnKHXixaEWXVCN/go-libp2p/p2p/protocol" - key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" + peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) var ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.0.0" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 4f3aa8cc9..af18965cc 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -1,20 +1,21 @@ package network import ( + "context" "io" bsmsg 
"github.com/ipfs/go-ipfs/exchange/bitswap/message" - key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" - routing "gx/ipfs/QmemZcG8WprPbnVX3AM43GhhSUiA3V6NjcTLAguvWzkdpQ/go-libp2p-routing" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - pstore "gx/ipfs/QmYkwVGkwoPbMVQEbf6LonZg4SsCxGP3H7PBEtdNCNRyxD/go-libp2p-peerstore" - ma "gx/ipfs/QmYzDkkgAEmrcNzFCiYo6L1dTX4EAG1gZkbtdbd9trL4vd/go-multiaddr" + ma "gx/ipfs/QmUAQaWbKxGCUTuoQVvvicbQNZ9APF5pDGWyAZSe93AtKH/go-multiaddr" + routing "gx/ipfs/QmXKuGUzLcgoQvp8M6ZEJzupWUNmx8NoqXEbYLMDjL4rjj/go-libp2p-routing" + pstore "gx/ipfs/QmXXCcQ7CLg5a81Ui9TTR35QcR4y7ZyihxwfjqaHfUVcVo/go-libp2p-peerstore" + key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - host "gx/ipfs/QmbiRCGZqhfcSjnm9icGz3oNQQdPLAnLWnKHXixaEWXVCN/go-libp2p/p2p/host" - inet "gx/ipfs/QmbiRCGZqhfcSjnm9icGz3oNQQdPLAnLWnKHXixaEWXVCN/go-libp2p/p2p/net" + cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + host "gx/ipfs/QmdML3R42PRSwnt46jSuEts9bHSqLctVYEjJqMR3UYV8ki/go-libp2p-host" + inet "gx/ipfs/QmdXimY9QHaasZmw6hWojWnCJvfgxETjZQfg9g6ZrA9wMX/go-libp2p-net" + peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) var log = logging.Logger("bitswap_network") @@ -146,9 +147,12 @@ func (bsnet *impl) FindProvidersAsync(ctx context.Context, k key.Key, max int) < out <- id } + // TEMPORARY SHIM UNTIL CID GETS PROPAGATED + c := cid.NewCidV0(k.ToMultihash()) + go func() { defer close(out) - providers := bsnet.routing.FindProvidersAsync(ctx, k, max) + providers := bsnet.routing.FindProvidersAsync(ctx, c, max) for info := range providers { if info.ID == bsnet.host.ID() { continue // ignore self as provider @@ -166,7 +170,8 @@ func (bsnet *impl) 
FindProvidersAsync(ctx context.Context, k key.Key, max int) < // Provide provides the key to the network func (bsnet *impl) Provide(ctx context.Context, k key.Key) error { - return bsnet.routing.Provide(ctx, k) + c := cid.NewCidV0(k.ToMultihash()) + return bsnet.routing.Provide(ctx, c) } // handleNewStream receives a new stream from the network. diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 4e440b490..bb0fb59d1 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -1,10 +1,10 @@ package notifications import ( + context "context" pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" blocks "github.com/ipfs/go-ipfs/blocks" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index c6aaac5ca..e58815649 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" + context "context" blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/stat.go b/bitswap/stat.go index ff201c3ae..e3518a0d7 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -1,7 +1,7 @@ package bitswap import ( - key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" "sort" ) diff --git 
a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 077859805..0e9331627 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" + peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 4fc767acd..31d572283 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -4,14 +4,14 @@ import ( "sync" "testing" + context "context" blocks "github.com/ipfs/go-ipfs/blocks" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 46a41ba5b..047202c7d 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -1,13 +1,13 @@ package bitswap import ( + context "context" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - mockpeernet 
"gx/ipfs/QmbiRCGZqhfcSjnm9icGz3oNQQdPLAnLWnKHXixaEWXVCN/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" + mockpeernet "gx/ipfs/QmcRa2qn6iCmap9bjp8jAwkvYAq13AUfxdY3rrYiaJbLum/go-libp2p/p2p/net/mock" + peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 2bb9773bd..b9b029178 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -1,6 +1,7 @@ package bitswap import ( + "context" "errors" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" @@ -8,10 +9,10 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" - routing "gx/ipfs/QmemZcG8WprPbnVX3AM43GhhSUiA3V6NjcTLAguvWzkdpQ/go-libp2p-routing" + routing "gx/ipfs/QmXKuGUzLcgoQvp8M6ZEJzupWUNmx8NoqXEbYLMDjL4rjj/go-libp2p-routing" + key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { @@ -98,10 +99,11 @@ func (nc *networkClient) FindProvidersAsync(ctx context.Context, k key.Key, max // deprecated once the ipfsnet.Mock is added. The code below is only // temporary. 
+ c := cid.NewCidV0(k.ToMultihash()) out := make(chan peer.ID) go func() { defer close(out) - providers := nc.routing.FindProvidersAsync(ctx, k, max) + providers := nc.routing.FindProvidersAsync(ctx, c, max) for info := range providers { select { case <-ctx.Done(): @@ -138,7 +140,8 @@ func (n *networkClient) NewMessageSender(ctx context.Context, p peer.ID) (bsnet. // Provide provides the key to the network func (nc *networkClient) Provide(ctx context.Context, k key.Key) error { - return nc.routing.Provide(ctx, k) + c := cid.NewCidV0(k.ToMultihash()) + return nc.routing.Provide(ctx, c) } func (nc *networkClient) SetDelegate(r bsnet.Receiver) { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 60aa66d9b..4987e2faf 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -3,16 +3,16 @@ package bitswap import ( "time" + context "context" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - p2ptestutil "gx/ipfs/QmbiRCGZqhfcSjnm9icGz3oNQQdPLAnLWnKHXixaEWXVCN/go-libp2p/p2p/test/util" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" ds_sync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync" + p2ptestutil "gx/ipfs/QmcRa2qn6iCmap9bjp8jAwkvYAq13AUfxdY3rrYiaJbLum/go-libp2p/p2p/test/util" + peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 9c31b4f38..1f514e9db 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" ) type ThreadSafe struct { diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index b39a3a3cc..79f8df790 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -4,13 +4,13 @@ import ( "sync" "time" + context "context" engine "github.com/ipfs/go-ipfs/exchange/bitswap/decision" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - key "gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) type WantManager struct { diff --git a/bitswap/workers.go b/bitswap/workers.go index 51fc1fde8..6254500b8 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -5,12 +5,12 @@ import ( "sync" "time" - process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" - procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" + context "context" + process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" + procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmWXjJo15p4pzT7cayEwZi2sWgJqLnGDof6ZGMh9xBgU1p/go-libp2p-peer" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - key 
"gx/ipfs/Qmce4Y4zg3sYr7xKM5UueS67vhNni6EeWgCRnb7MbLJMew/go-key" + key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) var TaskWorkerCount = 8 From 55267c4ec5abddb0e7027a38fd40d1e3c4076bc2 Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Fri, 30 Sep 2016 17:06:12 -0400 Subject: [PATCH 0506/1035] Don't use a separate LinkService for DAGService.GetLinks() Instead make LinkService a part of DAGService. The LinkService is now simply an interface that DAGService implements. Also provide a GetOfflineLinkService() method that the GC uses to get an offline instance. License: MIT Signed-off-by: Kevin Atkinson This commit was moved from ipfs/go-bitswap@e5c0ecbe2fe71ab7b931fb21fbea1e44c414829d --- bitswap/bitswap.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index f832e0787..21e4e9bdf 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -422,3 +422,7 @@ func (bs *Bitswap) GetWantlist() []key.Key { } return out } + +func (bs *Bitswap) IsOnline() bool { + return true +} From 7abc07bd9f70834d770b3071941e9e75e91ba89c Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 7 Oct 2016 11:14:45 -0700 Subject: [PATCH 0507/1035] cid: integrate cid into bitswap and blockstores License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@ac1dd9998de15236c930a64ed3136d771b87d63a --- bitswap/bitswap.go | 78 ++++++++++----------- bitswap/bitswap_test.go | 42 +++++------ bitswap/decision/bench_test.go | 8 ++- bitswap/decision/engine.go | 21 +++--- bitswap/decision/engine_test.go | 6 +- bitswap/decision/ledger.go | 17 ++--- bitswap/decision/peer_request_queue.go | 34 ++++----- bitswap/decision/peer_request_queue_test.go | 25 ++++--- bitswap/message/message.go | 47 +++++++------ bitswap/message/message_test.go | 63 ++++++++++------- bitswap/network/interface.go | 9 +-- bitswap/network/ipfs_impl.go | 13 ++-- 
bitswap/notifications/notifications.go | 19 ++--- bitswap/notifications/notifications_test.go | 24 +++---- bitswap/stat.go | 5 +- bitswap/testnet/virtual.go | 11 ++- bitswap/wantlist/wantlist.go | 31 ++++---- bitswap/wantmanager.go | 22 +++--- bitswap/workers.go | 26 +++---- 19 files changed, 259 insertions(+), 242 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 21e4e9bdf..206a38494 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -3,13 +3,12 @@ package bitswap import ( + "context" "errors" "math" "sync" "time" - key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" - blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" exchange "github.com/ipfs/go-ipfs/exchange" @@ -19,12 +18,12 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" - loggables "gx/ipfs/QmTMy4hVSY28DdwJ9kBz6y7q6MuioFzPcpM3Ma3aPjo1i3/go-libp2p-loggables" - context "context" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + loggables "gx/ipfs/QmTMy4hVSY28DdwJ9kBz6y7q6MuioFzPcpM3Ma3aPjo1i3/go-libp2p-loggables" + cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) @@ -90,8 +89,8 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, network: network, findKeys: make(chan *blockRequest, sizeBatchRequestChan), process: px, - newBlocks: make(chan key.Key, HasBlockBufferSize), - provideKeys: make(chan key.Key, provideKeysBufferSize), + newBlocks: make(chan *cid.Cid, HasBlockBufferSize), + provideKeys: make(chan *cid.Cid, provideKeysBufferSize), wm: NewWantManager(ctx, network), } go bs.wm.Run() @@ -137,9 +136,9 @@ 
type Bitswap struct { process process.Process - newBlocks chan key.Key + newBlocks chan *cid.Cid - provideKeys chan key.Key + provideKeys chan *cid.Cid counterLk sync.Mutex blocksRecvd int @@ -148,14 +147,15 @@ type Bitswap struct { } type blockRequest struct { - Key key.Key + Cid *cid.Cid Ctx context.Context } // GetBlock attempts to retrieve a particular block from peers within the // deadline enforced by the context. -func (bs *Bitswap) GetBlock(parent context.Context, k key.Key) (blocks.Block, error) { - if k == "" { +func (bs *Bitswap) GetBlock(parent context.Context, k *cid.Cid) (blocks.Block, error) { + if k == nil { + log.Error("nil cid in GetBlock") return nil, blockstore.ErrNotFound } @@ -165,18 +165,17 @@ func (bs *Bitswap) GetBlock(parent context.Context, k key.Key) (blocks.Block, er // functions called by this one. Otherwise those functions won't return // when this context's cancel func is executed. This is difficult to // enforce. May this comment keep you safe. - ctx, cancelFunc := context.WithCancel(parent) ctx = logging.ContextWithLoggable(ctx, loggables.Uuid("GetBlockRequest")) - log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k) - defer log.Event(ctx, "Bitswap.GetBlockRequest.End", &k) + log.Event(ctx, "Bitswap.GetBlockRequest.Start", k) + defer log.Event(ctx, "Bitswap.GetBlockRequest.End", k) defer func() { cancelFunc() }() - promise, err := bs.GetBlocks(ctx, []key.Key{k}) + promise, err := bs.GetBlocks(ctx, []*cid.Cid{k}) if err != nil { return nil, err } @@ -197,10 +196,10 @@ func (bs *Bitswap) GetBlock(parent context.Context, k key.Key) (blocks.Block, er } } -func (bs *Bitswap) WantlistForPeer(p peer.ID) []key.Key { - var out []key.Key +func (bs *Bitswap) WantlistForPeer(p peer.ID) []*cid.Cid { + var out []*cid.Cid for _, e := range bs.engine.WantlistForPeer(p) { - out = append(out, e.Key) + out = append(out, e.Cid) } return out } @@ -216,7 +215,7 @@ func (bs *Bitswap) LedgerForPeer(p peer.ID) *decision.Receipt { // NB: Your request remains 
open until the context expires. To conserve // resources, provide a context with a reasonably short deadline (ie. not one // that lasts throughout the lifetime of the server) -func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan blocks.Block, error) { +func (bs *Bitswap) GetBlocks(ctx context.Context, keys []*cid.Cid) (<-chan blocks.Block, error) { if len(keys) == 0 { out := make(chan blocks.Block) close(out) @@ -231,7 +230,7 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan blocks promise := bs.notifications.Subscribe(ctx, keys...) for _, k := range keys { - log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k) + log.Event(ctx, "Bitswap.GetBlockRequest.Start", k) } bs.wm.WantBlocks(ctx, keys) @@ -240,13 +239,13 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan blocks // be able to provide for all keys. This currently holds true in most // every situation. Later, this assumption may not hold as true. req := &blockRequest{ - Key: keys[0], + Cid: keys[0], Ctx: ctx, } - remaining := make(map[key.Key]struct{}) + remaining := cid.NewSet() for _, k := range keys { - remaining[k] = struct{}{} + remaining.Add(k) } out := make(chan blocks.Block) @@ -255,11 +254,8 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan blocks defer cancel() defer close(out) defer func() { - var toCancel []key.Key - for k, _ := range remaining { - toCancel = append(toCancel, k) - } - bs.CancelWants(toCancel) + // can't just defer this call on its own, arguments are resolved *when* the defer is created + bs.CancelWants(remaining.Keys()) }() for { select { @@ -268,7 +264,7 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan blocks return } - delete(remaining, blk.Key()) + remaining.Remove(blk.Cid()) select { case out <- blk: case <-ctx.Done(): @@ -289,8 +285,8 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan blocks } // CancelWant removes a given key 
from the wantlist -func (bs *Bitswap) CancelWants(keys []key.Key) { - bs.wm.CancelWants(keys) +func (bs *Bitswap) CancelWants(cids []*cid.Cid) { + bs.wm.CancelWants(cids) } // HasBlock announces the existance of a block to this bitswap service. The @@ -318,7 +314,7 @@ func (bs *Bitswap) HasBlock(blk blocks.Block) error { bs.engine.AddBlock(blk) select { - case bs.newBlocks <- blk.Key(): + case bs.newBlocks <- blk.Cid(): // send block off to be reprovided case <-bs.process.Closing(): return bs.process.Close() @@ -340,13 +336,13 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg } // quickly send out cancels, reduces chances of duplicate block receives - var keys []key.Key + var keys []*cid.Cid for _, block := range iblocks { - if _, found := bs.wm.wl.Contains(block.Key()); !found { + if _, found := bs.wm.wl.Contains(block.Cid()); !found { log.Infof("received un-asked-for %s from %s", block, p) continue } - keys = append(keys, block.Key()) + keys = append(keys, block.Cid()) } bs.wm.CancelWants(keys) @@ -360,8 +356,8 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg return // ignore error, is either logged previously, or ErrAlreadyHaveBlock } - k := b.Key() - log.Event(ctx, "Bitswap.GetBlockRequest.End", &k) + k := b.Cid() + log.Event(ctx, "Bitswap.GetBlockRequest.End", k) log.Debugf("got block %s from %s", b, p) if err := bs.HasBlock(b); err != nil { @@ -378,7 +374,7 @@ func (bs *Bitswap) updateReceiveCounters(b blocks.Block) error { bs.counterLk.Lock() defer bs.counterLk.Unlock() bs.blocksRecvd++ - has, err := bs.blockstore.Has(b.Key()) + has, err := bs.blockstore.Has(b.Cid()) if err != nil { log.Infof("blockstore.Has error: %s", err) return err @@ -415,10 +411,10 @@ func (bs *Bitswap) Close() error { return bs.process.Close() } -func (bs *Bitswap) GetWantlist() []key.Key { - var out []key.Key +func (bs *Bitswap) GetWantlist() []*cid.Cid { + var out []*cid.Cid for _, e := range bs.wm.wl.Entries() { - 
out = append(out, e.Key) + out = append(out, e.Cid) } return out } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index e15e92df0..2ec9ef5a1 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -2,21 +2,21 @@ package bitswap import ( "bytes" + "context" "sync" "testing" "time" - context "context" - detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - travis "github.com/ipfs/go-ipfs/thirdparty/testutil/ci/travis" - blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + travis "github.com/ipfs/go-ipfs/thirdparty/testutil/ci/travis" + + detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" + cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" p2ptestutil "gx/ipfs/QmcRa2qn6iCmap9bjp8jAwkvYAq13AUfxdY3rrYiaJbLum/go-libp2p/p2p/test/util" ) @@ -38,7 +38,7 @@ func TestClose(t *testing.T) { bitswap := sesgen.Next() bitswap.Exchange.Close() - bitswap.Exchange.GetBlock(context.Background(), block.Key()) + bitswap.Exchange.GetBlock(context.Background(), block.Cid()) } func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this @@ -57,7 +57,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) defer cancel() - _, err := solo.Exchange.GetBlock(ctx, block.Key()) + _, err := solo.Exchange.GetBlock(ctx, block.Cid()) if err != context.DeadlineExceeded { t.Fatal("Expected DeadlineExceeded error") @@ -84,7 +84,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { ctx, cancel := 
context.WithTimeout(context.Background(), time.Second) defer cancel() - received, err := wantsBlock.Exchange.GetBlock(ctx, block.Key()) + received, err := wantsBlock.Exchange.GetBlock(ctx, block.Cid()) if err != nil { t.Log(err) t.Fatal("Expected to succeed") @@ -176,10 +176,10 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { } } - var blkeys []key.Key + var blkeys []*cid.Cid first := instances[0] for _, b := range blocks { - blkeys = append(blkeys, b.Key()) + blkeys = append(blkeys, b.Cid()) first.Exchange.HasBlock(b) } @@ -216,7 +216,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { for _, inst := range instances { for _, b := range blocks { - if _, err := inst.Blockstore().Get(b.Key()); err != nil { + if _, err := inst.Blockstore().Get(b.Cid()); err != nil { t.Fatal(err) } } @@ -224,8 +224,8 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { } func getOrFail(bitswap Instance, b blocks.Block, t *testing.T, wg *sync.WaitGroup) { - if _, err := bitswap.Blockstore().Get(b.Key()); err != nil { - _, err := bitswap.Exchange.GetBlock(context.Background(), b.Key()) + if _, err := bitswap.Blockstore().Get(b.Cid()); err != nil { + _, err := bitswap.Exchange.GetBlock(context.Background(), b.Cid()) if err != nil { t.Fatal(err) } @@ -260,7 +260,7 @@ func TestSendToWantingPeer(t *testing.T) { // peerA requests and waits for block alpha ctx, cancel := context.WithTimeout(context.Background(), waitTime) defer cancel() - alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []key.Key{alpha.Key()}) + alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []*cid.Cid{alpha.Cid()}) if err != nil { t.Fatal(err) } @@ -277,7 +277,7 @@ func TestSendToWantingPeer(t *testing.T) { t.Fatal("context timed out and broke promise channel!") } - if blkrecvd.Key() != alpha.Key() { + if !blkrecvd.Cid().Equals(alpha.Cid()) { t.Fatal("Wrong block!") } @@ -292,7 +292,7 @@ func TestEmptyKey(t *testing.T) { ctx, cancel := 
context.WithTimeout(context.Background(), time.Second*5) defer cancel() - _, err := bs.GetBlock(ctx, key.Key("")) + _, err := bs.GetBlock(ctx, nil) if err != blockstore.ErrNotFound { t.Error("empty str key should return ErrNotFound") } @@ -315,7 +315,7 @@ func TestBasicBitswap(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Key()) + blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) if err != nil { t.Fatal(err) } @@ -341,7 +341,7 @@ func TestDoubleGet(t *testing.T) { blocks := bg.Blocks(1) ctx1, cancel1 := context.WithCancel(context.Background()) - blkch1, err := instances[1].Exchange.GetBlocks(ctx1, []key.Key{blocks[0].Key()}) + blkch1, err := instances[1].Exchange.GetBlocks(ctx1, []*cid.Cid{blocks[0].Cid()}) if err != nil { t.Fatal(err) } @@ -349,7 +349,7 @@ func TestDoubleGet(t *testing.T) { ctx2, cancel2 := context.WithCancel(context.Background()) defer cancel2() - blkch2, err := instances[1].Exchange.GetBlocks(ctx2, []key.Key{blocks[0].Key()}) + blkch2, err := instances[1].Exchange.GetBlocks(ctx2, []*cid.Cid{blocks[0].Cid()}) if err != nil { t.Fatal(err) } @@ -396,9 +396,9 @@ func TestWantlistCleanup(t *testing.T) { bswap := instances.Exchange blocks := bg.Blocks(20) - var keys []key.Key + var keys []*cid.Cid for _, b := range blocks { - keys = append(keys, b.Key()) + keys = append(keys, b.Cid()) } ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 8a8fd3db1..cc429278c 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -1,12 +1,14 @@ package decision import ( + "fmt" "math" "testing" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + cid 
"gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) @@ -21,6 +23,8 @@ func BenchmarkTaskQueuePush(b *testing.B) { } b.ResetTimer() for i := 0; i < b.N; i++ { - q.Push(&wantlist.Entry{Key: key.Key(i), Priority: math.MaxInt32}, peers[i%len(peers)]) + c := cid.NewCidV0(u.Hash([]byte(fmt.Sprint(i)))) + + q.Push(&wantlist.Entry{Cid: c, Priority: math.MaxInt32}, peers[i%len(peers)]) } } diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 3eddeff86..d494554d0 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -169,8 +169,9 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { // with a task in hand, we're ready to prepare the envelope... - block, err := e.bs.Get(nextTask.Entry.Key) + block, err := e.bs.Get(nextTask.Entry.Cid) if err != nil { + log.Errorf("tried to execute a task and errored fetching block: %s", err) // If we don't have the block, don't hold that against the peer // make sure to update that the task has been 'completed' nextTask.Done() @@ -233,13 +234,13 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { for _, entry := range m.Wantlist() { if entry.Cancel { - log.Debugf("%s cancel %s", p, entry.Key) - l.CancelWant(entry.Key) - e.peerRequestQueue.Remove(entry.Key, p) + log.Debugf("%s cancel %s", p, entry.Cid) + l.CancelWant(entry.Cid) + e.peerRequestQueue.Remove(entry.Cid, p) } else { - log.Debugf("wants %s - %d", entry.Key, entry.Priority) - l.Wants(entry.Key, entry.Priority) - if exists, err := e.bs.Has(entry.Key); err == nil && exists { + log.Debugf("wants %s - %d", entry.Cid, entry.Priority) + l.Wants(entry.Cid, entry.Priority) + if exists, err := e.bs.Has(entry.Cid); err == nil && exists { e.peerRequestQueue.Push(entry.Entry, p) newWorkExists = true } @@ -258,7 +259,7 @@ func (e *Engine) 
addBlock(block blocks.Block) { for _, l := range e.ledgerMap { l.lk.Lock() - if entry, ok := l.WantListContains(block.Key()); ok { + if entry, ok := l.WantListContains(block.Cid()); ok { e.peerRequestQueue.Push(entry, l.Partner) work = true } @@ -287,8 +288,8 @@ func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) error { l := e.findOrCreate(p) for _, block := range m.Blocks() { l.SentBytes(len(block.RawData())) - l.wantList.Remove(block.Key()) - e.peerRequestQueue.Remove(block.Key(), p) + l.wantList.Remove(block.Cid()) + e.peerRequestQueue.Remove(block.Cid(), p) } return nil diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 91dbc8fcd..d2d4fa0ca 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -167,7 +167,7 @@ func partnerWants(e *Engine, keys []string, partner peer.ID) { add := message.New(false) for i, letter := range keys { block := blocks.NewBlock([]byte(letter)) - add.AddEntry(block.Key(), math.MaxInt32-i) + add.AddEntry(block.Cid(), math.MaxInt32-i) } e.MessageReceived(partner, add) } @@ -176,7 +176,7 @@ func partnerCancels(e *Engine, keys []string, partner peer.ID) { cancels := message.New(false) for _, k := range keys { block := blocks.NewBlock([]byte(k)) - cancels.Cancel(block.Key()) + cancels.Cancel(block.Cid()) } e.MessageReceived(partner, cancels) } @@ -187,7 +187,7 @@ func checkHandledInOrder(t *testing.T, e *Engine, keys []string) error { envelope := <-next received := envelope.Block expected := blocks.NewBlock([]byte(k)) - if received.Key() != expected.Key() { + if !received.Cid().Equals(expected.Cid()) { return errors.New(fmt.Sprintln("received", string(received.RawData()), "expected", string(expected.RawData()))) } } diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index b5217cf2b..b4b46ef11 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -5,19 +5,16 @@ import ( "time" wl 
"github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + + cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) -// keySet is just a convenient alias for maps of keys, where we only care -// access/lookups. -type keySet map[key.Key]struct{} - func newLedger(p peer.ID) *ledger { return &ledger{ wantList: wl.New(), Partner: p, - sentToPeer: make(map[key.Key]time.Time), + sentToPeer: make(map[string]time.Time), } } @@ -44,7 +41,7 @@ type ledger struct { // sentToPeer is a set of keys to ensure we dont send duplicate blocks // to a given peer - sentToPeer map[key.Key]time.Time + sentToPeer map[string]time.Time lk sync.Mutex } @@ -78,16 +75,16 @@ func (l *ledger) ReceivedBytes(n int) { l.Accounting.BytesRecv += uint64(n) } -func (l *ledger) Wants(k key.Key, priority int) { +func (l *ledger) Wants(k *cid.Cid, priority int) { log.Debugf("peer %s wants %s", l.Partner, k) l.wantList.Add(k, priority) } -func (l *ledger) CancelWant(k key.Key) { +func (l *ledger) CancelWant(k *cid.Cid) { l.wantList.Remove(k) } -func (l *ledger) WantListContains(k key.Key) (*wl.Entry, bool) { +func (l *ledger) WantListContains(k *cid.Cid) (*wl.Entry, bool) { return l.wantList.Contains(k) } diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 732f0d4d4..742bcd6ff 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -6,7 +6,8 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + + cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) @@ -14,7 +15,7 @@ type peerRequestQueue interface { // Pop returns the next peerRequestTask. 
Returns nil if the peerRequestQueue is empty. Pop() *peerRequestTask Push(entry *wantlist.Entry, to peer.ID) - Remove(k key.Key, p peer.ID) + Remove(k *cid.Cid, p peer.ID) // NB: cannot expose simply expose taskQueue.Len because trashed elements // may exist. These trashed elements should not contribute to the count. @@ -57,12 +58,11 @@ func (tl *prq) Push(entry *wantlist.Entry, to peer.ID) { partner.activelk.Lock() defer partner.activelk.Unlock() - _, ok = partner.activeBlocks[entry.Key] - if ok { + if partner.activeBlocks.Has(entry.Cid) { return } - if task, ok := tl.taskMap[taskKey(to, entry.Key)]; ok { + if task, ok := tl.taskMap[taskKey(to, entry.Cid)]; ok { task.Entry.Priority = entry.Priority partner.taskQueue.Update(task.index) return @@ -74,7 +74,7 @@ func (tl *prq) Push(entry *wantlist.Entry, to peer.ID) { created: time.Now(), Done: func() { tl.lock.Lock() - partner.TaskDone(entry.Key) + partner.TaskDone(entry.Cid) tl.pQueue.Update(partner.Index()) tl.lock.Unlock() }, @@ -104,7 +104,7 @@ func (tl *prq) Pop() *peerRequestTask { continue // discarding tasks that have been removed } - partner.StartTask(out.Entry.Key) + partner.StartTask(out.Entry.Cid) partner.requests-- break // and return |out| } @@ -114,7 +114,7 @@ func (tl *prq) Pop() *peerRequestTask { } // Remove removes a task from the queue -func (tl *prq) Remove(k key.Key, p peer.ID) { +func (tl *prq) Remove(k *cid.Cid, p peer.ID) { tl.lock.Lock() t, ok := tl.taskMap[taskKey(p, k)] if ok { @@ -181,7 +181,7 @@ type peerRequestTask struct { // Key uniquely identifies a task. func (t *peerRequestTask) Key() string { - return taskKey(t.Target, t.Entry.Key) + return taskKey(t.Target, t.Entry.Cid) } // Index implements pq.Elem @@ -195,8 +195,8 @@ func (t *peerRequestTask) SetIndex(i int) { } // taskKey returns a key that uniquely identifies a task. 
-func taskKey(p peer.ID, k key.Key) string { - return string(p) + string(k) +func taskKey(p peer.ID, k *cid.Cid) string { + return string(p) + k.KeyString() } // FIFO is a basic task comparator that returns tasks in the order created. @@ -226,7 +226,7 @@ type activePartner struct { activelk sync.Mutex active int - activeBlocks map[key.Key]struct{} + activeBlocks *cid.Set // requests is the number of blocks this peer is currently requesting // request need not be locked around as it will only be modified under @@ -245,7 +245,7 @@ type activePartner struct { func newActivePartner() *activePartner { return &activePartner{ taskQueue: pq.New(wrapCmp(V1)), - activeBlocks: make(map[key.Key]struct{}), + activeBlocks: cid.NewSet(), } } @@ -281,17 +281,17 @@ func partnerCompare(a, b pq.Elem) bool { } // StartTask signals that a task was started for this partner -func (p *activePartner) StartTask(k key.Key) { +func (p *activePartner) StartTask(k *cid.Cid) { p.activelk.Lock() - p.activeBlocks[k] = struct{}{} + p.activeBlocks.Add(k) p.active++ p.activelk.Unlock() } // TaskDone signals that a task was completed for this partner -func (p *activePartner) TaskDone(k key.Key) { +func (p *activePartner) TaskDone(k *cid.Cid) { p.activelk.Lock() - delete(p.activeBlocks, k) + p.activeBlocks.Remove(k) p.active-- if p.active < 0 { panic("more tasks finished than started!") diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 22a5f164d..6a82d3f20 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -1,6 +1,7 @@ package decision import ( + "fmt" "math" "math/rand" "sort" @@ -9,7 +10,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + u 
"gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" ) func TestPushPop(t *testing.T) { @@ -41,10 +43,13 @@ func TestPushPop(t *testing.T) { for _, index := range rand.Perm(len(alphabet)) { // add blocks for all letters letter := alphabet[index] t.Log(partner.String()) - prq.Push(&wantlist.Entry{Key: key.Key(letter), Priority: math.MaxInt32 - index}, partner) + + c := cid.NewCidV0(u.Hash([]byte(letter))) + prq.Push(&wantlist.Entry{Cid: c, Priority: math.MaxInt32 - index}, partner) } for _, consonant := range consonants { - prq.Remove(key.Key(consonant), partner) + c := cid.NewCidV0(u.Hash([]byte(consonant))) + prq.Remove(c, partner) } prq.fullThaw() @@ -56,12 +61,13 @@ func TestPushPop(t *testing.T) { break } - out = append(out, string(received.Entry.Key)) + out = append(out, received.Entry.Cid.String()) } // Entries popped should already be in correct order for i, expected := range vowels { - if out[i] != expected { + exp := cid.NewCidV0(u.Hash([]byte(expected))).String() + if out[i] != exp { t.Fatal("received", out[i], "expected", expected) } } @@ -78,10 +84,11 @@ func TestPeerRepeats(t *testing.T) { // Have each push some blocks for i := 0; i < 5; i++ { - prq.Push(&wantlist.Entry{Key: key.Key(i)}, a) - prq.Push(&wantlist.Entry{Key: key.Key(i)}, b) - prq.Push(&wantlist.Entry{Key: key.Key(i)}, c) - prq.Push(&wantlist.Entry{Key: key.Key(i)}, d) + elcid := cid.NewCidV0(u.Hash([]byte(fmt.Sprint(i)))) + prq.Push(&wantlist.Entry{Cid: elcid}, a) + prq.Push(&wantlist.Entry{Cid: elcid}, b) + prq.Push(&wantlist.Entry{Cid: elcid}, c) + prq.Push(&wantlist.Entry{Cid: elcid}, d) } // now, pop off four entries, there should be one from each diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 2c1947cfe..5dc7be1bd 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -1,16 +1,17 @@ package message import ( + "fmt" "io" blocks "github.com/ipfs/go-ipfs/blocks" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" 
wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" - inet "gx/ipfs/QmdXimY9QHaasZmw6hWojWnCJvfgxETjZQfg9g6ZrA9wMX/go-libp2p-net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" + cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + inet "gx/ipfs/QmdXimY9QHaasZmw6hWojWnCJvfgxETjZQfg9g6ZrA9wMX/go-libp2p-net" ) // TODO move message.go into the bitswap package @@ -25,9 +26,9 @@ type BitSwapMessage interface { Blocks() []blocks.Block // AddEntry adds an entry to the Wantlist. - AddEntry(key key.Key, priority int) + AddEntry(key *cid.Cid, priority int) - Cancel(key key.Key) + Cancel(key *cid.Cid) Empty() bool @@ -47,8 +48,8 @@ type Exportable interface { type impl struct { full bool - wantlist map[key.Key]Entry - blocks map[key.Key]blocks.Block + wantlist map[string]Entry + blocks map[string]blocks.Block } func New(full bool) BitSwapMessage { @@ -57,8 +58,8 @@ func New(full bool) BitSwapMessage { func newMsg(full bool) *impl { return &impl{ - blocks: make(map[key.Key]blocks.Block), - wantlist: make(map[key.Key]Entry), + blocks: make(map[string]blocks.Block), + wantlist: make(map[string]Entry), full: full, } } @@ -68,16 +69,20 @@ type Entry struct { Cancel bool } -func newMessageFromProto(pbm pb.Message) BitSwapMessage { +func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) { m := newMsg(pbm.GetWantlist().GetFull()) for _, e := range pbm.GetWantlist().GetEntries() { - m.addEntry(key.Key(e.GetBlock()), int(e.GetPriority()), e.GetCancel()) + c, err := cid.Cast([]byte(e.GetBlock())) + if err != nil { + return nil, fmt.Errorf("incorrectly formatted cid in wantlist: %s", err) + } + m.addEntry(c, int(e.GetPriority()), e.GetCancel()) } for _, d := range pbm.GetBlocks() { b := blocks.NewBlock(d) m.AddBlock(b) } - return m + return m, nil } func (m *impl) Full() 
bool { @@ -104,16 +109,17 @@ func (m *impl) Blocks() []blocks.Block { return bs } -func (m *impl) Cancel(k key.Key) { - delete(m.wantlist, k) +func (m *impl) Cancel(k *cid.Cid) { + delete(m.wantlist, k.KeyString()) m.addEntry(k, 0, true) } -func (m *impl) AddEntry(k key.Key, priority int) { +func (m *impl) AddEntry(k *cid.Cid, priority int) { m.addEntry(k, priority, false) } -func (m *impl) addEntry(k key.Key, priority int, cancel bool) { +func (m *impl) addEntry(c *cid.Cid, priority int, cancel bool) { + k := c.KeyString() e, exists := m.wantlist[k] if exists { e.Priority = priority @@ -121,7 +127,7 @@ func (m *impl) addEntry(k key.Key, priority int, cancel bool) { } else { m.wantlist[k] = Entry{ Entry: &wantlist.Entry{ - Key: k, + Cid: c, Priority: priority, }, Cancel: cancel, @@ -130,7 +136,7 @@ func (m *impl) addEntry(k key.Key, priority int, cancel bool) { } func (m *impl) AddBlock(b blocks.Block) { - m.blocks[b.Key()] = b + m.blocks[b.Cid().KeyString()] = b } func FromNet(r io.Reader) (BitSwapMessage, error) { @@ -144,8 +150,7 @@ func FromPBReader(pbr ggio.Reader) (BitSwapMessage, error) { return nil, err } - m := newMessageFromProto(*pb) - return m, nil + return newMessageFromProto(*pb) } func (m *impl) ToProto() *pb.Message { @@ -153,7 +158,7 @@ func (m *impl) ToProto() *pb.Message { pbm.Wantlist = new(pb.Message_Wantlist) for _, e := range m.wantlist { pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, &pb.Message_Wantlist_Entry{ - Block: proto.String(string(e.Key)), + Block: proto.String(e.Cid.KeyString()), Priority: proto.Int32(int32(e.Priority)), Cancel: proto.Bool(e.Cancel), }) @@ -176,7 +181,7 @@ func (m *impl) ToNet(w io.Writer) error { func (m *impl) Loggable() map[string]interface{} { var blocks []string for _, v := range m.blocks { - blocks = append(blocks, v.Key().B58String()) + blocks = append(blocks, v.Cid().String()) } return map[string]interface{}{ "blocks": blocks, diff --git a/bitswap/message/message_test.go 
b/bitswap/message/message_test.go index 56609c434..d516093b5 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -8,13 +8,18 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" - key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" ) +func mkFakeCid(s string) *cid.Cid { + return cid.NewCidV0(u.Hash([]byte(s))) +} + func TestAppendWanted(t *testing.T) { - const str = "foo" + str := mkFakeCid("foo") m := New(true) - m.AddEntry(key.Key(str), 1) + m.AddEntry(str, 1) if !wantlistContains(m.ToProto().GetWantlist(), str) { t.Fail() @@ -23,16 +28,20 @@ func TestAppendWanted(t *testing.T) { } func TestNewMessageFromProto(t *testing.T) { - const str = "a_key" + str := mkFakeCid("a_key") protoMessage := new(pb.Message) protoMessage.Wantlist = new(pb.Message_Wantlist) protoMessage.Wantlist.Entries = []*pb.Message_Wantlist_Entry{ - {Block: proto.String(str)}, + {Block: proto.String(str.KeyString())}, } if !wantlistContains(protoMessage.Wantlist, str) { t.Fail() } - m := newMessageFromProto(*protoMessage) + m, err := newMessageFromProto(*protoMessage) + if err != nil { + t.Fatal(err) + } + if !wantlistContains(m.ToProto().GetWantlist(), str) { t.Fail() } @@ -60,10 +69,10 @@ func TestAppendBlock(t *testing.T) { } func TestWantlist(t *testing.T) { - keystrs := []string{"foo", "bar", "baz", "bat"} + keystrs := []*cid.Cid{mkFakeCid("foo"), mkFakeCid("bar"), mkFakeCid("baz"), mkFakeCid("bat")} m := New(true) for _, s := range keystrs { - m.AddEntry(key.Key(s), 1) + m.AddEntry(s, 1) } exported := m.Wantlist() @@ -71,22 +80,22 @@ func TestWantlist(t *testing.T) { present := false for _, s := range keystrs { - if s == string(k.Key) { + if s.Equals(k.Cid) { present = true } } if !present { - t.Logf("%v isn't in original list", k.Key) + 
t.Logf("%v isn't in original list", k.Cid) t.Fail() } } } func TestCopyProtoByValue(t *testing.T) { - const str = "foo" + str := mkFakeCid("foo") m := New(true) protoBeforeAppend := m.ToProto() - m.AddEntry(key.Key(str), 1) + m.AddEntry(str, 1) if wantlistContains(protoBeforeAppend.GetWantlist(), str) { t.Fail() } @@ -94,11 +103,11 @@ func TestCopyProtoByValue(t *testing.T) { func TestToNetFromNetPreservesWantList(t *testing.T) { original := New(true) - original.AddEntry(key.Key("M"), 1) - original.AddEntry(key.Key("B"), 1) - original.AddEntry(key.Key("D"), 1) - original.AddEntry(key.Key("T"), 1) - original.AddEntry(key.Key("F"), 1) + original.AddEntry(mkFakeCid("M"), 1) + original.AddEntry(mkFakeCid("B"), 1) + original.AddEntry(mkFakeCid("D"), 1) + original.AddEntry(mkFakeCid("T"), 1) + original.AddEntry(mkFakeCid("F"), 1) buf := new(bytes.Buffer) if err := original.ToNet(buf); err != nil { @@ -110,13 +119,13 @@ func TestToNetFromNetPreservesWantList(t *testing.T) { t.Fatal(err) } - keys := make(map[key.Key]bool) + keys := make(map[string]bool) for _, k := range copied.Wantlist() { - keys[k.Key] = true + keys[k.Cid.KeyString()] = true } for _, k := range original.Wantlist() { - if _, ok := keys[k.Key]; !ok { + if _, ok := keys[k.Cid.KeyString()]; !ok { t.Fatalf("Key Missing: \"%v\"", k) } } @@ -140,21 +149,21 @@ func TestToAndFromNetMessage(t *testing.T) { t.Fatal(err) } - keys := make(map[key.Key]bool) + keys := make(map[string]bool) for _, b := range m2.Blocks() { - keys[b.Key()] = true + keys[b.Cid().KeyString()] = true } for _, b := range original.Blocks() { - if _, ok := keys[b.Key()]; !ok { + if _, ok := keys[b.Cid().KeyString()]; !ok { t.Fail() } } } -func wantlistContains(wantlist *pb.Message_Wantlist, x string) bool { +func wantlistContains(wantlist *pb.Message_Wantlist, c *cid.Cid) bool { for _, e := range wantlist.GetEntries() { - if e.GetBlock() == x { + if e.GetBlock() == c.KeyString() { return true } } @@ -174,8 +183,8 @@ func TestDuplicates(t 
*testing.T) { b := blocks.NewBlock([]byte("foo")) msg := New(true) - msg.AddEntry(b.Key(), 1) - msg.AddEntry(b.Key(), 1) + msg.AddEntry(b.Cid(), 1) + msg.AddEntry(b.Cid(), 1) if len(msg.Wantlist()) != 1 { t.Fatal("Duplicate in BitSwapMessage") } diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 72cd80a67..e7aa86cb6 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -1,10 +1,11 @@ package network import ( - context "context" + "context" + bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" + cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) @@ -52,8 +53,8 @@ type Receiver interface { type Routing interface { // FindProvidersAsync returns a channel of providers for the given key - FindProvidersAsync(context.Context, key.Key, int) <-chan peer.ID + FindProvidersAsync(context.Context, *cid.Cid, int) <-chan peer.ID // Provide provides the key to the network - Provide(context.Context, key.Key) error + Provide(context.Context, *cid.Cid) error } diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index af18965cc..45312130f 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -10,7 +10,6 @@ import ( ma "gx/ipfs/QmUAQaWbKxGCUTuoQVvvicbQNZ9APF5pDGWyAZSe93AtKH/go-multiaddr" routing "gx/ipfs/QmXKuGUzLcgoQvp8M6ZEJzupWUNmx8NoqXEbYLMDjL4rjj/go-libp2p-routing" pstore "gx/ipfs/QmXXCcQ7CLg5a81Ui9TTR35QcR4y7ZyihxwfjqaHfUVcVo/go-libp2p-peerstore" - key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" host 
"gx/ipfs/QmdML3R42PRSwnt46jSuEts9bHSqLctVYEjJqMR3UYV8ki/go-libp2p-host" @@ -130,7 +129,7 @@ func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { } // FindProvidersAsync returns a channel of providers for the given key -func (bsnet *impl) FindProvidersAsync(ctx context.Context, k key.Key, max int) <-chan peer.ID { +func (bsnet *impl) FindProvidersAsync(ctx context.Context, k *cid.Cid, max int) <-chan peer.ID { // Since routing queries are expensive, give bitswap the peers to which we // have open connections. Note that this may cause issues if bitswap starts @@ -147,12 +146,9 @@ func (bsnet *impl) FindProvidersAsync(ctx context.Context, k key.Key, max int) < out <- id } - // TEMPORARY SHIM UNTIL CID GETS PROPAGATED - c := cid.NewCidV0(k.ToMultihash()) - go func() { defer close(out) - providers := bsnet.routing.FindProvidersAsync(ctx, c, max) + providers := bsnet.routing.FindProvidersAsync(ctx, k, max) for info := range providers { if info.ID == bsnet.host.ID() { continue // ignore self as provider @@ -169,9 +165,8 @@ func (bsnet *impl) FindProvidersAsync(ctx context.Context, k key.Key, max int) < } // Provide provides the key to the network -func (bsnet *impl) Provide(ctx context.Context, k key.Key) error { - c := cid.NewCidV0(k.ToMultihash()) - return bsnet.routing.Provide(ctx, c) +func (bsnet *impl) Provide(ctx context.Context, k *cid.Cid) error { + return bsnet.routing.Provide(ctx, k) } // handleNewStream receives a new stream from the network. 
diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index bb0fb59d1..41c38ad48 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -1,17 +1,19 @@ package notifications import ( - context "context" - pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" + "context" + blocks "github.com/ipfs/go-ipfs/blocks" - key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + + pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" + cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" ) const bufferSize = 16 type PubSub interface { Publish(block blocks.Block) - Subscribe(ctx context.Context, keys ...key.Key) <-chan blocks.Block + Subscribe(ctx context.Context, keys ...*cid.Cid) <-chan blocks.Block Shutdown() } @@ -24,8 +26,7 @@ type impl struct { } func (ps *impl) Publish(block blocks.Block) { - topic := string(block.Key()) - ps.wrapped.Pub(block, topic) + ps.wrapped.Pub(block, block.Cid().KeyString()) } func (ps *impl) Shutdown() { @@ -35,7 +36,7 @@ func (ps *impl) Shutdown() { // Subscribe returns a channel of blocks for the given |keys|. |blockChannel| // is closed if the |ctx| times out or is cancelled, or after sending len(keys) // blocks. 
-func (ps *impl) Subscribe(ctx context.Context, keys ...key.Key) <-chan blocks.Block { +func (ps *impl) Subscribe(ctx context.Context, keys ...*cid.Cid) <-chan blocks.Block { blocksCh := make(chan blocks.Block, len(keys)) valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking @@ -71,10 +72,10 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...key.Key) <-chan blocks.Bl return blocksCh } -func toStrings(keys []key.Key) []string { +func toStrings(keys []*cid.Cid) []string { strs := make([]string, 0) for _, key := range keys { - strs = append(strs, string(key)) + strs = append(strs, key.KeyString()) } return strs } diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index e58815649..343ddb34c 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -2,13 +2,13 @@ package notifications import ( "bytes" + "context" "testing" "time" - context "context" blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" ) func TestDuplicates(t *testing.T) { @@ -17,7 +17,7 @@ func TestDuplicates(t *testing.T) { n := New() defer n.Shutdown() - ch := n.Subscribe(context.Background(), b1.Key(), b2.Key()) + ch := n.Subscribe(context.Background(), b1.Cid(), b2.Cid()) n.Publish(b1) blockRecvd, ok := <-ch @@ -41,7 +41,7 @@ func TestPublishSubscribe(t *testing.T) { n := New() defer n.Shutdown() - ch := n.Subscribe(context.Background(), blockSent.Key()) + ch := n.Subscribe(context.Background(), blockSent.Cid()) n.Publish(blockSent) blockRecvd, ok := <-ch @@ -59,7 +59,7 @@ func TestSubscribeMany(t *testing.T) { n := New() defer n.Shutdown() - ch := n.Subscribe(context.Background(), e1.Key(), e2.Key()) + ch := n.Subscribe(context.Background(), e1.Cid(), 
e2.Cid()) n.Publish(e1) r1, ok := <-ch @@ -83,8 +83,8 @@ func TestDuplicateSubscribe(t *testing.T) { n := New() defer n.Shutdown() - ch1 := n.Subscribe(context.Background(), e1.Key()) - ch2 := n.Subscribe(context.Background(), e1.Key()) + ch1 := n.Subscribe(context.Background(), e1.Cid()) + ch2 := n.Subscribe(context.Background(), e1.Cid()) n.Publish(e1) r1, ok := <-ch1 @@ -118,7 +118,7 @@ func TestCarryOnWhenDeadlineExpires(t *testing.T) { n := New() defer n.Shutdown() block := blocks.NewBlock([]byte("A Missed Connection")) - blockChannel := n.Subscribe(fastExpiringCtx, block.Key()) + blockChannel := n.Subscribe(fastExpiringCtx, block.Cid()) assertBlockChannelNil(t, blockChannel) } @@ -132,10 +132,10 @@ func TestDoesNotDeadLockIfContextCancelledBeforePublish(t *testing.T) { t.Log("generate a large number of blocks. exceed default buffer") bs := g.Blocks(1000) - ks := func() []key.Key { - var keys []key.Key + ks := func() []*cid.Cid { + var keys []*cid.Cid for _, b := range bs { - keys = append(keys, b.Key()) + keys = append(keys, b.Cid()) } return keys }() @@ -162,7 +162,7 @@ func assertBlocksEqual(t *testing.T, a, b blocks.Block) { if !bytes.Equal(a.RawData(), b.RawData()) { t.Fatal("blocks aren't equal") } - if a.Key() != b.Key() { + if a.Cid() != b.Cid() { t.Fatal("block keys aren't equal") } } diff --git a/bitswap/stat.go b/bitswap/stat.go index e3518a0d7..3f8ddc28e 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -1,13 +1,14 @@ package bitswap import ( - key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" "sort" + + cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" ) type Stat struct { ProvideBufLen int - Wantlist []key.Key + Wantlist []*cid.Cid Peers []string BlocksReceived int DupBlksReceived int diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index b9b029178..b9d7c5a50 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,7 +10,6 @@ import ( delay 
"github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" routing "gx/ipfs/QmXKuGUzLcgoQvp8M6ZEJzupWUNmx8NoqXEbYLMDjL4rjj/go-libp2p-routing" - key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) @@ -92,18 +91,17 @@ func (nc *networkClient) SendMessage( } // FindProvidersAsync returns a channel of providers for the given key -func (nc *networkClient) FindProvidersAsync(ctx context.Context, k key.Key, max int) <-chan peer.ID { +func (nc *networkClient) FindProvidersAsync(ctx context.Context, k *cid.Cid, max int) <-chan peer.ID { // NB: this function duplicates the PeerInfo -> ID transformation in the // bitswap network adapter. Not to worry. This network client will be // deprecated once the ipfsnet.Mock is added. The code below is only // temporary. - c := cid.NewCidV0(k.ToMultihash()) out := make(chan peer.ID) go func() { defer close(out) - providers := nc.routing.FindProvidersAsync(ctx, c, max) + providers := nc.routing.FindProvidersAsync(ctx, k, max) for info := range providers { select { case <-ctx.Done(): @@ -139,9 +137,8 @@ func (n *networkClient) NewMessageSender(ctx context.Context, p peer.ID) (bsnet. 
} // Provide provides the key to the network -func (nc *networkClient) Provide(ctx context.Context, k key.Key) error { - c := cid.NewCidV0(k.ToMultihash()) - return nc.routing.Provide(ctx, c) +func (nc *networkClient) Provide(ctx context.Context, k *cid.Cid) error { + return nc.routing.Provide(ctx, k) } func (nc *networkClient) SetDelegate(r bsnet.Receiver) { diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 1f514e9db..bf89c4db9 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" ) type ThreadSafe struct { @@ -16,11 +16,11 @@ type ThreadSafe struct { // not threadsafe type Wantlist struct { - set map[key.Key]*Entry + set map[string]*Entry } type Entry struct { - Key key.Key + Cid *cid.Cid Priority int RefCnt int @@ -40,11 +40,11 @@ func NewThreadSafe() *ThreadSafe { func New() *Wantlist { return &Wantlist{ - set: make(map[key.Key]*Entry), + set: make(map[string]*Entry), } } -func (w *ThreadSafe) Add(k key.Key, priority int) bool { +func (w *ThreadSafe) Add(k *cid.Cid, priority int) bool { w.lk.Lock() defer w.lk.Unlock() return w.Wantlist.Add(k, priority) @@ -56,13 +56,13 @@ func (w *ThreadSafe) AddEntry(e *Entry) bool { return w.Wantlist.AddEntry(e) } -func (w *ThreadSafe) Remove(k key.Key) bool { +func (w *ThreadSafe) Remove(k *cid.Cid) bool { w.lk.Lock() defer w.lk.Unlock() return w.Wantlist.Remove(k) } -func (w *ThreadSafe) Contains(k key.Key) (*Entry, bool) { +func (w *ThreadSafe) Contains(k *cid.Cid) (*Entry, bool) { w.lk.RLock() defer w.lk.RUnlock() return w.Wantlist.Contains(k) @@ -90,14 +90,15 @@ func (w *Wantlist) Len() int { return len(w.set) } -func (w *Wantlist) Add(k key.Key, priority int) bool { +func (w *Wantlist) Add(c *cid.Cid, priority int) bool { + k := c.KeyString() if e, ok := w.set[k]; ok { e.RefCnt++ return false 
} w.set[k] = &Entry{ - Key: k, + Cid: c, Priority: priority, RefCnt: 1, } @@ -106,15 +107,17 @@ func (w *Wantlist) Add(k key.Key, priority int) bool { } func (w *Wantlist) AddEntry(e *Entry) bool { - if ex, ok := w.set[e.Key]; ok { + k := e.Cid.KeyString() + if ex, ok := w.set[k]; ok { ex.RefCnt++ return false } - w.set[e.Key] = e + w.set[k] = e return true } -func (w *Wantlist) Remove(k key.Key) bool { +func (w *Wantlist) Remove(c *cid.Cid) bool { + k := c.KeyString() e, ok := w.set[k] if !ok { return false @@ -128,8 +131,8 @@ func (w *Wantlist) Remove(k key.Key) bool { return false } -func (w *Wantlist) Contains(k key.Key) (*Entry, bool) { - e, ok := w.set[k] +func (w *Wantlist) Contains(k *cid.Cid) (*Entry, bool) { + e, ok := w.set[k.KeyString()] return e, ok } diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 79f8df790..eca8739d8 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -1,15 +1,15 @@ package bitswap import ( + "context" "sync" "time" - context "context" engine "github.com/ipfs/go-ipfs/exchange/bitswap/decision" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) @@ -51,7 +51,7 @@ type msgPair struct { type cancellation struct { who peer.ID - blk key.Key + blk *cid.Cid } type msgQueue struct { @@ -69,23 +69,23 @@ type msgQueue struct { done chan struct{} } -func (pm *WantManager) WantBlocks(ctx context.Context, ks []key.Key) { +func (pm *WantManager) WantBlocks(ctx context.Context, ks []*cid.Cid) { log.Infof("want blocks: %s", ks) pm.addEntries(ctx, ks, false) } -func (pm *WantManager) CancelWants(ks []key.Key) { +func (pm *WantManager) CancelWants(ks []*cid.Cid) { log.Infof("cancel 
wants: %s", ks) pm.addEntries(context.TODO(), ks, true) } -func (pm *WantManager) addEntries(ctx context.Context, ks []key.Key, cancel bool) { +func (pm *WantManager) addEntries(ctx context.Context, ks []*cid.Cid, cancel bool) { var entries []*bsmsg.Entry for i, k := range ks { entries = append(entries, &bsmsg.Entry{ Cancel: cancel, Entry: &wantlist.Entry{ - Key: k, + Cid: k, Priority: kMaxPriority - i, RefCnt: 1, }, @@ -130,7 +130,7 @@ func (pm *WantManager) startPeerHandler(p peer.ID) *msgQueue { // new peer, we will want to give them our full wantlist fullwantlist := bsmsg.New(true) for _, e := range pm.wl.Entries() { - fullwantlist.AddEntry(e.Key, e.Priority) + fullwantlist.AddEntry(e.Cid, e.Priority) } mq.out = fullwantlist mq.work <- struct{}{} @@ -246,7 +246,7 @@ func (pm *WantManager) Run() { var filtered []*bsmsg.Entry for _, e := range entries { if e.Cancel { - if pm.wl.Remove(e.Key) { + if pm.wl.Remove(e.Cid) { filtered = append(filtered, e) } } else { @@ -323,9 +323,9 @@ func (mq *msgQueue) addMessage(entries []*bsmsg.Entry) { // one passed in for _, e := range entries { if e.Cancel { - mq.out.Cancel(e.Key) + mq.out.Cancel(e.Cid) } else { - mq.out.AddEntry(e.Key, e.Priority) + mq.out.AddEntry(e.Cid, e.Priority) } } } diff --git a/bitswap/workers.go b/bitswap/workers.go index 6254500b8..d7216ae66 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -1,15 +1,15 @@ package bitswap import ( + "context" "math/rand" "sync" "time" - context "context" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key" + cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) @@ -77,7 +77,7 @@ func (bs *Bitswap) provideWorker(px 
process.Process) { limit := make(chan struct{}, provideWorkerMax) - limitedGoProvide := func(k key.Key, wid int) { + limitedGoProvide := func(k *cid.Cid, wid int) { defer func() { // replace token when done <-limit @@ -85,7 +85,7 @@ func (bs *Bitswap) provideWorker(px process.Process) { ev := logging.LoggableMap{"ID": wid} ctx := procctx.OnClosingContext(px) // derive ctx from px - defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, &k).Done() + defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, k).Done() ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx defer cancel() @@ -121,9 +121,9 @@ func (bs *Bitswap) provideWorker(px process.Process) { func (bs *Bitswap) provideCollector(ctx context.Context) { defer close(bs.provideKeys) - var toProvide []key.Key - var nextKey key.Key - var keysOut chan key.Key + var toProvide []*cid.Cid + var nextKey *cid.Cid + var keysOut chan *cid.Cid for { select { @@ -181,7 +181,7 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) { // for new providers for blocks. 
i := rand.Intn(len(entries)) bs.findKeys <- &blockRequest{ - Key: entries[i].Key, + Cid: entries[i].Cid, Ctx: ctx, } case <-parent.Done(): @@ -192,23 +192,23 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) { func (bs *Bitswap) providerQueryManager(ctx context.Context) { var activeLk sync.Mutex - kset := key.NewKeySet() + kset := cid.NewSet() for { select { case e := <-bs.findKeys: activeLk.Lock() - if kset.Has(e.Key) { + if kset.Has(e.Cid) { activeLk.Unlock() continue } - kset.Add(e.Key) + kset.Add(e.Cid) activeLk.Unlock() go func(e *blockRequest) { child, cancel := context.WithTimeout(e.Ctx, providerRequestTimeout) defer cancel() - providers := bs.network.FindProvidersAsync(child, e.Key, maxProvidersPerRequest) + providers := bs.network.FindProvidersAsync(child, e.Cid, maxProvidersPerRequest) wg := &sync.WaitGroup{} for p := range providers { wg.Add(1) @@ -222,7 +222,7 @@ func (bs *Bitswap) providerQueryManager(ctx context.Context) { } wg.Wait() activeLk.Lock() - kset.Remove(e.Key) + kset.Remove(e.Cid) activeLk.Unlock() }(e) From 67dc4489865f8e4d3bbc42a03208b4712e17d0f8 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 8 Oct 2016 17:59:41 -0700 Subject: [PATCH 0508/1035] bitswap: protocol extension to handle cids This change adds the /ipfs/bitswap/1.1.0 protocol. The new protocol adds a 'payload' field to the protobuf message and deprecates the existing 'blocks' field. The 'payload' field is an array of pairs of cid prefixes and block data. The cid prefixes are used to ensure the correct codecs and hash functions are used to handle the block on the receiving end. 
License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@591491b13690e0d70c653d6da20dffd184be7820 --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/decision/peer_request_queue_test.go | 2 +- bitswap/message/message.go | 67 +++++++++++++++++++-- bitswap/message/message_test.go | 15 +++-- bitswap/message/pb/message.pb.go | 38 ++++++++++++ bitswap/message/pb/message.proto | 6 ++ bitswap/network/interface.go | 11 +++- bitswap/network/ipfs_impl.go | 62 ++++++++----------- bitswap/notifications/notifications.go | 2 +- bitswap/notifications/notifications_test.go | 2 +- bitswap/stat.go | 2 +- bitswap/testnet/virtual.go | 4 +- bitswap/wantlist/wantlist.go | 2 +- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 2 +- 19 files changed, 160 insertions(+), 67 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 206a38494..fd36f904a 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -23,7 +23,7 @@ import ( procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" loggables "gx/ipfs/QmTMy4hVSY28DdwJ9kBz6y7q6MuioFzPcpM3Ma3aPjo1i3/go-libp2p-loggables" - cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 2ec9ef5a1..ab46e3607 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( travis "github.com/ipfs/go-ipfs/thirdparty/testutil/ci/travis" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + cid 
"gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" p2ptestutil "gx/ipfs/QmcRa2qn6iCmap9bjp8jAwkvYAq13AUfxdY3rrYiaJbLum/go-libp2p/p2p/test/util" ) diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index cc429278c..91515875a 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,7 +7,7 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index b4b46ef11..c6e66451e 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,7 +6,7 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 742bcd6ff..63f4426d4 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 6a82d3f20..cf9913955 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ 
b/bitswap/decision/peer_request_queue_test.go @@ -10,7 +10,7 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" ) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 5dc7be1bd..ed58541d3 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -8,9 +8,9 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" - cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" inet "gx/ipfs/QmdXimY9QHaasZmw6hWojWnCJvfgxETjZQfg9g6ZrA9wMX/go-libp2p-net" ) @@ -42,8 +42,10 @@ type BitSwapMessage interface { } type Exportable interface { - ToProto() *pb.Message - ToNet(w io.Writer) error + ToProtoV0() *pb.Message + ToProtoV1() *pb.Message + ToNetV0(w io.Writer) error + ToNetV1(w io.Writer) error } type impl struct { @@ -78,10 +80,34 @@ func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) { } m.addEntry(c, int(e.GetPriority()), e.GetCancel()) } + + // deprecated for _, d := range pbm.GetBlocks() { + // CIDv0, sha256, protobuf only b := blocks.NewBlock(d) m.AddBlock(b) } + // + + for _, b := range pbm.GetPayload() { + pref, err := cid.PrefixFromBytes(b.GetPrefix()) + if err != nil { + return nil, err + } + + c, err := pref.Sum(b.GetData()) + if err != nil { + return nil, err + } + + blk, err := blocks.NewBlockWithCid(b.GetData(), c) + if err != nil { + return nil, err + } + + m.AddBlock(blk) + } + return m, nil } @@ -153,7 +179,7 @@ func 
FromPBReader(pbr ggio.Reader) (BitSwapMessage, error) { return newMessageFromProto(*pb) } -func (m *impl) ToProto() *pb.Message { +func (m *impl) ToProtoV0() *pb.Message { pbm := new(pb.Message) pbm.Wantlist = new(pb.Message_Wantlist) for _, e := range m.wantlist { @@ -169,10 +195,39 @@ func (m *impl) ToProto() *pb.Message { return pbm } -func (m *impl) ToNet(w io.Writer) error { +func (m *impl) ToProtoV1() *pb.Message { + pbm := new(pb.Message) + pbm.Wantlist = new(pb.Message_Wantlist) + for _, e := range m.wantlist { + pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, &pb.Message_Wantlist_Entry{ + Block: proto.String(e.Cid.KeyString()), + Priority: proto.Int32(int32(e.Priority)), + Cancel: proto.Bool(e.Cancel), + }) + } + for _, b := range m.Blocks() { + blk := &pb.Message_Block{ + Data: b.RawData(), + Prefix: b.Cid().Prefix().Bytes(), + } + pbm.Payload = append(pbm.Payload, blk) + } + return pbm +} + +func (m *impl) ToNetV0(w io.Writer) error { + pbw := ggio.NewDelimitedWriter(w) + + if err := pbw.WriteMsg(m.ToProtoV0()); err != nil { + return err + } + return nil +} + +func (m *impl) ToNetV1(w io.Writer) error { pbw := ggio.NewDelimitedWriter(w) - if err := pbw.WriteMsg(m.ToProto()); err != nil { + if err := pbw.WriteMsg(m.ToProtoV1()); err != nil { return err } return nil diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index d516093b5..4cfbf8f27 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -8,7 +8,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" - cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" ) @@ -21,10 +21,9 @@ func TestAppendWanted(t *testing.T) { m := New(true) m.AddEntry(str, 1) - if !wantlistContains(m.ToProto().GetWantlist(), str) { + if 
!wantlistContains(m.ToProtoV0().GetWantlist(), str) { t.Fail() } - m.ToProto().GetWantlist().GetEntries() } func TestNewMessageFromProto(t *testing.T) { @@ -42,7 +41,7 @@ func TestNewMessageFromProto(t *testing.T) { t.Fatal(err) } - if !wantlistContains(m.ToProto().GetWantlist(), str) { + if !wantlistContains(m.ToProtoV0().GetWantlist(), str) { t.Fail() } } @@ -60,7 +59,7 @@ func TestAppendBlock(t *testing.T) { } // assert strings are in proto message - for _, blockbytes := range m.ToProto().GetBlocks() { + for _, blockbytes := range m.ToProtoV0().GetBlocks() { s := bytes.NewBuffer(blockbytes).String() if !contains(strs, s) { t.Fail() @@ -94,7 +93,7 @@ func TestWantlist(t *testing.T) { func TestCopyProtoByValue(t *testing.T) { str := mkFakeCid("foo") m := New(true) - protoBeforeAppend := m.ToProto() + protoBeforeAppend := m.ToProtoV0() m.AddEntry(str, 1) if wantlistContains(protoBeforeAppend.GetWantlist(), str) { t.Fail() @@ -110,7 +109,7 @@ func TestToNetFromNetPreservesWantList(t *testing.T) { original.AddEntry(mkFakeCid("F"), 1) buf := new(bytes.Buffer) - if err := original.ToNet(buf); err != nil { + if err := original.ToNetV1(buf); err != nil { t.Fatal(err) } @@ -140,7 +139,7 @@ func TestToAndFromNetMessage(t *testing.T) { original.AddBlock(blocks.NewBlock([]byte("M"))) buf := new(bytes.Buffer) - if err := original.ToNet(buf); err != nil { + if err := original.ToNetV1(buf); err != nil { t.Fatal(err) } diff --git a/bitswap/message/pb/message.pb.go b/bitswap/message/pb/message.pb.go index 02f9f2944..18e4a60e3 100644 --- a/bitswap/message/pb/message.pb.go +++ b/bitswap/message/pb/message.pb.go @@ -14,15 +14,18 @@ It has these top-level messages: package bitswap_message_pb import proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" +import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal +var _ = fmt.Errorf var _ = math.Inf type Message struct { Wantlist *Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist" json:"wantlist,omitempty"` Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks" json:"blocks,omitempty"` + Payload []*Message_Block `protobuf:"bytes,3,rep,name=payload" json:"payload,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -44,6 +47,13 @@ func (m *Message) GetBlocks() [][]byte { return nil } +func (m *Message) GetPayload() []*Message_Block { + if m != nil { + return m.Payload + } + return nil +} + type Message_Wantlist struct { Entries []*Message_Wantlist_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` Full *bool `protobuf:"varint,2,opt,name=full" json:"full,omitempty"` @@ -100,5 +110,33 @@ func (m *Message_Wantlist_Entry) GetCancel() bool { return false } +type Message_Block struct { + Prefix []byte `protobuf:"bytes,1,opt,name=prefix" json:"prefix,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Message_Block) Reset() { *m = Message_Block{} } +func (m *Message_Block) String() string { return proto.CompactTextString(m) } +func (*Message_Block) ProtoMessage() {} + +func (m *Message_Block) GetPrefix() []byte { + if m != nil { + return m.Prefix + } + return nil +} + +func (m *Message_Block) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + func init() { + proto.RegisterType((*Message)(nil), "bitswap.message.pb.Message") + proto.RegisterType((*Message_Wantlist)(nil), "bitswap.message.pb.Message.Wantlist") + proto.RegisterType((*Message_Wantlist_Entry)(nil), "bitswap.message.pb.Message.Wantlist.Entry") + proto.RegisterType((*Message_Block)(nil), "bitswap.message.pb.Message.Block") } diff --git a/bitswap/message/pb/message.proto b/bitswap/message/pb/message.proto index 7c44f3a6b..bd4f41b3e 100644 --- a/bitswap/message/pb/message.proto +++ b/bitswap/message/pb/message.proto @@ -14,6 +14,12 
@@ message Message { optional bool full = 2; // whether this is the full wantlist. default to false } + message Block { + optional bytes prefix = 1; + optional bytes data = 2; + } + optional Wantlist wantlist = 1; repeated bytes blocks = 2; + repeated Block payload = 3; } diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index e7aa86cb6..3f61f43fa 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -4,13 +4,18 @@ import ( "context" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" - cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) -var ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.0.0" -var ProtocolBitswapOld protocol.ID = "/ipfs/bitswap" +var ( + // These two are equivalent, legacy + ProtocolBitswapOne protocol.ID = "/ipfs/bitswap/1.0.0" + ProtocolBitswapNoVers protocol.ID = "/ipfs/bitswap" + + ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.1.0" +) // BitSwapNetwork provides network connectivity for BitSwap sessions type BitSwapNetwork interface { diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 45312130f..2addd37d1 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -2,16 +2,17 @@ package network import ( "context" + "fmt" "io" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + routing "gx/ipfs/QmNUgVQTYnXQVrGT2rajZYsuKV8GYdiL91cdZSQDKNPNgE/go-libp2p-routing" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" ma "gx/ipfs/QmUAQaWbKxGCUTuoQVvvicbQNZ9APF5pDGWyAZSe93AtKH/go-multiaddr" - routing "gx/ipfs/QmXKuGUzLcgoQvp8M6ZEJzupWUNmx8NoqXEbYLMDjL4rjj/go-libp2p-routing" + cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" pstore 
"gx/ipfs/QmXXCcQ7CLg5a81Ui9TTR35QcR4y7ZyihxwfjqaHfUVcVo/go-libp2p-peerstore" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" host "gx/ipfs/QmdML3R42PRSwnt46jSuEts9bHSqLctVYEjJqMR3UYV8ki/go-libp2p-host" inet "gx/ipfs/QmdXimY9QHaasZmw6hWojWnCJvfgxETjZQfg9g6ZrA9wMX/go-libp2p-net" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" @@ -26,7 +27,8 @@ func NewFromIpfsHost(host host.Host, r routing.ContentRouting) BitSwapNetwork { routing: r, } host.SetStreamHandler(ProtocolBitswap, bitswapNetwork.handleNewStream) - host.SetStreamHandler(ProtocolBitswapOld, bitswapNetwork.handleNewStream) + host.SetStreamHandler(ProtocolBitswapOne, bitswapNetwork.handleNewStream) + host.SetStreamHandler(ProtocolBitswapNoVers, bitswapNetwork.handleNewStream) host.Network().Notify((*netNotifiee)(&bitswapNetwork)) // TODO: StopNotify. @@ -52,7 +54,25 @@ func (s *streamMessageSender) Close() error { } func (s *streamMessageSender) SendMsg(msg bsmsg.BitSwapMessage) error { - return msg.ToNet(s.s) + return msgToStream(s.s, msg) +} + +func msgToStream(s inet.Stream, msg bsmsg.BitSwapMessage) error { + switch s.Protocol() { + case ProtocolBitswap: + if err := msg.ToNetV1(s); err != nil { + log.Debugf("error: %s", err) + return err + } + case ProtocolBitswapOne, ProtocolBitswapNoVers: + if err := msg.ToNetV0(s); err != nil { + log.Debugf("error: %s", err) + return err + } + default: + return fmt.Errorf("unrecognized protocol on remote: %s", s.Protocol()) + } + return nil } func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID) (MessageSender, error) { @@ -73,7 +93,7 @@ func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (inet.Stream, return nil, err } - return bsnet.host.NewStream(ctx, p, ProtocolBitswap, ProtocolBitswapOld) + return bsnet.host.NewStream(ctx, p, ProtocolBitswap, ProtocolBitswapOne, ProtocolBitswapNoVers) } func (bsnet *impl) 
SendMessage( @@ -87,37 +107,7 @@ func (bsnet *impl) SendMessage( } defer s.Close() - if err := outgoing.ToNet(s); err != nil { - log.Debugf("error: %s", err) - return err - } - - return err -} - -func (bsnet *impl) SendRequest( - ctx context.Context, - p peer.ID, - outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) { - - s, err := bsnet.newStreamToPeer(ctx, p) - if err != nil { - return nil, err - } - defer s.Close() - - if err := outgoing.ToNet(s); err != nil { - log.Debugf("error: %s", err) - return nil, err - } - - incoming, err := bsmsg.FromNet(s) - if err != nil { - log.Debugf("error: %s", err) - return incoming, err - } - - return incoming, nil + return msgToStream(s, outgoing) } func (bsnet *impl) SetDelegate(r Receiver) { diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 41c38ad48..d56750ee2 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -6,7 +6,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" - cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 343ddb34c..f4fa9b766 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -8,7 +8,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/stat.go b/bitswap/stat.go index 3f8ddc28e..692794869 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -3,7 +3,7 @@ package bitswap import ( "sort" - cid 
"gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" ) type Stat struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index b9d7c5a50..7142aa61f 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,8 +9,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - routing "gx/ipfs/QmXKuGUzLcgoQvp8M6ZEJzupWUNmx8NoqXEbYLMDjL4rjj/go-libp2p-routing" - cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + routing "gx/ipfs/QmNUgVQTYnXQVrGT2rajZYsuKV8GYdiL91cdZSQDKNPNgE/go-libp2p-routing" + cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index bf89c4db9..ee6c20f8e 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" ) type ThreadSafe struct { diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index eca8739d8..82fab8b08 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,7 +9,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/workers.go b/bitswap/workers.go index d7216ae66..9fba1b0c3 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -9,7 +9,7 
@@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - cid "gx/ipfs/QmakyCk6Vnn16WEKjbkxieZmM2YLTzkFWizbmGowoYPjro/go-cid" + cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) From 46d0db26b6219ab902b5bcea50f5b0c3e550375f Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 9 Oct 2016 12:59:36 -0700 Subject: [PATCH 0509/1035] merkledag: change 'Node' to be an interface Also change existing 'Node' type to 'ProtoNode' and use that most everywhere for now. As we move forward with the integration we will try and use the Node interface in more places that we're currently using ProtoNode. License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@abfdac9a5327e0d5c3b1b7b1e6f8a609f2fb5d68 --- bitswap/workers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index 9fba1b0c3..3a5184e74 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -60,7 +60,7 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableMap{ "ID": id, "Target": envelope.Peer.Pretty(), - "Block": envelope.Block.Multihash().B58String(), + "Block": envelope.Block.Cid().String(), }) bs.wm.SendBlock(ctx, envelope) From a74331e6cbb1e8e699712e6a7c40314284061058 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 24 Oct 2016 20:39:27 -0700 Subject: [PATCH 0510/1035] update to new cid and ipld node packages License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@f349cbee50aa720923491039486edbeddd39d1a4 --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 
2 +- bitswap/decision/peer_request_queue_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/message/message_test.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/notifications/notifications.go | 2 +- bitswap/notifications/notifications_test.go | 2 +- bitswap/stat.go | 2 +- bitswap/testnet/virtual.go | 4 ++-- bitswap/wantlist/wantlist.go | 2 +- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 2 +- 17 files changed, 19 insertions(+), 19 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index fd36f904a..57b7cba13 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -23,7 +23,7 @@ import ( procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" loggables "gx/ipfs/QmTMy4hVSY28DdwJ9kBz6y7q6MuioFzPcpM3Ma3aPjo1i3/go-libp2p-loggables" - cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" + cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index ab46e3607..48d599355 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( travis "github.com/ipfs/go-ipfs/thirdparty/testutil/ci/travis" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" + cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" p2ptestutil "gx/ipfs/QmcRa2qn6iCmap9bjp8jAwkvYAq13AUfxdY3rrYiaJbLum/go-libp2p/p2p/test/util" ) diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 91515875a..1cc6780b6 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,7 +7,7 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" 
"github.com/ipfs/go-ipfs/thirdparty/testutil" - cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" + cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index c6e66451e..7f7b14f11 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,7 +6,7 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" + cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 63f4426d4..fff1ff0b8 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" + cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index cf9913955..ffd0041ed 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -10,7 +10,7 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" + cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" ) diff --git a/bitswap/message/message.go 
b/bitswap/message/message.go index ed58541d3..1c112dd93 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -8,7 +8,7 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" + cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" inet "gx/ipfs/QmdXimY9QHaasZmw6hWojWnCJvfgxETjZQfg9g6ZrA9wMX/go-libp2p-net" diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 4cfbf8f27..cd8cd2fcf 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -8,7 +8,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" - cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" + cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" ) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 3f61f43fa..a763a128a 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -4,7 +4,7 @@ import ( "context" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" + cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 2addd37d1..a078c89fa 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -7,11 +7,11 @@ import ( bsmsg 
"github.com/ipfs/go-ipfs/exchange/bitswap/message" - routing "gx/ipfs/QmNUgVQTYnXQVrGT2rajZYsuKV8GYdiL91cdZSQDKNPNgE/go-libp2p-routing" + routing "gx/ipfs/QmQKEgGgYCDyk8VNY6A65FpuE4YwbspvjXHco1rdb75PVc/go-libp2p-routing" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" ma "gx/ipfs/QmUAQaWbKxGCUTuoQVvvicbQNZ9APF5pDGWyAZSe93AtKH/go-multiaddr" - cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" pstore "gx/ipfs/QmXXCcQ7CLg5a81Ui9TTR35QcR4y7ZyihxwfjqaHfUVcVo/go-libp2p-peerstore" + cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" host "gx/ipfs/QmdML3R42PRSwnt46jSuEts9bHSqLctVYEjJqMR3UYV8ki/go-libp2p-host" inet "gx/ipfs/QmdXimY9QHaasZmw6hWojWnCJvfgxETjZQfg9g6ZrA9wMX/go-libp2p-net" diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index d56750ee2..0dab1793d 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -6,7 +6,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" - cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" + cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index f4fa9b766..659d0ca1d 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -8,7 +8,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" + cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/stat.go b/bitswap/stat.go index 692794869..817acc9b0 100644 --- a/bitswap/stat.go +++ 
b/bitswap/stat.go @@ -3,7 +3,7 @@ package bitswap import ( "sort" - cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" + cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" ) type Stat struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 7142aa61f..997d03ba1 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,8 +9,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - routing "gx/ipfs/QmNUgVQTYnXQVrGT2rajZYsuKV8GYdiL91cdZSQDKNPNgE/go-libp2p-routing" - cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" + routing "gx/ipfs/QmQKEgGgYCDyk8VNY6A65FpuE4YwbspvjXHco1rdb75PVc/go-libp2p-routing" + cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index ee6c20f8e..ef145b14b 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" + cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" ) type ThreadSafe struct { diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 82fab8b08..fb0e2a6b7 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,7 +9,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" + cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/workers.go b/bitswap/workers.go index 3a5184e74..e2f837823 
100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -9,7 +9,7 @@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid" + cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) From a2b5ce060aa43a91215c5031bb2b55caf5dae6db Mon Sep 17 00:00:00 2001 From: Richard Littauer Date: Fri, 1 Jul 2016 18:36:55 +0100 Subject: [PATCH 0511/1035] Changed so only explicit ipfs cli commands are lowercased License: MIT Signed-off-by: Richard Littauer This commit was moved from ipfs/go-bitswap@10cb1a0567cfe154ff013d12da58988c5222ed53 --- bitswap/bitswap.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 57b7cba13..d778756bf 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -1,4 +1,4 @@ -// package bitswap implements the IPFS Exchange interface with the BitSwap +// package bitswap implements the IPFS exchange interface with the BitSwap // bilateral exchange protocol. package bitswap @@ -68,7 +68,7 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, // important to use provided parent context (since it may include important // loggable data). It's probably not a good idea to allow bitswap to be - // coupled to the concerns of the IPFS daemon in this way. + // coupled to the concerns of the ipfs daemon in this way. // // FIXME(btc) Now that bitswap manages itself using a process, it probably // shouldn't accept a context anymore. 
Clients should probably use Close() From 6ef14f2083a8d859de7293f1f0415a637988e67d Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 30 Oct 2016 19:01:03 -0700 Subject: [PATCH 0512/1035] update go-libp2p-swarm with deadlock fixes License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@0ddfa952ec99ebb809446e86a6e62265c5a6ab67 --- bitswap/bitswap_test.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 48d599355..35314b23b 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,8 +16,8 @@ import ( travis "github.com/ipfs/go-ipfs/thirdparty/testutil/ci/travis" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" + p2ptestutil "gx/ipfs/QmQ7iWUfqrLEoJwsoNdrZu4625bKyhZCi4Sh6MfjywEfbG/go-libp2p/p2p/test/util" cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" - p2ptestutil "gx/ipfs/QmcRa2qn6iCmap9bjp8jAwkvYAq13AUfxdY3rrYiaJbLum/go-libp2p/p2p/test/util" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 047202c7d..060d24f1a 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,8 +5,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + mockpeernet "gx/ipfs/QmQ7iWUfqrLEoJwsoNdrZu4625bKyhZCi4Sh6MfjywEfbG/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" - mockpeernet "gx/ipfs/QmcRa2qn6iCmap9bjp8jAwkvYAq13AUfxdY3rrYiaJbLum/go-libp2p/p2p/net/mock" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 4987e2faf..99788e96c 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -9,9 +9,9 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + p2ptestutil "gx/ipfs/QmQ7iWUfqrLEoJwsoNdrZu4625bKyhZCi4Sh6MfjywEfbG/go-libp2p/p2p/test/util" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" ds_sync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync" - p2ptestutil "gx/ipfs/QmcRa2qn6iCmap9bjp8jAwkvYAq13AUfxdY3rrYiaJbLum/go-libp2p/p2p/test/util" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) From c92604b047d36a8e971fbd6f51636b2c1af90bfc Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 3 Nov 2016 20:06:32 -0700 Subject: [PATCH 0513/1035] update go-libp2p License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@46a39ed00eef690cc861951124d16b7b676a26ba --- bitswap/bitswap_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git 
a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 35314b23b..4d388b234 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( travis "github.com/ipfs/go-ipfs/thirdparty/testutil/ci/travis" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - p2ptestutil "gx/ipfs/QmQ7iWUfqrLEoJwsoNdrZu4625bKyhZCi4Sh6MfjywEfbG/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmVN76ekoYakYa8WVDwhkUsnjt2MYuFpQs1uuU57T5KMD8/go-libp2p/p2p/test/util" cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" ) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 1c112dd93..aad5bd314 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -11,7 +11,7 @@ import ( cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" - inet "gx/ipfs/QmdXimY9QHaasZmw6hWojWnCJvfgxETjZQfg9g6ZrA9wMX/go-libp2p-net" + inet "gx/ipfs/QmdysBu77i3YaagNtMAjiCJdeWWvds18ho5XEB784guQ41/go-libp2p-net" ) // TODO move message.go into the bitswap package diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index a078c89fa..806acb957 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -10,11 +10,11 @@ import ( routing "gx/ipfs/QmQKEgGgYCDyk8VNY6A65FpuE4YwbspvjXHco1rdb75PVc/go-libp2p-routing" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" ma "gx/ipfs/QmUAQaWbKxGCUTuoQVvvicbQNZ9APF5pDGWyAZSe93AtKH/go-multiaddr" + host "gx/ipfs/QmWf338UyG5DKyemvoFiomDPtkVNHLsw3GAt9XXHX5ZtsM/go-libp2p-host" pstore "gx/ipfs/QmXXCcQ7CLg5a81Ui9TTR35QcR4y7ZyihxwfjqaHfUVcVo/go-libp2p-peerstore" cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - host 
"gx/ipfs/QmdML3R42PRSwnt46jSuEts9bHSqLctVYEjJqMR3UYV8ki/go-libp2p-host" - inet "gx/ipfs/QmdXimY9QHaasZmw6hWojWnCJvfgxETjZQfg9g6ZrA9wMX/go-libp2p-net" + inet "gx/ipfs/QmdysBu77i3YaagNtMAjiCJdeWWvds18ho5XEB784guQ41/go-libp2p-net" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 060d24f1a..8168dad73 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,7 +5,7 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet "gx/ipfs/QmQ7iWUfqrLEoJwsoNdrZu4625bKyhZCi4Sh6MfjywEfbG/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmVN76ekoYakYa8WVDwhkUsnjt2MYuFpQs1uuU57T5KMD8/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 99788e96c..b6cc8c0c9 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -9,7 +9,7 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - p2ptestutil "gx/ipfs/QmQ7iWUfqrLEoJwsoNdrZu4625bKyhZCi4Sh6MfjywEfbG/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmVN76ekoYakYa8WVDwhkUsnjt2MYuFpQs1uuU57T5KMD8/go-libp2p/p2p/test/util" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" ds_sync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" From 3c0f9a99d4d48c2cb6bc1c4b1ee5eb726a813882 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 5 Nov 2016 20:10:32 -0700 Subject: [PATCH 0514/1035] update to libp2p 4.0.4 License: MIT Signed-off-by: Jeromy This commit was moved from 
ipfs/go-bitswap@62c95c3b69341b65b7d1ee5f1407ee26d37776cf --- bitswap/bitswap_test.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 4d388b234..1b4a2883b 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( travis "github.com/ipfs/go-ipfs/thirdparty/testutil/ci/travis" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - p2ptestutil "gx/ipfs/QmVN76ekoYakYa8WVDwhkUsnjt2MYuFpQs1uuU57T5KMD8/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmQfvKShQ2v7nkfCE4ygisxpcSBFvBYaorQ54SibY6PGXV/go-libp2p/p2p/test/util" cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 8168dad73..3c2f0c99a 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,7 +5,7 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet "gx/ipfs/QmVN76ekoYakYa8WVDwhkUsnjt2MYuFpQs1uuU57T5KMD8/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmQfvKShQ2v7nkfCE4ygisxpcSBFvBYaorQ54SibY6PGXV/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index b6cc8c0c9..d531b7487 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -9,7 +9,7 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - p2ptestutil "gx/ipfs/QmVN76ekoYakYa8WVDwhkUsnjt2MYuFpQs1uuU57T5KMD8/go-libp2p/p2p/test/util" + p2ptestutil 
"gx/ipfs/QmQfvKShQ2v7nkfCE4ygisxpcSBFvBYaorQ54SibY6PGXV/go-libp2p/p2p/test/util" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" ds_sync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" From 3075f505bdf4acd13a2a6a658eda4b99dc1f1ed2 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 10 Nov 2016 17:38:10 -0800 Subject: [PATCH 0515/1035] update to go-libp2p 4.1.0 License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@aee9654f2f61e79eafa1b081b8da6c17fd3bfd84 --- bitswap/bitswap_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 1b4a2883b..7ebbdb504 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,7 @@ import ( travis "github.com/ipfs/go-ipfs/thirdparty/testutil/ci/travis" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - p2ptestutil "gx/ipfs/QmQfvKShQ2v7nkfCE4ygisxpcSBFvBYaorQ54SibY6PGXV/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmUYzZRJcuUxLSnSzF1bSyw1jYbNAULkBrbS6rnr7F72uK/go-libp2p/p2p/test/util" cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" ) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index aad5bd314..60ef73517 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -8,10 +8,10 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + inet "gx/ipfs/QmU3pGGVT1riXp5dBJbNrGpxssVScfvk9236drRHZZbKJ1/go-libp2p-net" cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto 
"gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" - inet "gx/ipfs/QmdysBu77i3YaagNtMAjiCJdeWWvds18ho5XEB784guQ41/go-libp2p-net" ) // TODO move message.go into the bitswap package diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 806acb957..73294b5da 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -9,12 +9,12 @@ import ( routing "gx/ipfs/QmQKEgGgYCDyk8VNY6A65FpuE4YwbspvjXHco1rdb75PVc/go-libp2p-routing" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + inet "gx/ipfs/QmU3pGGVT1riXp5dBJbNrGpxssVScfvk9236drRHZZbKJ1/go-libp2p-net" ma "gx/ipfs/QmUAQaWbKxGCUTuoQVvvicbQNZ9APF5pDGWyAZSe93AtKH/go-multiaddr" - host "gx/ipfs/QmWf338UyG5DKyemvoFiomDPtkVNHLsw3GAt9XXHX5ZtsM/go-libp2p-host" pstore "gx/ipfs/QmXXCcQ7CLg5a81Ui9TTR35QcR4y7ZyihxwfjqaHfUVcVo/go-libp2p-peerstore" cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - inet "gx/ipfs/QmdysBu77i3YaagNtMAjiCJdeWWvds18ho5XEB784guQ41/go-libp2p-net" + host "gx/ipfs/Qmb6UFbVu1grhv5o5KnouvtZ6cqdrjXj6zLejAHWunxgCt/go-libp2p-host" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 3c2f0c99a..4bc288490 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,7 +5,7 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet "gx/ipfs/QmQfvKShQ2v7nkfCE4ygisxpcSBFvBYaorQ54SibY6PGXV/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmUYzZRJcuUxLSnSzF1bSyw1jYbNAULkBrbS6rnr7F72uK/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/testutils.go 
b/bitswap/testutils.go index d531b7487..f01cb1c82 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -9,7 +9,7 @@ import ( datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - p2ptestutil "gx/ipfs/QmQfvKShQ2v7nkfCE4ygisxpcSBFvBYaorQ54SibY6PGXV/go-libp2p/p2p/test/util" + p2ptestutil "gx/ipfs/QmUYzZRJcuUxLSnSzF1bSyw1jYbNAULkBrbS6rnr7F72uK/go-libp2p/p2p/test/util" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" ds_sync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" From 06d16cfef5437b9b0bb11d5e8fd23376698f6853 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 15 Nov 2016 18:00:49 -0800 Subject: [PATCH 0516/1035] update to newer ipld node interface with Copy and better Tree License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@0f7b0a06304564096dd78455f7c4c71979cf3538 --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/decision/peer_request_queue_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/message/message_test.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/notifications/notifications.go | 2 +- bitswap/notifications/notifications_test.go | 2 +- bitswap/stat.go | 2 +- bitswap/testnet/virtual.go | 4 ++-- bitswap/wantlist/wantlist.go | 2 +- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 2 +- 17 files changed, 19 insertions(+), 19 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d778756bf..91f66551d 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -23,7 +23,7 @@ import ( procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging 
"gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" loggables "gx/ipfs/QmTMy4hVSY28DdwJ9kBz6y7q6MuioFzPcpM3Ma3aPjo1i3/go-libp2p-loggables" - cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" + cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 7ebbdb504..1d4f56ead 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -17,7 +17,7 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" p2ptestutil "gx/ipfs/QmUYzZRJcuUxLSnSzF1bSyw1jYbNAULkBrbS6rnr7F72uK/go-libp2p/p2p/test/util" - cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" + cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 1cc6780b6..d71454600 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,8 +7,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" + cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 7f7b14f11..7d759873e 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,7 +6,7 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" + cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" peer 
"gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index fff1ff0b8..7f5f0301d 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" + cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index ffd0041ed..81c14979b 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -10,8 +10,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" + cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 60ef73517..a54c14da9 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -9,9 +9,9 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" inet "gx/ipfs/QmU3pGGVT1riXp5dBJbNrGpxssVScfvk9236drRHZZbKJ1/go-libp2p-net" - cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" + cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" ) // TODO move message.go into the bitswap package diff --git a/bitswap/message/message_test.go 
b/bitswap/message/message_test.go index cd8cd2fcf..ed656c646 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -8,8 +8,8 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" - cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" + cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" ) func mkFakeCid(s string) *cid.Cid { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index a763a128a..72dfa7c4a 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -4,8 +4,8 @@ import ( "context" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" + cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 73294b5da..4d441a31d 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -7,14 +7,14 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - routing "gx/ipfs/QmQKEgGgYCDyk8VNY6A65FpuE4YwbspvjXHco1rdb75PVc/go-libp2p-routing" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" inet "gx/ipfs/QmU3pGGVT1riXp5dBJbNrGpxssVScfvk9236drRHZZbKJ1/go-libp2p-net" ma "gx/ipfs/QmUAQaWbKxGCUTuoQVvvicbQNZ9APF5pDGWyAZSe93AtKH/go-multiaddr" + routing "gx/ipfs/QmUrCwTDvJgmBbJVHu1HGEyqDaod3dR6sEkZkpxZk4u47c/go-libp2p-routing" pstore "gx/ipfs/QmXXCcQ7CLg5a81Ui9TTR35QcR4y7ZyihxwfjqaHfUVcVo/go-libp2p-peerstore" - cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" host 
"gx/ipfs/Qmb6UFbVu1grhv5o5KnouvtZ6cqdrjXj6zLejAHWunxgCt/go-libp2p-host" + cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 0dab1793d..a673b2d47 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -6,7 +6,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" - cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" + cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 659d0ca1d..07577d026 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -8,7 +8,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" + cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/stat.go b/bitswap/stat.go index 817acc9b0..85f3a7ea8 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -3,7 +3,7 @@ package bitswap import ( "sort" - cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" + cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" ) type Stat struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 997d03ba1..36d9088f8 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,8 +9,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - routing 
"gx/ipfs/QmQKEgGgYCDyk8VNY6A65FpuE4YwbspvjXHco1rdb75PVc/go-libp2p-routing" - cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" + routing "gx/ipfs/QmUrCwTDvJgmBbJVHu1HGEyqDaod3dR6sEkZkpxZk4u47c/go-libp2p-routing" + cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index ef145b14b..457d052e9 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" + cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" ) type ThreadSafe struct { diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index fb0e2a6b7..c0eeb2b5c 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,7 +9,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" + cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/workers.go b/bitswap/workers.go index e2f837823..942c37ba8 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -9,7 +9,7 @@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - cid "gx/ipfs/QmXfiyr2RWEXpVDdaYnD2HNiBk6UBddsvEP4RPfXb6nGqY/go-cid" + cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) From b750b6aaffe08803913b94ec7350cb37b77b960b Mon Sep 
17 00:00:00 2001 From: Jakub Sztandera Date: Fri, 18 Nov 2016 00:24:00 +0100 Subject: [PATCH 0517/1035] Update go-libp2p across codebase License: MIT Signed-off-by: Jakub Sztandera This commit was moved from ipfs/go-bitswap@b6329a6bd9931fc507ffaf7302be5ef65ef206f7 --- bitswap/bitswap_test.go | 3 ++- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 5 +++-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 1d4f56ead..c6c1975ba 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -16,7 +16,8 @@ import ( travis "github.com/ipfs/go-ipfs/thirdparty/testutil/ci/travis" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - p2ptestutil "gx/ipfs/QmUYzZRJcuUxLSnSzF1bSyw1jYbNAULkBrbS6rnr7F72uK/go-libp2p/p2p/test/util" + + p2ptestutil "gx/ipfs/QmcDTquYLTYirqj71RRWKUWEEw3nJt11Awzun5ep8kfY7W/go-libp2p-netutil" cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 4bc288490..730ce51bb 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,7 +5,7 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet "gx/ipfs/QmUYzZRJcuUxLSnSzF1bSyw1jYbNAULkBrbS6rnr7F72uK/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmZyBJGpRnbQ7oUstoGNZbhXC4HJuFUCgpp8pmsVTUwdS3/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index f01cb1c82..8a510effd 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -1,17 +1,18 @@ package bitswap import ( + "context" "time" - context "context" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" tn 
"github.com/ipfs/go-ipfs/exchange/bitswap/testnet" datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - p2ptestutil "gx/ipfs/QmUYzZRJcuUxLSnSzF1bSyw1jYbNAULkBrbS6rnr7F72uK/go-libp2p/p2p/test/util" + ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" ds_sync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync" + p2ptestutil "gx/ipfs/QmcDTquYLTYirqj71RRWKUWEEw3nJt11Awzun5ep8kfY7W/go-libp2p-netutil" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) From 3f2a7fa4848725525fb12c94c7cfe325e8d3078a Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 21 Nov 2016 20:32:18 -0800 Subject: [PATCH 0518/1035] cleanup bitswap and handle message send failure slightly better License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@6370a0b90c11ecebdf7b887cb80546a946d40bcc --- bitswap/bitswap.go | 36 ++++++++-------- bitswap/wantmanager.go | 95 ++++++++++++++++++++++++++++-------------- bitswap/workers.go | 6 +++ 3 files changed, 88 insertions(+), 49 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 91f66551d..dc5dcafe3 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -82,7 +82,6 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, }) bs := &Bitswap{ - self: p, blockstore: bstore, notifications: notif, engine: decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method @@ -112,34 +111,36 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, // Bitswap instances implement the bitswap protocol. 
type Bitswap struct { + // the peermanager manages sending messages to peers in a way that + // wont block bitswap operation + wm *WantManager - // the ID of the peer to act on behalf of - self peer.ID + // the engine is the bit of logic that decides who to send which blocks to + engine *decision.Engine // network delivers messages on behalf of the session network bsnet.BitSwapNetwork - // the peermanager manages sending messages to peers in a way that - // wont block bitswap operation - wm *WantManager - // blockstore is the local database // NB: ensure threadsafety blockstore blockstore.Blockstore + // notifications engine for receiving new blocks and routing them to the + // appropriate user requests notifications notifications.PubSub - // send keys to a worker to find and connect to providers for them + // findKeys sends keys to a worker to find and connect to providers for them findKeys chan *blockRequest - - engine *decision.Engine - - process process.Process - + // newBlocks is a channel for newly added blocks to be provided to the + // network. blocks pushed down this channel get buffered and fed to the + // provideKeys channel later on to avoid too much network activity newBlocks chan *cid.Cid - + // provideKeys directly feeds provide workers provideKeys chan *cid.Cid + process process.Process + + // Counters for various statistics counterLk sync.Mutex blocksRecvd int dupBlocksRecvd int @@ -167,13 +168,12 @@ func (bs *Bitswap) GetBlock(parent context.Context, k *cid.Cid) (blocks.Block, e // enforce. May this comment keep you safe. 
ctx, cancelFunc := context.WithCancel(parent) + // TODO: this request ID should come in from a higher layer so we can track + // across multiple 'GetBlock' invocations ctx = logging.ContextWithLoggable(ctx, loggables.Uuid("GetBlockRequest")) log.Event(ctx, "Bitswap.GetBlockRequest.Start", k) defer log.Event(ctx, "Bitswap.GetBlockRequest.End", k) - - defer func() { - cancelFunc() - }() + defer cancelFunc() promise, err := bs.GetBlocks(ctx, []*cid.Cid{k}) if err != nil { diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index c0eeb2b5c..28d4690dd 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -175,28 +175,13 @@ func (mq *msgQueue) runQueue(ctx context.Context) { } func (mq *msgQueue) doWork(ctx context.Context) { - // allow ten minutes for connections - // this includes looking them up in the dht - // dialing them, and handshaking if mq.sender == nil { - conctx, cancel := context.WithTimeout(ctx, time.Minute*10) - defer cancel() - - err := mq.network.ConnectTo(conctx, mq.p) + err := mq.openSender(ctx) if err != nil { - log.Infof("cant connect to peer %s: %s", mq.p, err) + log.Infof("cant open message sender to peer %s: %s", mq.p, err) // TODO: cant connect, what now? return } - - nsender, err := mq.network.NewMessageSender(ctx, mq.p) - if err != nil { - log.Infof("cant open new stream to peer %s: %s", mq.p, err) - // TODO: cant open stream, what now? - return - } - - mq.sender = nsender } // grab outgoing message @@ -210,14 +195,64 @@ func (mq *msgQueue) doWork(ctx context.Context) { mq.outlk.Unlock() // send wantlist updates - err := mq.sender.SendMsg(wlm) - if err != nil { + for { // try to send this message until we fail. + err := mq.sender.SendMsg(wlm) + if err == nil { + return + } + log.Infof("bitswap send error: %s", err) mq.sender.Close() mq.sender = nil - // TODO: what do we do if this fails? 
- return + + select { + case <-mq.done: + return + case <-ctx.Done(): + return + case <-time.After(time.Millisecond * 100): + // wait 100ms in case disconnect notifications are still propogating + log.Warning("SendMsg errored but neither 'done' nor context.Done() were set") + } + + err = mq.openSender(ctx) + if err != nil { + log.Error("couldnt open sender again after SendMsg(%s) failed: %s", mq.p, err) + // TODO(why): what do we do now? + // I think the *right* answer is to probably put the message we're + // trying to send back, and then return to waiting for new work or + // a disconnect. + return + } + + // TODO: Is this the same instance for the remote peer? + // If its not, we should resend our entire wantlist to them + /* + if mq.sender.InstanceID() != mq.lastSeenInstanceID { + wlm = mq.getFullWantlistMessage() + } + */ + } +} + +func (mq *msgQueue) openSender(ctx context.Context) error { + // allow ten minutes for connections this includes looking them up in the + // dht dialing them, and handshaking + conctx, cancel := context.WithTimeout(ctx, time.Minute*10) + defer cancel() + + err := mq.network.ConnectTo(conctx, mq.p) + if err != nil { + return err + } + + nsender, err := mq.network.NewMessageSender(ctx, mq.p) + if err != nil { + return err } + + mq.sender = nsender + return nil } func (pm *WantManager) Connected(p peer.ID) { @@ -292,14 +327,13 @@ func (pm *WantManager) Run() { } func (wm *WantManager) newMsgQueue(p peer.ID) *msgQueue { - mq := new(msgQueue) - mq.done = make(chan struct{}) - mq.work = make(chan struct{}, 1) - mq.network = wm.network - mq.p = p - mq.refcnt = 1 - - return mq + return &msgQueue{ + done: make(chan struct{}), + work: make(chan struct{}, 1), + network: wm.network, + p: p, + refcnt: 1, + } } func (mq *msgQueue) addMessage(entries []*bsmsg.Entry) { @@ -312,8 +346,7 @@ func (mq *msgQueue) addMessage(entries []*bsmsg.Entry) { } }() - // if we have no message held, or the one we are given is full - // overwrite the one we are 
holding + // if we have no message held allocate a new one if mq.out == nil { mq.out = bsmsg.New(false) } diff --git a/bitswap/workers.go b/bitswap/workers.go index 942c37ba8..5e0644782 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -197,6 +197,12 @@ func (bs *Bitswap) providerQueryManager(ctx context.Context) { for { select { case e := <-bs.findKeys: + select { // make sure its not already cancelled + case <-e.Ctx.Done(): + continue + default: + } + activeLk.Lock() if kset.Has(e.Cid) { activeLk.Unlock() From 25b2ebe2b5f3114bc8ca73ff996fa8edc375c343 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 28 Nov 2016 13:42:47 -0800 Subject: [PATCH 0519/1035] fix formatting on error call License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@76835ff34f9fd98e2c92a9f1b90f1fad03a7f83e --- bitswap/wantmanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 28d4690dd..75b835ecf 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -217,7 +217,7 @@ func (mq *msgQueue) doWork(ctx context.Context) { err = mq.openSender(ctx) if err != nil { - log.Error("couldnt open sender again after SendMsg(%s) failed: %s", mq.p, err) + log.Errorf("couldnt open sender again after SendMsg(%s) failed: %s", mq.p, err) // TODO(why): what do we do now? 
// I think the *right* answer is to probably put the message we're // trying to send back, and then return to waiting for new work or From c4c3f49a7e88d42bfa30ee788a3002f8b23d222f Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 28 Nov 2016 22:29:38 -0800 Subject: [PATCH 0520/1035] bubble up go-datastore deps License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@1fbd7c052b198e47a1b1e901c4beba3f59fce4e7 --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 4 ++-- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine_test.go | 4 ++-- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/decision/peer_request_queue_test.go | 2 +- bitswap/message/message.go | 4 ++-- bitswap/message/message_test.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 10 +++++----- bitswap/notifications/notifications.go | 2 +- bitswap/notifications/notifications_test.go | 2 +- bitswap/stat.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 4 ++-- bitswap/testutils.go | 6 +++--- bitswap/wantlist/wantlist.go | 2 +- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 2 +- 20 files changed, 31 insertions(+), 31 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index dc5dcafe3..7910b24c9 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -23,7 +23,7 @@ import ( procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" loggables "gx/ipfs/QmTMy4hVSY28DdwJ9kBz6y7q6MuioFzPcpM3Ma3aPjo1i3/go-libp2p-loggables" - cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" + cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index c6c1975ba..e50509461 100644 --- a/bitswap/bitswap_test.go +++ 
b/bitswap/bitswap_test.go @@ -17,8 +17,8 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - p2ptestutil "gx/ipfs/QmcDTquYLTYirqj71RRWKUWEEw3nJt11Awzun5ep8kfY7W/go-libp2p-netutil" - cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" + p2ptestutil "gx/ipfs/QmWdGJY4fcsfhLHucEfivw8J71yUqNUFbzdU1jnJBnN5Xh/go-libp2p-netutil" + cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index d71454600..43a1f6969 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -8,7 +8,7 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" - cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" + cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index d2d4fa0ca..ed985d166 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -13,8 +13,8 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" - dssync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync" + ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" + dssync "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 
7d759873e..db1f24287 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,7 +6,7 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" + cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 7f5f0301d..0f4246697 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" + cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 81c14979b..18c29f1e4 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -11,7 +11,7 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" - cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" + cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index a54c14da9..41ae59bf0 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -8,10 +8,10 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmU3pGGVT1riXp5dBJbNrGpxssVScfvk9236drRHZZbKJ1/go-libp2p-net" + inet 
"gx/ipfs/QmQx1dHDDYENugYgqA22BaBrRfuv1coSsuPiM7rYh1wwGH/go-libp2p-net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" - cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" + cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" ) // TODO move message.go into the bitswap package diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index ed656c646..00740b424 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -9,7 +9,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" - cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" + cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" ) func mkFakeCid(s string) *cid.Cid { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 72dfa7c4a..21b4d9ead 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -5,7 +5,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" - cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" + cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 4d441a31d..3d992769b 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -7,14 +7,14 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + host "gx/ipfs/QmPTGbC34bPKaUm9wTxBo7zSCac7pDuG42ZmnXC718CKZZ/go-libp2p-host" + inet "gx/ipfs/QmQx1dHDDYENugYgqA22BaBrRfuv1coSsuPiM7rYh1wwGH/go-libp2p-net" logging 
"gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - inet "gx/ipfs/QmU3pGGVT1riXp5dBJbNrGpxssVScfvk9236drRHZZbKJ1/go-libp2p-net" ma "gx/ipfs/QmUAQaWbKxGCUTuoQVvvicbQNZ9APF5pDGWyAZSe93AtKH/go-multiaddr" - routing "gx/ipfs/QmUrCwTDvJgmBbJVHu1HGEyqDaod3dR6sEkZkpxZk4u47c/go-libp2p-routing" - pstore "gx/ipfs/QmXXCcQ7CLg5a81Ui9TTR35QcR4y7ZyihxwfjqaHfUVcVo/go-libp2p-peerstore" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - host "gx/ipfs/Qmb6UFbVu1grhv5o5KnouvtZ6cqdrjXj6zLejAHWunxgCt/go-libp2p-host" - cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" + routing "gx/ipfs/QmbkGVaN9W6RYJK4Ws5FvMKXKDqdRQ5snhtaa92qP6L8eU/go-libp2p-routing" + cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" + pstore "gx/ipfs/QmeXj9VAjmYQZxpmVz7VzccbJrpmr8qkCDSjfVNsPTWTYU/go-libp2p-peerstore" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index a673b2d47..440247fed 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -6,7 +6,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" - cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" + cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 07577d026..ff2811884 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -8,7 +8,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" + cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" ) func TestDuplicates(t 
*testing.T) { diff --git a/bitswap/stat.go b/bitswap/stat.go index 85f3a7ea8..f8ca0d0a4 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -3,7 +3,7 @@ package bitswap import ( "sort" - cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" + cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" ) type Stat struct { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 730ce51bb..94baee01d 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,8 +5,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet "gx/ipfs/QmZyBJGpRnbQ7oUstoGNZbhXC4HJuFUCgpp8pmsVTUwdS3/go-libp2p/p2p/net/mock" - ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" + ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" + mockpeernet "gx/ipfs/QmbzCT1CwxVZ2ednptC9RavuJe7Bv8DDi2Ne89qUrA37XM/go-libp2p/p2p/net/mock" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 36d9088f8..ab3535c1f 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,8 +9,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - routing "gx/ipfs/QmUrCwTDvJgmBbJVHu1HGEyqDaod3dR6sEkZkpxZk4u47c/go-libp2p-routing" - cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" + routing "gx/ipfs/QmbkGVaN9W6RYJK4Ws5FvMKXKDqdRQ5snhtaa92qP6L8eU/go-libp2p-routing" + cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 8a510effd..4099d18ff 100644 --- a/bitswap/testutils.go +++ 
b/bitswap/testutils.go @@ -10,9 +10,9 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore" - ds_sync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync" - p2ptestutil "gx/ipfs/QmcDTquYLTYirqj71RRWKUWEEw3nJt11Awzun5ep8kfY7W/go-libp2p-netutil" + ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" + ds_sync "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync" + p2ptestutil "gx/ipfs/QmWdGJY4fcsfhLHucEfivw8J71yUqNUFbzdU1jnJBnN5Xh/go-libp2p-netutil" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 457d052e9..dedf87140 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" + cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" ) type ThreadSafe struct { diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 28d4690dd..388db20b5 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,7 +9,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" + cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/workers.go b/bitswap/workers.go index 5e0644782..4df8af11d 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -9,7 +9,7 @@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx 
"gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - cid "gx/ipfs/QmcEcrBAMrwMyhSjXt4yfyPpzgSuV8HLHavnfmiKCSRqZU/go-cid" + cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) From 703ff8a9d6035df47fae57fa4022b32be135a705 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 29 Nov 2016 15:22:05 -0800 Subject: [PATCH 0521/1035] bitswap: add a deadline to sendmsg calls License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@90faeaf22ccf2996f4995d4ca71b19bb1cd732a1 --- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 24 ++++++++++++++++++++---- bitswap/testnet/virtual.go | 4 ++-- bitswap/wantmanager.go | 2 +- 4 files changed, 24 insertions(+), 8 deletions(-) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 21b4d9ead..dfc1b3f02 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -38,7 +38,7 @@ type BitSwapNetwork interface { } type MessageSender interface { - SendMsg(bsmsg.BitSwapMessage) error + SendMsg(context.Context, bsmsg.BitSwapMessage) error Close() error } diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 3d992769b..c854f853e 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "time" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" @@ -20,6 +21,8 @@ import ( var log = logging.Logger("bitswap_network") +var sendMessageTimeout = time.Minute * 10 + // NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host func NewFromIpfsHost(host host.Host, r routing.ContentRouting) BitSwapNetwork { bitswapNetwork := impl{ @@ -53,11 +56,20 @@ func (s *streamMessageSender) Close() error { return s.s.Close() } -func (s *streamMessageSender) SendMsg(msg bsmsg.BitSwapMessage) 
error { - return msgToStream(s.s, msg) +func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { + return msgToStream(ctx, s.s, msg) } -func msgToStream(s inet.Stream, msg bsmsg.BitSwapMessage) error { +func msgToStream(ctx context.Context, s inet.Stream, msg bsmsg.BitSwapMessage) error { + deadline := time.Now().Add(sendMessageTimeout) + if dl, ok := ctx.Deadline(); ok { + deadline = dl + } + + if err := s.SetWriteDeadline(deadline); err != nil { + log.Warningf("error setting deadline: %s", err) + } + switch s.Protocol() { case ProtocolBitswap: if err := msg.ToNetV1(s); err != nil { @@ -72,6 +84,10 @@ func msgToStream(s inet.Stream, msg bsmsg.BitSwapMessage) error { default: return fmt.Errorf("unrecognized protocol on remote: %s", s.Protocol()) } + + if err := s.SetWriteDeadline(time.Time{}); err != nil { + log.Warningf("error resetting deadline: %s", err) + } return nil } @@ -107,7 +123,7 @@ func (bsnet *impl) SendMessage( } defer s.Close() - return msgToStream(s, outgoing) + return msgToStream(ctx, s, outgoing) } func (bsnet *impl) SetDelegate(r Receiver) { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index ab3535c1f..4d8769e5b 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -119,8 +119,8 @@ type messagePasser struct { ctx context.Context } -func (mp *messagePasser) SendMsg(m bsmsg.BitSwapMessage) error { - return mp.net.SendMessage(mp.ctx, mp.local, mp.target, m) +func (mp *messagePasser) SendMsg(ctx context.Context, m bsmsg.BitSwapMessage) error { + return mp.net.SendMessage(ctx, mp.local, mp.target, m) } func (mp *messagePasser) Close() error { diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 388db20b5..f5869d82e 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -196,7 +196,7 @@ func (mq *msgQueue) doWork(ctx context.Context) { // send wantlist updates for { // try to send this message until we fail. 
- err := mq.sender.SendMsg(wlm) + err := mq.sender.SendMsg(ctx, wlm) if err == nil { return } From 459baa24368199caa3082776bdbe7e97de1dd0bf Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 29 Nov 2016 19:28:33 -0800 Subject: [PATCH 0522/1035] bitswap: increase wantlist resend delay to one minute License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@aaa7de54c416b8a83c3ed3081e2f55387e074c5b --- bitswap/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 7910b24c9..e1fb20de4 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -57,7 +57,7 @@ func init() { } } -var rebroadcastDelay = delay.Fixed(time.Second * 10) +var rebroadcastDelay = delay.Fixed(time.Minute) // New initializes a BitSwap instance that communicates over the provided // BitSwapNetwork. This function registers the returned instance as the network From 18a917c545b01d835db4e6c849e1cc7188c5785d Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 2 Dec 2016 14:15:24 -0800 Subject: [PATCH 0523/1035] bitswap: add wantlist fullness to protobuf messages License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@ecd52489e465fa526e291cc28ccc82712286b26b --- bitswap/message/message.go | 2 ++ bitswap/message/message_test.go | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 41ae59bf0..ad7177f02 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -189,6 +189,7 @@ func (m *impl) ToProtoV0() *pb.Message { Cancel: proto.Bool(e.Cancel), }) } + pbm.Wantlist.Full = proto.Bool(m.full) for _, b := range m.Blocks() { pbm.Blocks = append(pbm.Blocks, b.RawData()) } @@ -205,6 +206,7 @@ func (m *impl) ToProtoV1() *pb.Message { Cancel: proto.Bool(e.Cancel), }) } + pbm.Wantlist.Full = proto.Bool(m.full) for _, b := range m.Blocks() { blk := &pb.Message_Block{ Data: b.RawData(), diff --git a/bitswap/message/message_test.go 
b/bitswap/message/message_test.go index 00740b424..add64878f 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -118,6 +118,10 @@ func TestToNetFromNetPreservesWantList(t *testing.T) { t.Fatal(err) } + if !copied.Full() { + t.Fatal("fullness attribute got dropped on marshal") + } + keys := make(map[string]bool) for _, k := range copied.Wantlist() { keys[k.Cid.KeyString()] = true From db289f907feba5c0addd2cb4af32f56f52f140f8 Mon Sep 17 00:00:00 2001 From: David Dias Date: Tue, 6 Dec 2016 18:53:04 -0800 Subject: [PATCH 0524/1035] update message.proto Add some comments so that I don't forget about these License: MIT Signed-off-by: David Dias This commit was moved from ipfs/go-bitswap@60152c265f9c128b4f392854ec5a7e92bcedea1b --- bitswap/message/pb/message.proto | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/bitswap/message/pb/message.proto b/bitswap/message/pb/message.proto index bd4f41b3e..59d03a6e1 100644 --- a/bitswap/message/pb/message.proto +++ b/bitswap/message/pb/message.proto @@ -5,21 +5,21 @@ message Message { message Wantlist { message Entry { - optional string block = 1; // the block key - optional int32 priority = 2; // the priority (normalized). default to 1 - optional bool cancel = 3; // whether this revokes an entry + optional string block = 1; // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0) + optional int32 priority = 2; // the priority (normalized). default to 1 + optional bool cancel = 3; // whether this revokes an entry } - repeated Entry entries = 1; // a list of wantlist entries - optional bool full = 2; // whether this is the full wantlist. default to false + repeated Entry entries = 1; // a list of wantlist entries + optional bool full = 2; // whether this is the full wantlist. 
default to false } message Block { - optional bytes prefix = 1; - optional bytes data = 2; + optional bytes prefix = 1; // CID prefix (cid version, multicodec and multihash prefix (type + length) + optional bytes data = 2; } optional Wantlist wantlist = 1; - repeated bytes blocks = 2; - repeated Block payload = 3; + repeated bytes blocks = 2; // used to send Blocks in bitswap 1.0.0 + repeated Block payload = 3; // used to send Blocks in bitswap 1.1.0 } From 6fcc8f034309894569cb70addac9bff03d2f2571 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 27 Dec 2016 02:13:59 -0800 Subject: [PATCH 0525/1035] update libp2p for identify configuration updates License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@54b5286bee94c08ed9bd6e43b176c8571af78ca7 --- bitswap/testnet/peernet.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 94baee01d..f3e30c929 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,8 +5,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + mockpeernet "gx/ipfs/QmQHmMFyhfp2ZXnbYWqAWhEideDCNDM6hzJwqCU29Y5zV2/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" - mockpeernet "gx/ipfs/QmbzCT1CwxVZ2ednptC9RavuJe7Bv8DDi2Ne89qUrA37XM/go-libp2p/p2p/net/mock" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) From bb19e93d25b590e47e60cecd2b0ee0b93baf896d Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 10 Jan 2017 05:56:28 -0800 Subject: [PATCH 0526/1035] update go-libp2p with negotiate lazy fixes License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@475ee252dd49ce11c36c1671d289d334b207b4f7 --- bitswap/bitswap_test.go | 2 +- bitswap/network/ipfs_impl.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 
4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index e50509461..849c2db41 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -17,7 +17,7 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - p2ptestutil "gx/ipfs/QmWdGJY4fcsfhLHucEfivw8J71yUqNUFbzdU1jnJBnN5Xh/go-libp2p-netutil" + p2ptestutil "gx/ipfs/QmPS1HTBHiJcqxDAZ4s8bGt22HtL3oC67TPR3BsrvM44Z1/go-libp2p-netutil" cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index c854f853e..68296e55a 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,7 +8,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - host "gx/ipfs/QmPTGbC34bPKaUm9wTxBo7zSCac7pDuG42ZmnXC718CKZZ/go-libp2p-host" + host "gx/ipfs/QmPsRtodRuBUir32nz5v4zuSBTSszrR1d3fA6Ahb6eaejj/go-libp2p-host" inet "gx/ipfs/QmQx1dHDDYENugYgqA22BaBrRfuv1coSsuPiM7rYh1wwGH/go-libp2p-net" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" ma "gx/ipfs/QmUAQaWbKxGCUTuoQVvvicbQNZ9APF5pDGWyAZSe93AtKH/go-multiaddr" diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index f3e30c929..f1590f577 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,8 +5,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet "gx/ipfs/QmQHmMFyhfp2ZXnbYWqAWhEideDCNDM6hzJwqCU29Y5zV2/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" + mockpeernet "gx/ipfs/QmdzDdLZ7nj133QvNHypyS9Y39g35bMFk5DJ2pmX7YqtKU/go-libp2p/p2p/net/mock" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 4099d18ff..ca1370e2e 100644 
--- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,9 +10,9 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + p2ptestutil "gx/ipfs/QmPS1HTBHiJcqxDAZ4s8bGt22HtL3oC67TPR3BsrvM44Z1/go-libp2p-netutil" ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" ds_sync "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync" - p2ptestutil "gx/ipfs/QmWdGJY4fcsfhLHucEfivw8J71yUqNUFbzdU1jnJBnN5Xh/go-libp2p-netutil" peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" ) From ea8eee8ded33ae8e91a6fafd5639742b44a8efcd Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Fri, 16 Dec 2016 19:04:22 +0100 Subject: [PATCH 0527/1035] make: rework makefiles for non-recursive make and add sharness coverage This commit introduces non-recursive Makefile infrastructure that replaces current Makefile infrastructure. It also generally cleanups the Makefiles, separates them into nicer sub-modules and centralizes common operations into single definitions. It allows to depend on any target that is defined in the makefile, this means that for example `gx install` is called once when `make build test_expensive_sharness` is called instead of 4 or 5 times. It also makes the dependencies much cleaner and allows for reuse of modules. For example sharness coverage collection (WIP) uses sharness target with amended PATH, previously it might have been possible but not without wiring in the coverage collection into sharness make runner code. Yes, it is more complex but not much more. There are few rules that have to be followed and few complexities added but IMHO it is worth it. How to NR-make: 1. If make is to generate some file via a target, it MUST be defined in Rules.mk file in the directory of the target. 2. 
`Rules.mk` file MUST have `include mk/header.mk` statement as the first line and `include mk/footer.mk` statement as the last line (apart from project root `Rules.mk`). 3. It then MUST be included by the closest `Rules.mk` file up the directory tree. 4. Inside a `Rules.mk` special variable accessed as `$(d)` is defined. Its value is current directory, use it so if the `Rules.mk` file is moved in the tree it still works without a problem. Caution: this variable is not available in the recipe part and MUST NOT be used. Use name of the target or prerequisite to extract it if you need it. 5. Make has only one global scope, this means that name conflicts are a thing. Names SHOULD follow `VAR_NAME_$(d)` convention. There are exceptions from this rule in form of well defined global variables. Examples: General lists `TGT_BIN`, `CLEAN`; General targets: `TEST`, `COVERAGE`; General variables: `GOFLAGS`, `DEPS_GO`. 3. Any rules, definitions or variables that fit some family SHOULD be defined in `mk/$family.mk` file and included from project root `Rules.mk` License: MIT Signed-off-by: Jakub Sztandera This commit was moved from ipfs/go-bitswap@77a6c3128ab4c527a077f32fcb583da4910668ab --- bitswap/message/pb/Makefile | 8 -------- 1 file changed, 8 deletions(-) delete mode 100644 bitswap/message/pb/Makefile diff --git a/bitswap/message/pb/Makefile b/bitswap/message/pb/Makefile deleted file mode 100644 index 5bbebea07..000000000 --- a/bitswap/message/pb/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -# TODO(brian): add proto tasks -all: message.pb.go - -message.pb.go: message.proto - protoc --gogo_out=. --proto_path=../../../../../:/usr/local/opt/protobuf/include:. 
$< - -clean: - rm message.pb.go From 02cb2f5a66a743c1af7f694add66b2e3996c994a Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 2 Feb 2017 20:09:02 -0800 Subject: [PATCH 0528/1035] update go-multihash and bubble up deps License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@9d4dee74cf8beb1ca327cfc63523c0f6e13c1a82 --- bitswap/bitswap.go | 6 +++--- bitswap/bitswap_test.go | 4 ++-- bitswap/decision/bench_test.go | 6 +++--- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/ledger.go | 4 ++-- bitswap/decision/peer_request_queue.go | 4 ++-- bitswap/decision/peer_request_queue_test.go | 4 ++-- bitswap/message/message.go | 4 ++-- bitswap/message/message_test.go | 4 ++-- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 14 +++++++------- bitswap/notifications/notifications.go | 2 +- bitswap/notifications/notifications_test.go | 2 +- bitswap/stat.go | 2 +- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 6 +++--- bitswap/testutils.go | 4 ++-- bitswap/wantlist/wantlist.go | 2 +- bitswap/wantmanager.go | 4 ++-- bitswap/workers.go | 4 ++-- 23 files changed, 46 insertions(+), 46 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index e1fb20de4..a951e3fe8 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -22,9 +22,9 @@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - loggables "gx/ipfs/QmTMy4hVSY28DdwJ9kBz6y7q6MuioFzPcpM3Ma3aPjo1i3/go-libp2p-loggables" - cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" - peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" + loggables "gx/ipfs/QmTcfnDHimxBJqx6utpnWqVHdvyquXgkwAvYt4zMaJMKS2/go-libp2p-loggables" + cid 
"gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" ) var log = logging.Logger("bitswap") diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 849c2db41..6ebcdd350 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -17,8 +17,8 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - p2ptestutil "gx/ipfs/QmPS1HTBHiJcqxDAZ4s8bGt22HtL3oC67TPR3BsrvM44Z1/go-libp2p-netutil" - cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" + p2ptestutil "gx/ipfs/QmTcGn1vzu7YNxz6FEXvfUfMy6WmYeQ5VtU3MbWM8c92rB/go-libp2p-netutil" + cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 43a1f6969..c1f16068e 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,9 +7,9 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" - cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" - "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" + cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" + u "gx/ipfs/QmZuY8aV7zbNXVy6DyN9SmnuH3o9nG852F4aTiSBpts8d1/go-ipfs-util" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index d494554d0..38b87dfc2 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -11,7 +11,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" logging 
"gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" + peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" ) // TODO consider taking responsibility for other types of requests. For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index ed985d166..d4ac303e6 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -15,7 +15,7 @@ import ( testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" dssync "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync" - peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" + peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index db1f24287..0cb7855d7 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,8 +6,8 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" - peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" + cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" ) func newLedger(p peer.ID) *ledger { diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 0f4246697..f3324e13a 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,8 +7,8 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" - peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" + cid 
"gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" ) type peerRequestQueue interface { diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 18c29f1e4..ef9e9d3f0 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -10,8 +10,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" - cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" + cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + u "gx/ipfs/QmZuY8aV7zbNXVy6DyN9SmnuH3o9nG852F4aTiSBpts8d1/go-ipfs-util" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index ad7177f02..578f2fbe1 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -8,10 +8,10 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmQx1dHDDYENugYgqA22BaBrRfuv1coSsuPiM7rYh1wwGH/go-libp2p-net" + inet "gx/ipfs/QmRuZnMorqodado1yeTQiv1i9rmtKj29CjPSsBKM7DFXV4/go-libp2p-net" + cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" - cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" ) // TODO move message.go into the bitswap package diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index add64878f..a93b9ccc2 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -8,8 +8,8 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" - 
u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util" - cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" + cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + u "gx/ipfs/QmZuY8aV7zbNXVy6DyN9SmnuH3o9nG852F4aTiSBpts8d1/go-ipfs-util" ) func mkFakeCid(s string) *cid.Cid { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index dfc1b3f02..1f071822f 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -4,9 +4,9 @@ import ( "context" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" - cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" - peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" + peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" ) var ( diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 68296e55a..8df9f2f98 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,15 +8,15 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - host "gx/ipfs/QmPsRtodRuBUir32nz5v4zuSBTSszrR1d3fA6Ahb6eaejj/go-libp2p-host" - inet "gx/ipfs/QmQx1dHDDYENugYgqA22BaBrRfuv1coSsuPiM7rYh1wwGH/go-libp2p-net" + pstore "gx/ipfs/QmQMQ2RUjnaEEX8ybmrhuFFGhAwPjyL1Eo6ZoJGD7aAccM/go-libp2p-peerstore" + inet "gx/ipfs/QmRuZnMorqodado1yeTQiv1i9rmtKj29CjPSsBKM7DFXV4/go-libp2p-net" + ma "gx/ipfs/QmSWLfmj5frN9xVLMMN846dMDriy5wN5jeghUm7aTW3DAG/go-multiaddr" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - ma "gx/ipfs/QmUAQaWbKxGCUTuoQVvvicbQNZ9APF5pDGWyAZSe93AtKH/go-multiaddr" + cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - routing 
"gx/ipfs/QmbkGVaN9W6RYJK4Ws5FvMKXKDqdRQ5snhtaa92qP6L8eU/go-libp2p-routing" - cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" - pstore "gx/ipfs/QmeXj9VAjmYQZxpmVz7VzccbJrpmr8qkCDSjfVNsPTWTYU/go-libp2p-peerstore" - peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" + peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" + routing "gx/ipfs/QmZghcVHwXQC3Zvnvn24LgTmSPkEn2o3PDyKb6nrtPRzRh/go-libp2p-routing" + host "gx/ipfs/QmbzbRyd22gcW92U1rA2yKagB3myMYhk45XBknJ49F9XWJ/go-libp2p-host" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 440247fed..f0d0402c8 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -6,7 +6,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" - cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" + cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index ff2811884..d66864811 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -8,7 +8,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" + cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/stat.go b/bitswap/stat.go index f8ca0d0a4..7f4ff1751 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -3,7 +3,7 @@ package bitswap import ( "sort" - cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" + cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" ) type Stat struct 
{ diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 0e9331627..60ceae491 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" + peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 31d572283..062f59bce 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -11,7 +11,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" + peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index f1590f577..bfaa13aa2 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -6,8 +6,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" - mockpeernet "gx/ipfs/QmdzDdLZ7nj133QvNHypyS9Y39g35bMFk5DJ2pmX7YqtKU/go-libp2p/p2p/net/mock" - peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" + mockpeernet "gx/ipfs/QmSNJRX4uphb3Eyp69uYbpRVvgqjPxfjnJmjcdMWkDH5Pn/go-libp2p/p2p/net/mock" + peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 4d8769e5b..b5eec43ea 100644 --- a/bitswap/testnet/virtual.go +++ 
b/bitswap/testnet/virtual.go @@ -9,9 +9,9 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - routing "gx/ipfs/QmbkGVaN9W6RYJK4Ws5FvMKXKDqdRQ5snhtaa92qP6L8eU/go-libp2p-routing" - cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" - peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" + cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" + routing "gx/ipfs/QmZghcVHwXQC3Zvnvn24LgTmSPkEn2o3PDyKb6nrtPRzRh/go-libp2p-routing" ) func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index ca1370e2e..526b6fa88 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,10 +10,10 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - p2ptestutil "gx/ipfs/QmPS1HTBHiJcqxDAZ4s8bGt22HtL3oC67TPR3BsrvM44Z1/go-libp2p-netutil" ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" ds_sync "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync" - peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" + p2ptestutil "gx/ipfs/QmTcGn1vzu7YNxz6FEXvfUfMy6WmYeQ5VtU3MbWM8c92rB/go-libp2p-netutil" + peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index dedf87140..07d8dcaee 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" + cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" ) type ThreadSafe struct { diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 83910c47b..899a188fb 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,8 +9,8 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" - peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" + cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" ) type WantManager struct { diff --git a/bitswap/workers.go b/bitswap/workers.go index 4df8af11d..b6840ef52 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -9,8 +9,8 @@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid" - peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer" + cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" ) var TaskWorkerCount = 8 From 95081632ce2cba9936891a920be2cae56aa20311 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Thu, 16 Feb 2017 15:19:48 +0100 Subject: [PATCH 0529/1035] deps: update dependencies for PNet License: MIT Signed-off-by: 
Jakub Sztandera This commit was moved from ipfs/go-bitswap@94bcd50698c1b50dca015a2788ee9abb7716e9cb --- bitswap/bitswap_test.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 6ebcdd350..8cef2d3ad 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -17,8 +17,8 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - p2ptestutil "gx/ipfs/QmTcGn1vzu7YNxz6FEXvfUfMy6WmYeQ5VtU3MbWM8c92rB/go-libp2p-netutil" cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + p2ptestutil "gx/ipfs/QmdGRzr9bPTt2ZrBFaq5R2zzD7JFXNRxXZGkzsVcW6pEzh/go-libp2p-netutil" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index bfaa13aa2..38378736d 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -6,7 +6,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" - mockpeernet "gx/ipfs/QmSNJRX4uphb3Eyp69uYbpRVvgqjPxfjnJmjcdMWkDH5Pn/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmU3g3psEDiC4tQh1Qu2NYg5aYVQqxC3m74ZavLwPfJEtu/go-libp2p/p2p/net/mock" peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 526b6fa88..65d122bf3 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -12,8 +12,8 @@ import ( ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" ds_sync "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync" - p2ptestutil "gx/ipfs/QmTcGn1vzu7YNxz6FEXvfUfMy6WmYeQ5VtU3MbWM8c92rB/go-libp2p-netutil" peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" + p2ptestutil 
"gx/ipfs/QmdGRzr9bPTt2ZrBFaq5R2zzD7JFXNRxXZGkzsVcW6pEzh/go-libp2p-netutil" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! From 6123c60dc45f8c389925455e5965ce6018a4fb7b Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Fri, 20 Jan 2017 14:09:03 +0100 Subject: [PATCH 0530/1035] Introduce block and dup histograms to bitswap License: MIT Signed-off-by: Jakub Sztandera This commit was moved from ipfs/go-bitswap@2156770506425e51fbb94829373e75bbe3331449 --- bitswap/bitswap.go | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index a951e3fe8..cc821dc1e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -19,6 +19,7 @@ import ( flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" + metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" @@ -47,6 +48,9 @@ var ( HasBlockBufferSize = 256 provideKeysBufferSize = 2048 provideWorkerMax = 512 + + // the 1<<18+15 is to observe old file chunks that are 1<<18 + 14 in size + metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} ) func init() { @@ -74,6 +78,11 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, // shouldn't accept a context anymore. Clients should probably use Close() // exclusively. 
We should probably find another way to share logging data ctx, cancelFunc := context.WithCancel(parent) + ctx = metrics.CtxSubScope(ctx, "bitswap") + dupHist := metrics.NewCtx(ctx, "dup_blocks_bytes", "Summary of duplicate"+ + " data blocks recived").Histogram(metricsBuckets) + allHist := metrics.NewCtx(ctx, "all_blocks_bytes", "Summary of all"+ + " data blocks recived").Histogram(metricsBuckets) notif := notifications.New() px := process.WithTeardown(func() error { @@ -91,6 +100,9 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, newBlocks: make(chan *cid.Cid, HasBlockBufferSize), provideKeys: make(chan *cid.Cid, provideKeysBufferSize), wm: NewWantManager(ctx, network), + + dupMetric: dupHist, + allMetric: allHist, } go bs.wm.Run() network.SetDelegate(bs) @@ -145,6 +157,10 @@ type Bitswap struct { blocksRecvd int dupBlocksRecvd int dupDataRecvd uint64 + + // Metrics interface metrics + dupMetric metrics.Histogram + allMetric metrics.Histogram } type blockRequest struct { @@ -373,6 +389,8 @@ var ErrAlreadyHaveBlock = errors.New("already have block") func (bs *Bitswap) updateReceiveCounters(b blocks.Block) error { bs.counterLk.Lock() defer bs.counterLk.Unlock() + blkLen := len(b.RawData()) + bs.allMetric.Observe(float64(blkLen)) bs.blocksRecvd++ has, err := bs.blockstore.Has(b.Cid()) if err != nil { @@ -380,8 +398,9 @@ func (bs *Bitswap) updateReceiveCounters(b blocks.Block) error { return err } if err == nil && has { + bs.dupMetric.Observe(float64(blkLen)) bs.dupBlocksRecvd++ - bs.dupDataRecvd += uint64(len(b.RawData())) + bs.dupDataRecvd += uint64(blkLen) } if has { From 282182607ef3768c507beec277b5de0d35762d40 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Fri, 20 Jan 2017 14:13:04 +0100 Subject: [PATCH 0531/1035] refactor: cleanup bitswap metrics collection License: MIT Signed-off-by: Jakub Sztandera This commit was moved from ipfs/go-bitswap@78ce3724314b66a29fefa84f9752fc5ddf8a656c --- bitswap/bitswap.go | 26 
++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index cc821dc1e..46cc4dbd8 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -368,9 +368,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg go func(b blocks.Block) { defer wg.Done() - if err := bs.updateReceiveCounters(b); err != nil { - return // ignore error, is either logged previously, or ErrAlreadyHaveBlock - } + bs.updateReceiveCounters(b) k := b.Cid() log.Event(ctx, "Bitswap.GetBlockRequest.End", k) @@ -386,27 +384,27 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg var ErrAlreadyHaveBlock = errors.New("already have block") -func (bs *Bitswap) updateReceiveCounters(b blocks.Block) error { - bs.counterLk.Lock() - defer bs.counterLk.Unlock() +func (bs *Bitswap) updateReceiveCounters(b blocks.Block) { blkLen := len(b.RawData()) - bs.allMetric.Observe(float64(blkLen)) - bs.blocksRecvd++ has, err := bs.blockstore.Has(b.Cid()) if err != nil { log.Infof("blockstore.Has error: %s", err) - return err + return } - if err == nil && has { + + bs.allMetric.Observe(float64(blkLen)) + if has { bs.dupMetric.Observe(float64(blkLen)) - bs.dupBlocksRecvd++ - bs.dupDataRecvd += uint64(blkLen) } + bs.counterLk.Lock() + defer bs.counterLk.Unlock() + + bs.blocksRecvd++ if has { - return ErrAlreadyHaveBlock + bs.dupBlocksRecvd++ + bs.dupDataRecvd += uint64(blkLen) } - return nil } // Connected/Disconnected warns bitswap about peer connections From f2117b4cc16e182c748db8cc4cf2a71e1c9cf2b9 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Fri, 20 Jan 2017 18:28:42 +0100 Subject: [PATCH 0532/1035] Add metric of number of elements in the wantlist License: MIT Signed-off-by: Jakub Sztandera This commit was moved from ipfs/go-bitswap@c4f7e855e97e1df638afbd255891e252a2ca3006 --- bitswap/wantmanager.go | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 
deletions(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 899a188fb..a9afc3cd1 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -9,6 +9,8 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + + metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" ) @@ -27,20 +29,25 @@ type WantManager struct { network bsnet.BitSwapNetwork ctx context.Context cancel func() + + metricWantlist metrics.Gauge } func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantManager { ctx, cancel := context.WithCancel(ctx) + wantlistGauge := metrics.NewCtx(ctx, "wanlist_total", + "Number of items in wantlist.").Gauge() return &WantManager{ - incoming: make(chan []*bsmsg.Entry, 10), - connect: make(chan peer.ID, 10), - disconnect: make(chan peer.ID, 10), - peerReqs: make(chan chan []peer.ID), - peers: make(map[peer.ID]*msgQueue), - wl: wantlist.NewThreadSafe(), - network: network, - ctx: ctx, - cancel: cancel, + incoming: make(chan []*bsmsg.Entry, 10), + connect: make(chan peer.ID, 10), + disconnect: make(chan peer.ID, 10), + peerReqs: make(chan chan []peer.ID), + peers: make(map[peer.ID]*msgQueue), + wl: wantlist.NewThreadSafe(), + network: network, + ctx: ctx, + cancel: cancel, + metricWantlist: wantlistGauge, } } @@ -282,10 +289,12 @@ func (pm *WantManager) Run() { for _, e := range entries { if e.Cancel { if pm.wl.Remove(e.Cid) { + pm.metricWantlist.Dec() filtered = append(filtered, e) } } else { if pm.wl.AddEntry(e.Entry) { + pm.metricWantlist.Inc() filtered = append(filtered, e) } } From 2f7de2d45ecb2d77f861589caa2dabd223b048c4 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Fri, 20 Jan 2017 18:40:47 +0100 Subject: 
[PATCH 0533/1035] Introduce sent blocks histogram License: MIT Signed-off-by: Jakub Sztandera This commit was moved from ipfs/go-bitswap@2712d2985e6912477303de59935b578f26587940 --- bitswap/bitswap.go | 4 ++-- bitswap/wantmanager.go | 32 +++++++++++++++++++------------- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 46cc4dbd8..7e565e837 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -79,9 +79,9 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, // exclusively. We should probably find another way to share logging data ctx, cancelFunc := context.WithCancel(parent) ctx = metrics.CtxSubScope(ctx, "bitswap") - dupHist := metrics.NewCtx(ctx, "dup_blocks_bytes", "Summary of duplicate"+ + dupHist := metrics.NewCtx(ctx, "recv_dup_blocks_bytes", "Summary of duplicate"+ " data blocks recived").Histogram(metricsBuckets) - allHist := metrics.NewCtx(ctx, "all_blocks_bytes", "Summary of all"+ + allHist := metrics.NewCtx(ctx, "recv_all_blocks_bytes", "Summary of all"+ " data blocks recived").Histogram(metricsBuckets) notif := notifications.New() diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index a9afc3cd1..555debf2c 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -30,24 +30,28 @@ type WantManager struct { ctx context.Context cancel func() - metricWantlist metrics.Gauge + wantlistGauge metrics.Gauge + sentHistogram metrics.Histogram } func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantManager { ctx, cancel := context.WithCancel(ctx) wantlistGauge := metrics.NewCtx(ctx, "wanlist_total", "Number of items in wantlist.").Gauge() + sentHistogram := metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by"+ + " this bitswap").Histogram(metricsBuckets) return &WantManager{ - incoming: make(chan []*bsmsg.Entry, 10), - connect: make(chan peer.ID, 10), - disconnect: make(chan peer.ID, 10), - peerReqs: make(chan 
chan []peer.ID), - peers: make(map[peer.ID]*msgQueue), - wl: wantlist.NewThreadSafe(), - network: network, - ctx: ctx, - cancel: cancel, - metricWantlist: wantlistGauge, + incoming: make(chan []*bsmsg.Entry, 10), + connect: make(chan peer.ID, 10), + disconnect: make(chan peer.ID, 10), + peerReqs: make(chan chan []peer.ID), + peers: make(map[peer.ID]*msgQueue), + wl: wantlist.NewThreadSafe(), + network: network, + ctx: ctx, + cancel: cancel, + wantlistGauge: wantlistGauge, + sentHistogram: sentHistogram, } } @@ -116,6 +120,8 @@ func (pm *WantManager) SendBlock(ctx context.Context, env *engine.Envelope) { // throughout the network stack defer env.Sent() + pm.sentHistogram.Observe(float64(len(env.Block.RawData()))) + msg := bsmsg.New(false) msg.AddBlock(env.Block) log.Infof("Sending block %s to %s", env.Block, env.Peer) @@ -289,12 +295,12 @@ func (pm *WantManager) Run() { for _, e := range entries { if e.Cancel { if pm.wl.Remove(e.Cid) { - pm.metricWantlist.Dec() + pm.wantlistGauge.Dec() filtered = append(filtered, e) } } else { if pm.wl.AddEntry(e.Entry) { - pm.metricWantlist.Inc() + pm.wantlistGauge.Inc() filtered = append(filtered, e) } } From ccdaa7e69a8e0157d9332a566b2115b5be7c7663 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 5 Mar 2017 23:06:04 -0800 Subject: [PATCH 0534/1035] update go-libp2p-kad-dht with getclosestpeers fix License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@310f9b197cf2911873dd24ec1d9776433fe0fba7 --- bitswap/bitswap.go | 4 ++-- bitswap/bitswap_test.go | 2 +- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 10 +++++----- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 4 ++-- 
bitswap/testutils.go | 4 ++-- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 2 +- 17 files changed, 25 insertions(+), 25 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 7e565e837..3c9903b71 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -23,9 +23,9 @@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - loggables "gx/ipfs/QmTcfnDHimxBJqx6utpnWqVHdvyquXgkwAvYt4zMaJMKS2/go-libp2p-loggables" cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" + peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" + loggables "gx/ipfs/QmXs1igHHEaUmMxKtbP8Z9wTjitQ75sqxaKQP4QgnLN4nn/go-libp2p-loggables" ) var log = logging.Logger("bitswap") diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 8cef2d3ad..91e5d563d 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -17,8 +17,8 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" + p2ptestutil "gx/ipfs/QmNqvnxGtJBaKQnenD6uboNGdjSjHGmZGRxMHEevKJe5Pk/go-libp2p-netutil" cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - p2ptestutil "gx/ipfs/QmdGRzr9bPTt2ZrBFaq5R2zzD7JFXNRxXZGkzsVcW6pEzh/go-libp2p-netutil" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index c1f16068e..4c3158bba 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -8,7 +8,7 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" + "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" u "gx/ipfs/QmZuY8aV7zbNXVy6DyN9SmnuH3o9nG852F4aTiSBpts8d1/go-ipfs-util" ) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 38b87dfc2..c92c8363a 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -11,7 +11,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" + peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" ) // TODO consider taking responsibility for other types of requests. 
For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index d4ac303e6..650159cb6 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -15,7 +15,7 @@ import ( testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" dssync "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync" - peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" + peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 0cb7855d7..0fcfb5b61 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -7,7 +7,7 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" + peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" ) func newLedger(p peer.ID) *ledger { diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index f3324e13a..76e859f4d 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -8,7 +8,7 @@ import ( pq "github.com/ipfs/go-ipfs/thirdparty/pq" cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" + peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" ) type peerRequestQueue interface { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 578f2fbe1..2e8c531db 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -8,8 +8,8 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet 
"gx/ipfs/QmRuZnMorqodado1yeTQiv1i9rmtKj29CjPSsBKM7DFXV4/go-libp2p-net" cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + inet "gx/ipfs/QmVtMT3fD7DzQNW7hdm6Xe6KPstzcggrhNpeVZ4422UpKK/go-libp2p-net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" ) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 1f071822f..278fe530d 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -5,8 +5,8 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" - peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" ) var ( diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 8df9f2f98..7f18800ea 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,15 +8,15 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - pstore "gx/ipfs/QmQMQ2RUjnaEEX8ybmrhuFFGhAwPjyL1Eo6ZoJGD7aAccM/go-libp2p-peerstore" - inet "gx/ipfs/QmRuZnMorqodado1yeTQiv1i9rmtKj29CjPSsBKM7DFXV4/go-libp2p-net" ma "gx/ipfs/QmSWLfmj5frN9xVLMMN846dMDriy5wN5jeghUm7aTW3DAG/go-multiaddr" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + routing "gx/ipfs/QmUc6twRJRE9MNrUGd8eo9WjHHxebGppdZfptGCASkR7fF/go-libp2p-routing" cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + inet "gx/ipfs/QmVtMT3fD7DzQNW7hdm6Xe6KPstzcggrhNpeVZ4422UpKK/go-libp2p-net" + peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" + host "gx/ipfs/QmXzeAcmKDTfNZQBiyF22hQKuTK7P5z6MBBQLTk9bbiSUc/go-libp2p-host" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - 
peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" - routing "gx/ipfs/QmZghcVHwXQC3Zvnvn24LgTmSPkEn2o3PDyKb6nrtPRzRh/go-libp2p-routing" - host "gx/ipfs/QmbzbRyd22gcW92U1rA2yKagB3myMYhk45XBknJ49F9XWJ/go-libp2p-host" + pstore "gx/ipfs/Qme1g4e3m2SmdiSGGU3vSWmUStwUjc5oECnEriaK9Xa1HU/go-libp2p-peerstore" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 60ceae491..748cadfd1 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" + peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 062f59bce..286d345d0 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -11,7 +11,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" + peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 38378736d..b26a02d75 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -6,8 +6,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" - mockpeernet "gx/ipfs/QmU3g3psEDiC4tQh1Qu2NYg5aYVQqxC3m74ZavLwPfJEtu/go-libp2p/p2p/net/mock" - peer 
"gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" + peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" + mockpeernet "gx/ipfs/QmeWJwi61vii5g8zQUB9UGegfUbmhTKHgeDFP9XuSp5jZ4/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index b5eec43ea..790c801da 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,9 +9,9 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + routing "gx/ipfs/QmUc6twRJRE9MNrUGd8eo9WjHHxebGppdZfptGCASkR7fF/go-libp2p-routing" cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" - routing "gx/ipfs/QmZghcVHwXQC3Zvnvn24LgTmSPkEn2o3PDyKb6nrtPRzRh/go-libp2p-routing" + peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" ) func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 65d122bf3..4b14f8297 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,10 +10,10 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + p2ptestutil "gx/ipfs/QmNqvnxGtJBaKQnenD6uboNGdjSjHGmZGRxMHEevKJe5Pk/go-libp2p-netutil" ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" ds_sync "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync" - peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" - p2ptestutil "gx/ipfs/QmdGRzr9bPTt2ZrBFaq5R2zzD7JFXNRxXZGkzsVcW6pEzh/go-libp2p-netutil" + peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 555debf2c..5017d6532 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -12,7 +12,7 @@ import ( metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" + peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" ) type WantManager struct { diff --git a/bitswap/workers.go b/bitswap/workers.go index b6840ef52..722e129d5 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -10,7 +10,7 @@ import ( procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - peer "gx/ipfs/QmZcUPvPhD1Xvk6mwijYF8AfR3mG31S1YsEfHG4khrFPRr/go-libp2p-peer" + peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" ) var TaskWorkerCount = 8 From 375cbcba83cdda1de9a5bd11824c4dc00818c6f2 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 26 Jan 2017 16:25:06 -0800 Subject: [PATCH 0535/1035] Add more info to bitswap stat License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@d183ae75bada3b283f60d40e40243095142a97e7 --- bitswap/bitswap.go | 4 ++++ bitswap/bitswap_test.go | 38 ++++++++++++++++++++++++++++++++++++++ bitswap/stat.go | 6 ++++++ bitswap/workers.go | 4 ++++ 4 files changed, 52 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 7e565e837..d60be11d0 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -157,6 +157,9 @@ type Bitswap struct { blocksRecvd int dupBlocksRecvd int dupDataRecvd uint64 + blocksSent int + dataSent uint64 + dataRecvd uint64 // Metrics interface metrics dupMetric metrics.Histogram @@ -401,6 +404,7 @@ func (bs *Bitswap) updateReceiveCounters(b blocks.Block) 
{ defer bs.counterLk.Unlock() bs.blocksRecvd++ + bs.dataRecvd += uint64(len(b.RawData())) if has { bs.dupBlocksRecvd++ bs.dupDataRecvd += uint64(blkLen) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 8cef2d3ad..7b72279bf 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -3,6 +3,7 @@ package bitswap import ( "bytes" "context" + "fmt" "sync" "testing" "time" @@ -299,6 +300,25 @@ func TestEmptyKey(t *testing.T) { } } +func assertStat(st *Stat, sblks, rblks int, sdata, rdata uint64) error { + if sblks != st.BlocksSent { + return fmt.Errorf("mismatch in blocks sent: %d vs %d", sblks, st.BlocksSent) + } + + if rblks != st.BlocksReceived { + return fmt.Errorf("mismatch in blocks recvd: %d vs %d", rblks, st.BlocksReceived) + } + + if sdata != st.DataSent { + return fmt.Errorf("mismatch in data sent: %d vs %d", sdata, st.DataSent) + } + + if rdata != st.DataReceived { + return fmt.Errorf("mismatch in data recvd: %d vs %d", rdata, st.DataReceived) + } + return nil +} + func TestBasicBitswap(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) sg := NewTestSessionGenerator(net) @@ -321,6 +341,24 @@ func TestBasicBitswap(t *testing.T) { t.Fatal(err) } + st0, err := instances[0].Exchange.Stat() + if err != nil { + t.Fatal(err) + } + + st1, err := instances[1].Exchange.Stat() + if err != nil { + t.Fatal(err) + } + + if err := assertStat(st0, 1, 0, 1, 0); err != nil { + t.Fatal(err) + } + + if err := assertStat(st1, 0, 1, 0, 1); err != nil { + t.Fatal(err) + } + t.Log(blk) for _, inst := range instances { err := inst.Exchange.Close() diff --git a/bitswap/stat.go b/bitswap/stat.go index 7f4ff1751..87da3b49f 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -11,6 +11,9 @@ type Stat struct { Wantlist []*cid.Cid Peers []string BlocksReceived int + DataReceived uint64 + BlocksSent int + DataSent uint64 DupBlksReceived int DupDataReceived uint64 } @@ -23,6 +26,9 @@ func (bs *Bitswap) Stat() (*Stat, 
error) { st.BlocksReceived = bs.blocksRecvd st.DupBlksReceived = bs.dupBlocksRecvd st.DupDataReceived = bs.dupDataRecvd + st.BlocksSent = bs.blocksSent + st.DataSent = bs.dataSent + st.DataReceived = bs.dataRecvd bs.counterLk.Unlock() for _, p := range bs.engine.Peers() { diff --git a/bitswap/workers.go b/bitswap/workers.go index b6840ef52..a8c5117e8 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -64,6 +64,10 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { }) bs.wm.SendBlock(ctx, envelope) + bs.counterLk.Lock() + bs.blocksSent++ + bs.dataSent += uint64(len(envelope.Block.RawData())) + bs.counterLk.Unlock() case <-ctx.Done(): return } From 5707f4d25c026cb05078cd25d7d8f0faf9f398e6 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Fri, 24 Mar 2017 16:36:46 +0100 Subject: [PATCH 0536/1035] Make Golint happy in the blocks submodule. This has required changing the order of some parameters and adding HashOnRead to the Blockstore interface (which I have in turn added to all the wrapper implementations). License: MIT Signed-off-by: Hector Sanjuan This commit was moved from ipfs/go-bitswap@92eb5ae286122bdb728bfbcf33272cda68c26d39 --- bitswap/testutils.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 4b14f8297..6c615acfe 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -94,8 +94,9 @@ func Session(ctx context.Context, net tn.Network, p testutil.Identity) Instance adapter := net.Adapter(p) dstore := ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay)) - bstore, err := blockstore.CachedBlockstore(blockstore.NewBlockstore( - ds_sync.MutexWrap(dstore)), ctx, blockstore.DefaultCacheOpts()) + bstore, err := blockstore.CachedBlockstore(ctx, + blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)), + blockstore.DefaultCacheOpts()) if err != nil { panic(err.Error()) // FIXME perhaps change signature and return error. 
} From 77de55d09dc260c55326e33f1d74ac78330b3fde Mon Sep 17 00:00:00 2001 From: Andrew Chin Date: Tue, 28 Mar 2017 23:32:21 -0400 Subject: [PATCH 0537/1035] Fix wanlist typo in prometheus metric name This will be a breaking change for anyone who is currently monitoring the `ipfs_bitswap_wanlist_total` prometheus stat License: MIT Signed-off-by: Andrew Chin This commit was moved from ipfs/go-bitswap@c2dd4deaf0efa20ee1c9aa53fc5c4bbb5e1d3e58 --- bitswap/wantmanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 5017d6532..68f14f493 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -36,7 +36,7 @@ type WantManager struct { func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantManager { ctx, cancel := context.WithCancel(ctx) - wantlistGauge := metrics.NewCtx(ctx, "wanlist_total", + wantlistGauge := metrics.NewCtx(ctx, "wantlist_total", "Number of items in wantlist.").Gauge() sentHistogram := metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by"+ " this bitswap").Histogram(metricsBuckets) From e7e8257e96477e3a2286613cbc334b28bcb495fb Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 24 Mar 2017 23:51:18 -0700 Subject: [PATCH 0538/1035] bubble up updates from go-multihash changes License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@bec73288a388c157f5c493fddc57fbfc9119d2db --- bitswap/bitswap.go | 6 +++--- bitswap/bitswap_test.go | 4 ++-- bitswap/decision/bench_test.go | 6 +++--- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/ledger.go | 4 ++-- bitswap/decision/peer_request_queue.go | 4 ++-- bitswap/decision/peer_request_queue_test.go | 4 ++-- bitswap/message/message.go | 4 ++-- bitswap/message/message_test.go | 4 ++-- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 14 +++++++------- bitswap/notifications/notifications.go | 2 +- 
bitswap/notifications/notifications_test.go | 2 +- bitswap/stat.go | 2 +- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 6 +++--- bitswap/testutils.go | 4 ++-- bitswap/wantlist/wantlist.go | 2 +- bitswap/wantmanager.go | 4 ++-- bitswap/workers.go | 4 ++-- 23 files changed, 46 insertions(+), 46 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d5c26e5a7..d76dbb320 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -23,9 +23,9 @@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" - loggables "gx/ipfs/QmXs1igHHEaUmMxKtbP8Z9wTjitQ75sqxaKQP4QgnLN4nn/go-libp2p-loggables" + loggables "gx/ipfs/QmVesPmqbPp7xRGyY96tnBwzDtVV1nqv4SCVxo5zCqKyH8/go-libp2p-loggables" + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) var log = logging.Logger("bitswap") diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 5e5ea2cee..78467ce94 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -18,8 +18,8 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - p2ptestutil "gx/ipfs/QmNqvnxGtJBaKQnenD6uboNGdjSjHGmZGRxMHEevKJe5Pk/go-libp2p-netutil" - cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + p2ptestutil "gx/ipfs/QmcCgouQ5iXfmxmVNc1fpXLacRSPMNHx4tzqDpou6XNvvd/go-libp2p-netutil" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 4c3158bba..f77044f94 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,9 +7,9 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" - u "gx/ipfs/QmZuY8aV7zbNXVy6DyN9SmnuH3o9nG852F4aTiSBpts8d1/go-ipfs-util" + u "gx/ipfs/QmWbjfz3u6HkAdPh34dgPchGbQjob6LXLhAeCGii2TX69n/go-ipfs-util" + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index c92c8363a..f4b170800 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -11,7 +11,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" + peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) // TODO consider taking responsibility for other types of requests. 
For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 650159cb6..851e1469d 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -15,7 +15,7 @@ import ( testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" dssync "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync" - peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" + peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 0fcfb5b61..ac8362467 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,8 +6,8 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) func newLedger(p peer.ID) *ledger { diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 76e859f4d..d989174a2 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,8 +7,8 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) type peerRequestQueue interface { diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 
ef9e9d3f0..f0fa03bb2 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -10,8 +10,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - u "gx/ipfs/QmZuY8aV7zbNXVy6DyN9SmnuH3o9nG852F4aTiSBpts8d1/go-ipfs-util" + u "gx/ipfs/QmWbjfz3u6HkAdPh34dgPchGbQjob6LXLhAeCGii2TX69n/go-ipfs-util" + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 2e8c531db..ecf3d9957 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -8,8 +8,8 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - inet "gx/ipfs/QmVtMT3fD7DzQNW7hdm6Xe6KPstzcggrhNpeVZ4422UpKK/go-libp2p-net" + inet "gx/ipfs/QmVHSBsn8LEeay8m5ERebgUVuhzw838PsyTttCmP6GMJkg/go-libp2p-net" + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" ) diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index a93b9ccc2..ddcba8e17 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -8,8 +8,8 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" - cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - u "gx/ipfs/QmZuY8aV7zbNXVy6DyN9SmnuH3o9nG852F4aTiSBpts8d1/go-ipfs-util" + u "gx/ipfs/QmWbjfz3u6HkAdPh34dgPchGbQjob6LXLhAeCGii2TX69n/go-ipfs-util" + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" ) func mkFakeCid(s string) *cid.Cid { diff --git 
a/bitswap/network/interface.go b/bitswap/network/interface.go index 278fe530d..7288024fe 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -4,9 +4,9 @@ import ( "context" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" + peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) var ( diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 7f18800ea..de9959e4a 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,15 +8,15 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - ma "gx/ipfs/QmSWLfmj5frN9xVLMMN846dMDriy5wN5jeghUm7aTW3DAG/go-multiaddr" + pstore "gx/ipfs/QmNUVzEjq3XWJ89hegahPvyfJbTXgTaom48pLb7YBD9gHQ/go-libp2p-peerstore" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - routing "gx/ipfs/QmUc6twRJRE9MNrUGd8eo9WjHHxebGppdZfptGCASkR7fF/go-libp2p-routing" - cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - inet "gx/ipfs/QmVtMT3fD7DzQNW7hdm6Xe6KPstzcggrhNpeVZ4422UpKK/go-libp2p-net" - peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" - host "gx/ipfs/QmXzeAcmKDTfNZQBiyF22hQKuTK7P5z6MBBQLTk9bbiSUc/go-libp2p-host" + inet "gx/ipfs/QmVHSBsn8LEeay8m5ERebgUVuhzw838PsyTttCmP6GMJkg/go-libp2p-net" + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - pstore "gx/ipfs/Qme1g4e3m2SmdiSGGU3vSWmUStwUjc5oECnEriaK9Xa1HU/go-libp2p-peerstore" + routing "gx/ipfs/QmafuecpeZp3k3sHJ5mUARHd4795revuadECQMkmHB8LfW/go-libp2p-routing" + host 
"gx/ipfs/QmcyNeWPsoFGxThGpV8JnJdfUNankKhWCTrbrcFRQda4xR/go-libp2p-host" + ma "gx/ipfs/QmcyqRMCAXVtYPS4DiBrA7sezL9rRGfW8Ctx7cywL4TXJj/go-multiaddr" + peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index f0d0402c8..43322793b 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -6,7 +6,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" - cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index d66864811..ab83015e4 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -8,7 +8,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/stat.go b/bitswap/stat.go index 87da3b49f..8dae9abbf 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -3,7 +3,7 @@ package bitswap import ( "sort" - cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" ) type Stat struct { diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 748cadfd1..aaa0d24fd 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/thirdparty/testutil" - 
peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" + peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 286d345d0..44f663787 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -11,7 +11,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" + peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index b26a02d75..e3f14d3ea 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -6,8 +6,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" - peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" - mockpeernet "gx/ipfs/QmeWJwi61vii5g8zQUB9UGegfUbmhTKHgeDFP9XuSp5jZ4/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmRai5yZNL67pWCoznW7sBdFnqZrFULuJ5w8KhmRyhdgN4/go-libp2p/p2p/net/mock" + peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 790c801da..3a743a27d 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,9 +9,9 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - routing "gx/ipfs/QmUc6twRJRE9MNrUGd8eo9WjHHxebGppdZfptGCASkR7fF/go-libp2p-routing" - cid 
"gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + routing "gx/ipfs/QmafuecpeZp3k3sHJ5mUARHd4795revuadECQMkmHB8LfW/go-libp2p-routing" + peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 6c615acfe..cbc621b6e 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,10 +10,10 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - p2ptestutil "gx/ipfs/QmNqvnxGtJBaKQnenD6uboNGdjSjHGmZGRxMHEevKJe5Pk/go-libp2p-netutil" ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" ds_sync "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync" - peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" + p2ptestutil "gx/ipfs/QmcCgouQ5iXfmxmVNc1fpXLacRSPMNHx4tzqDpou6XNvvd/go-libp2p-netutil" + peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 07d8dcaee..94b8219c3 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" ) type ThreadSafe struct { diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 68f14f493..0825e8cfc 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -11,8 +11,8 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" - cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) type WantManager struct { diff --git a/bitswap/workers.go b/bitswap/workers.go index 184e80870..6c6fe0e8b 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -9,8 +9,8 @@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - cid "gx/ipfs/QmV5gPoRsjN1Gid3LMdNZTyfCtP2DsvqEbMAmz82RmmiGk/go-cid" - peer "gx/ipfs/QmWUswjn261LSyVxWAEpMVtPdy8zmKBJJfBpG3Qdpa8ZsE/go-libp2p-peer" + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) var TaskWorkerCount = 8 From 0ed40caa2b31f010c57b18e5ad95fd12c6cc591a Mon Sep 17 00:00:00 2001 From: dgrisham Date: Mon, 24 Apr 2017 15:50:25 -0600 Subject: [PATCH 0539/1035] bug fix: `BytesSent` in peers' ledgers now updates When sending data to another user, the number 
of bytes sent to that user (saved by the corresponding Bitswap ledger) was not updated (it was always 0). This also meant that the debt ratio was also always 0. The function that updates the `BytesSent` value in the ledger, `MessageSent()`, was already implemented, however it was not called when the peer was sent data. To fix this, a call to `MessageSent()` was made in the `taskWorker()` function, which is where both the message in question and the Bitswap engine were available to make the call. `MessageSent()` requires the peer's ID and `BitSwapMessage` as its arguments, the latter of which had to be created by making a new `BitSwapMessage`, then the block being sent was added to the new message. Note that, similar to the analagous call to `MessageReceived()`, records *all* of the bytes sent to a particular user. At some point, both of these should be updated to only record the numbers of *useful* bytes sent and received between peers. License: MIT Signed-off-by: David Grisham This commit was moved from ipfs/go-bitswap@bc9342bf1b8950b949bab6c8890a932ad3dc0b6e --- bitswap/workers.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/bitswap/workers.go b/bitswap/workers.go index 6c6fe0e8b..028b9735d 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -6,6 +6,8 @@ import ( "sync" "time" + bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" @@ -63,6 +65,12 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { "Block": envelope.Block.Cid().String(), }) + // update the BS ledger to reflect sent message + // TODO: Should only track *useful* messages in ledger + outgoing := bsmsg.New(false) + outgoing.AddBlock(envelope.Block) + bs.engine.MessageSent(envelope.Peer, outgoing) + bs.wm.SendBlock(ctx, envelope) 
bs.counterLk.Lock() bs.blocksSent++ From c24975a6d4f92fb2382e79b590cdc6dcdef275ec Mon Sep 17 00:00:00 2001 From: dgrisham Date: Mon, 24 Apr 2017 20:33:52 -0600 Subject: [PATCH 0540/1035] tests + data dependency fix: `BytesSent` bug now completely fixed Tests were added to ensure that the bug fix in commit 000fbd25 was correct. The tests caught an error where a peer's ledger was not properly locked when updating it in the `MessageSent()` function. The appropriate calls to lock the ledger were made, and the tests successfully passed. License: MIT Signed-off-by: David Grisham This commit was moved from ipfs/go-bitswap@ca0df11689f9e03625892d2f96c987c84d013c62 --- bitswap/bitswap_test.go | 107 +++++++++++++++++++++++++++++++++++++ bitswap/decision/engine.go | 3 ++ 2 files changed, 110 insertions(+) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 78467ce94..e13ff4c8e 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -11,6 +11,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" + decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" @@ -489,3 +490,109 @@ func TestWantlistCleanup(t *testing.T) { t.Fatal("should only have keys[0] in wantlist") } } + +func assertLedgerMatch(ra, rb *decision.Receipt) error { + if ra.Sent != rb.Recv { + return fmt.Errorf("mismatch in ledgers (exchanged bytes): %d sent vs %d recvd", ra.Sent, rb.Recv) + } + + if ra.Recv != rb.Sent { + return fmt.Errorf("mismatch in ledgers (exchanged bytes): %d recvd vs %d sent", ra.Recv, rb.Sent) + } + + if ra.Exchanged != rb.Exchanged { + return fmt.Errorf("mismatch in ledgers (exchanged blocks): %d vs %d ", ra.Exchanged, rb.Exchanged) + } + + return nil +} + +func TestBitswapBytesSentOneWay(t 
*testing.T) { + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + sg := NewTestSessionGenerator(net) + defer sg.Close() + bg := blocksutil.NewBlockGenerator() + + t.Log("Test ledgers match when one peer sends block to another") + + instances := sg.Instances(2) + blocks := bg.Blocks(1) + err := instances[0].Exchange.HasBlock(blocks[0]) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) + if err != nil { + t.Fatal(err) + } + + ra := instances[0].Exchange.LedgerForPeer(instances[1].Peer) + rb := instances[1].Exchange.LedgerForPeer(instances[0].Peer) + + err = assertLedgerMatch(ra, rb) + if err != nil { + t.Fatal(err) + } + + t.Log(blk) + for _, inst := range instances { + err := inst.Exchange.Close() + if err != nil { + t.Fatal(err) + } + } +} + +func TestBitswapBytesSentTwoWay(t *testing.T) { + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + sg := NewTestSessionGenerator(net) + defer sg.Close() + bg := blocksutil.NewBlockGenerator() + + t.Log("Test ledgers match when two peers send one block to each other") + + instances := sg.Instances(2) + blocks := bg.Blocks(2) + err := instances[0].Exchange.HasBlock(blocks[0]) + if err != nil { + t.Fatal(err) + } + + err = instances[1].Exchange.HasBlock(blocks[1]) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) + if err != nil { + t.Fatal(err) + } + + ctx, cancel = context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + blk, err = instances[0].Exchange.GetBlock(ctx, blocks[1].Cid()) + if err != nil { + t.Fatal(err) + } + + ra := instances[0].Exchange.LedgerForPeer(instances[1].Peer) + rb := instances[1].Exchange.LedgerForPeer(instances[0].Peer) + + err = 
assertLedgerMatch(ra, rb) + if err != nil { + t.Fatal(err) + } + + t.Log(blk) + for _, inst := range instances { + err := inst.Exchange.Close() + if err != nil { + t.Fatal(err) + } + } +} diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index f4b170800..6c1a9e936 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -286,6 +286,9 @@ func (e *Engine) AddBlock(block blocks.Block) { func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) error { l := e.findOrCreate(p) + l.lk.Lock() + defer l.lk.Unlock() + for _, block := range m.Blocks() { l.SentBytes(len(block.RawData())) l.wantList.Remove(block.Cid()) From cf0ba2b360b1cebeb73e4131953075d156955f80 Mon Sep 17 00:00:00 2001 From: dgrisham Date: Tue, 25 Apr 2017 13:54:49 -0600 Subject: [PATCH 0541/1035] tests: bitswap ledger tests modified Updated the `TestBitswapLedger*` tests and added assertions to check concrete values for ledgers (rather than just checking that two peers' ledgers match). The names for these tests were also changed from the previous commit, according to 's/BytesSent/Ledger/'. 
License: MIT Signed-off-by: David Grisham This commit was moved from ipfs/go-bitswap@b649f755a46ba38bea8cc4d64bc2360ce49d9db2 --- bitswap/bitswap_test.go | 60 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 58 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index e13ff4c8e..548c4a62d 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -507,7 +507,37 @@ func assertLedgerMatch(ra, rb *decision.Receipt) error { return nil } -func TestBitswapBytesSentOneWay(t *testing.T) { +func assertLedgerEqual(ra, rb *decision.Receipt) error { + if ra.Value != rb.Value { + return fmt.Errorf("mismatch in ledgers (value/debt ratio): %f vs %f ", ra.Value, rb.Value) + } + + if ra.Sent != rb.Sent { + return fmt.Errorf("mismatch in ledgers (sent bytes): %d vs %d", ra.Sent, rb.Sent) + } + + if ra.Recv != rb.Recv { + return fmt.Errorf("mismatch in ledgers (recvd bytes): %d vs %d", ra.Recv, rb.Recv) + } + + if ra.Exchanged != rb.Exchanged { + return fmt.Errorf("mismatch in ledgers (exchanged blocks): %d vs %d ", ra.Exchanged, rb.Exchanged) + } + + return nil +} + +func newReceipt(sent, recv, exchanged uint64) *decision.Receipt { + return &decision.Receipt{ + Peer: "test", + Value: float64(sent) / (1 + float64(recv)), + Sent: sent, + Recv: recv, + Exchanged: exchanged, + } +} + +func TestBitswapLedgerOneWay(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) sg := NewTestSessionGenerator(net) defer sg.Close() @@ -532,11 +562,24 @@ func TestBitswapBytesSentOneWay(t *testing.T) { ra := instances[0].Exchange.LedgerForPeer(instances[1].Peer) rb := instances[1].Exchange.LedgerForPeer(instances[0].Peer) + // compare peer ledger receipts err = assertLedgerMatch(ra, rb) if err != nil { t.Fatal(err) } + // check that receipts have intended values + ratest := newReceipt(1, 0, 1) + err = assertLedgerEqual(ratest, ra) + if err != nil { + t.Fatal(err) + } + rbtest := newReceipt(0, 1, 1) + err 
= assertLedgerEqual(rbtest, rb) + if err != nil { + t.Fatal(err) + } + t.Log(blk) for _, inst := range instances { err := inst.Exchange.Close() @@ -546,7 +589,7 @@ func TestBitswapBytesSentOneWay(t *testing.T) { } } -func TestBitswapBytesSentTwoWay(t *testing.T) { +func TestBitswapLedgerTwoWay(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) sg := NewTestSessionGenerator(net) defer sg.Close() @@ -583,11 +626,24 @@ func TestBitswapBytesSentTwoWay(t *testing.T) { ra := instances[0].Exchange.LedgerForPeer(instances[1].Peer) rb := instances[1].Exchange.LedgerForPeer(instances[0].Peer) + // compare peer ledger receipts err = assertLedgerMatch(ra, rb) if err != nil { t.Fatal(err) } + // check that receipts have intended values + rtest := newReceipt(1, 1, 2) + err = assertLedgerEqual(rtest, ra) + if err != nil { + t.Fatal(err) + } + + err = assertLedgerEqual(rtest, rb) + if err != nil { + t.Fatal(err) + } + t.Log(blk) for _, inst := range instances { err := inst.Exchange.Close() From 549914c102bd7ae5a98e4a853837804c8b8f6337 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 28 Nov 2016 17:36:45 -0800 Subject: [PATCH 0542/1035] bitswap: clean up ledgers when disconnecting License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@489aeacf5e0b9824bb088901591b766aeaabc658 --- bitswap/bitswap.go | 1 + bitswap/decision/engine.go | 26 +++++++++++++++++++++++++- bitswap/decision/ledger.go | 4 ++++ 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d76dbb320..e7a20008b 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -414,6 +414,7 @@ func (bs *Bitswap) updateReceiveCounters(b blocks.Block) { // Connected/Disconnected warns bitswap about peer connections func (bs *Bitswap) PeerConnected(p peer.ID) { bs.wm.Connected(p) + bs.engine.PeerConnected(p) } // Connected/Disconnected warns bitswap about peer connections diff --git a/bitswap/decision/engine.go 
b/bitswap/decision/engine.go index 6c1a9e936..37e370db0 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -298,8 +298,32 @@ func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) error { return nil } +func (e *Engine) PeerConnected(p peer.ID) { + e.lock.Lock() + l, ok := e.ledgerMap[p] + if !ok { + l = newLedger(p) + e.ledgerMap[p] = l + } + l.lk.Lock() + l.ref++ + l.lk.Unlock() + e.lock.Unlock() +} + func (e *Engine) PeerDisconnected(p peer.ID) { - // TODO: release ledger + e.lock.Lock() + defer e.lock.Unlock() + l, ok := e.ledgerMap[p] + if !ok { + return + } + l.lk.Lock() + l.ref-- + if l.ref <= 0 { + delete(e.ledgerMap, p) + } + l.lk.Unlock() } func (e *Engine) numBytesSentTo(p peer.ID) uint64 { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index ac8362467..cb93f0e95 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -43,6 +43,10 @@ type ledger struct { // to a given peer sentToPeer map[string]time.Time + // ref is the reference count for this ledger, its used to ensure we + // don't drop the reference to this ledger in multi-connection scenarios + ref int + lk sync.Mutex } From 9482acfec670bf59f71c30be3531854a14289bab Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 19 May 2017 19:05:12 -0700 Subject: [PATCH 0543/1035] test for partner removal License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@775dd78ff291886eaf46fb3c90602832c9d044bd --- bitswap/decision/engine_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 851e1469d..fdac4eba1 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -89,6 +89,11 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { if !peerIsPartner(sanfrancisco.Peer, seattle.Engine) { t.Fatal("Peer wasn't added as a Partner") } + + seattle.Engine.PeerDisconnected(sanfrancisco.Peer) + if 
peerIsPartner(sanfrancisco.Peer, seattle.Engine) { + t.Fatal("expected peer to be removed") + } } func peerIsPartner(p peer.ID, e *Engine) bool { From 805a99a4ff0589618ef9e4ad0b635354598a0fa4 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 16 May 2017 19:35:43 -0700 Subject: [PATCH 0544/1035] update to dht code with provide announce option License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@6c43badad1bde14904ab9be62eebdbc1288f3bc8 --- bitswap/bitswap_test.go | 2 +- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/testnet/virtual.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 548c4a62d..6ee6803dd 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -53,7 +53,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this block := blocks.NewBlock([]byte("block")) pinfo := p2ptestutil.RandTestBogusIdentityOrFatal(t) - rs.Client(pinfo).Provide(context.Background(), block.Cid()) // but not on network + rs.Client(pinfo).Provide(context.Background(), block.Cid(), true) // but not on network solo := g.Next() defer solo.Exchange.Close() diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index de9959e4a..ad5902069 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -11,9 +11,9 @@ import ( pstore "gx/ipfs/QmNUVzEjq3XWJ89hegahPvyfJbTXgTaom48pLb7YBD9gHQ/go-libp2p-peerstore" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" inet "gx/ipfs/QmVHSBsn8LEeay8m5ERebgUVuhzw838PsyTttCmP6GMJkg/go-libp2p-net" + routing "gx/ipfs/QmXiH3yLocPhjkAmL8R29fKRcEKoVXKCaVDbAS9tdTrVEd/go-libp2p-routing" cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - routing "gx/ipfs/QmafuecpeZp3k3sHJ5mUARHd4795revuadECQMkmHB8LfW/go-libp2p-routing" host 
"gx/ipfs/QmcyNeWPsoFGxThGpV8JnJdfUNankKhWCTrbrcFRQda4xR/go-libp2p-host" ma "gx/ipfs/QmcyqRMCAXVtYPS4DiBrA7sezL9rRGfW8Ctx7cywL4TXJj/go-multiaddr" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" @@ -172,7 +172,7 @@ func (bsnet *impl) FindProvidersAsync(ctx context.Context, k *cid.Cid, max int) // Provide provides the key to the network func (bsnet *impl) Provide(ctx context.Context, k *cid.Cid) error { - return bsnet.routing.Provide(ctx, k) + return bsnet.routing.Provide(ctx, k, true) } // handleNewStream receives a new stream from the network. diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 3a743a27d..2593cf4f7 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,8 +9,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + routing "gx/ipfs/QmXiH3yLocPhjkAmL8R29fKRcEKoVXKCaVDbAS9tdTrVEd/go-libp2p-routing" cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" - routing "gx/ipfs/QmafuecpeZp3k3sHJ5mUARHd4795revuadECQMkmHB8LfW/go-libp2p-routing" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) @@ -138,7 +138,7 @@ func (n *networkClient) NewMessageSender(ctx context.Context, p peer.ID) (bsnet. 
// Provide provides the key to the network func (nc *networkClient) Provide(ctx context.Context, k *cid.Cid) error { - return nc.routing.Provide(ctx, k) + return nc.routing.Provide(ctx, k, true) } func (nc *networkClient) SetDelegate(r bsnet.Receiver) { From 1a85951012a85984dd687a4065f35fcc61a865f4 Mon Sep 17 00:00:00 2001 From: Lars Gierth Date: Tue, 30 May 2017 02:26:05 +0200 Subject: [PATCH 0545/1035] gx: update go-libp2p-peerstore, go-libp2p, go-libp2p-kbucket License: MIT Signed-off-by: Lars Gierth This commit was moved from ipfs/go-bitswap@940ef108c3c89cad4a2b8de0e30af57c6d4bce1b --- bitswap/bitswap_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/ipfs_impl.go | 8 ++++---- bitswap/testnet/peernet.go | 2 +- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 6ee6803dd..86271f111 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -20,7 +20,7 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" - p2ptestutil "gx/ipfs/QmcCgouQ5iXfmxmVNc1fpXLacRSPMNHx4tzqDpou6XNvvd/go-libp2p-netutil" + p2ptestutil "gx/ipfs/Qma2j8dYePrvN5DoNgwh1uAuu3FFtEtrUQFmr737ws8nCp/go-libp2p-netutil" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/message/message.go b/bitswap/message/message.go index ecf3d9957..ac5677929 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -8,7 +8,7 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - inet "gx/ipfs/QmVHSBsn8LEeay8m5ERebgUVuhzw838PsyTttCmP6GMJkg/go-libp2p-net" + inet "gx/ipfs/QmRscs8KxrSmSv4iuevHv8JfuUzHBMoqiaHzxfDRiksd6e/go-libp2p-net" cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index ad5902069..5b408a18e 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,13 +8,13 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - pstore "gx/ipfs/QmNUVzEjq3XWJ89hegahPvyfJbTXgTaom48pLb7YBD9gHQ/go-libp2p-peerstore" + routing "gx/ipfs/QmNdaQ8itUU9jEZUwTsG4gHMaPmRfi6FEe89QjQAFbep3M/go-libp2p-routing" + inet "gx/ipfs/QmRscs8KxrSmSv4iuevHv8JfuUzHBMoqiaHzxfDRiksd6e/go-libp2p-net" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - inet "gx/ipfs/QmVHSBsn8LEeay8m5ERebgUVuhzw838PsyTttCmP6GMJkg/go-libp2p-net" - routing "gx/ipfs/QmXiH3yLocPhjkAmL8R29fKRcEKoVXKCaVDbAS9tdTrVEd/go-libp2p-routing" + host "gx/ipfs/QmUywuGNZoUKV8B9iyvup9bPkLiMrhTsyVMkeSXW5VxAfC/go-libp2p-host" + pstore "gx/ipfs/QmXZSd1qR5BxZkPyuwfT5jpqQFScZccoZvDneXsKzCNHWX/go-libp2p-peerstore" cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - host "gx/ipfs/QmcyNeWPsoFGxThGpV8JnJdfUNankKhWCTrbrcFRQda4xR/go-libp2p-host" ma "gx/ipfs/QmcyqRMCAXVtYPS4DiBrA7sezL9rRGfW8Ctx7cywL4TXJj/go-multiaddr" peer 
"gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index e3f14d3ea..2ff7a05f9 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,8 +5,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + mockpeernet "gx/ipfs/QmQA5mdxru8Bh6dpC9PJfSkumqnmHgJX7knxSgBo5Lpime/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" - mockpeernet "gx/ipfs/QmRai5yZNL67pWCoznW7sBdFnqZrFULuJ5w8KhmRyhdgN4/go-libp2p/p2p/net/mock" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 2593cf4f7..da23b88a9 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,7 +9,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - routing "gx/ipfs/QmXiH3yLocPhjkAmL8R29fKRcEKoVXKCaVDbAS9tdTrVEd/go-libp2p-routing" + routing "gx/ipfs/QmNdaQ8itUU9jEZUwTsG4gHMaPmRfi6FEe89QjQAFbep3M/go-libp2p-routing" cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index cbc621b6e..588dca184 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -12,7 +12,7 @@ import ( ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" ds_sync "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync" - p2ptestutil "gx/ipfs/QmcCgouQ5iXfmxmVNc1fpXLacRSPMNHx4tzqDpou6XNvvd/go-libp2p-netutil" + p2ptestutil "gx/ipfs/Qma2j8dYePrvN5DoNgwh1uAuu3FFtEtrUQFmr737ws8nCp/go-libp2p-netutil" peer 
"gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) From 1c191c43ff9bc84da42e1332ecd60f337f0836b4 Mon Sep 17 00:00:00 2001 From: zramsay Date: Wed, 31 May 2017 16:56:11 -0400 Subject: [PATCH 0546/1035] apply the megacheck tool to improve code quality License: MIT Signed-off-by: Zach Ramsay This commit was moved from ipfs/go-bitswap@a1530f84dffc54b60321693fa8afc36591f76770 --- bitswap/bitswap.go | 6 +++--- bitswap/bitswap_test.go | 12 +----------- bitswap/message/message.go | 10 ++-------- bitswap/testutils.go | 4 ++-- bitswap/wantmanager.go | 4 ++-- 5 files changed, 10 insertions(+), 26 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index e7a20008b..e37787b88 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -37,9 +37,9 @@ const ( // TODO: if a 'non-nice' strategy is implemented, consider increasing this value maxProvidersPerRequest = 3 providerRequestTimeout = time.Second * 10 - hasBlockTimeout = time.Second * 15 - provideTimeout = time.Second * 15 - sizeBatchRequestChan = 32 + // hasBlockTimeout = time.Second * 15 + provideTimeout = time.Second * 15 + sizeBatchRequestChan = 32 // kMaxPriority is the max priority as defined by the bitswap protocol kMaxPriority = math.MaxInt32 ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 86271f111..504c31a75 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -199,7 +199,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { if err != nil { errs <- err } - for _ = range outch { + for range outch { } }(inst) } @@ -226,16 +226,6 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { } } -func getOrFail(bitswap Instance, b blocks.Block, t *testing.T, wg *sync.WaitGroup) { - if _, err := bitswap.Blockstore().Get(b.Cid()); err != nil { - _, err := bitswap.Exchange.GetBlock(context.Background(), b.Cid()) - if err != nil { - t.Fatal(err) - } - } - wg.Done() -} - // TODO simplify this test. 
get to the _essence_! func TestSendToWantingPeer(t *testing.T) { if testing.Short() { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index ac5677929..a0bc2215a 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -220,19 +220,13 @@ func (m *impl) ToProtoV1() *pb.Message { func (m *impl) ToNetV0(w io.Writer) error { pbw := ggio.NewDelimitedWriter(w) - if err := pbw.WriteMsg(m.ToProtoV0()); err != nil { - return err - } - return nil + return pbw.WriteMsg(m.ToProtoV0()) } func (m *impl) ToNetV1(w io.Writer) error { pbw := ggio.NewDelimitedWriter(w) - if err := pbw.WriteMsg(m.ToProtoV1()); err != nil { - return err - } - return nil + return pbw.WriteMsg(m.ToProtoV1()) } func (m *impl) Loggable() map[string]interface{} { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 588dca184..3e3bcb474 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -88,8 +88,8 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // just a much better idea. 
func Session(ctx context.Context, net tn.Network, p testutil.Identity) Instance { bsdelay := delay.Fixed(0) - const bloomSize = 512 - const writeCacheElems = 100 + // const bloomSize = 512 + // const writeCacheElems = 100 adapter := net.Adapter(p) dstore := ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay)) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 0825e8cfc..4695256c0 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -55,7 +55,7 @@ func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantMana } } -type msgPair struct { +/*type msgPair struct { to peer.ID msg bsmsg.BitSwapMessage } @@ -63,7 +63,7 @@ type msgPair struct { type cancellation struct { who peer.ID blk *cid.Cid -} +}*/ type msgQueue struct { p peer.ID From 4c4c2256b8613b7cbc85a2bd34130818000853ed Mon Sep 17 00:00:00 2001 From: zramsay Date: Wed, 31 May 2017 23:41:26 -0400 Subject: [PATCH 0547/1035] address PR comments; remove commented/dead code License: MIT Signed-off-by: Zach Ramsay This commit was moved from ipfs/go-bitswap@43054ff030b700ad1372bb4360e9cc9425fd16d2 --- bitswap/bitswap.go | 5 ++--- bitswap/bitswap_test.go | 4 ++-- bitswap/decision/ledger.go | 3 --- bitswap/testutils.go | 2 -- bitswap/wantmanager.go | 10 ---------- 5 files changed, 4 insertions(+), 20 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index e37787b88..86e53dc2f 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -37,9 +37,8 @@ const ( // TODO: if a 'non-nice' strategy is implemented, consider increasing this value maxProvidersPerRequest = 3 providerRequestTimeout = time.Second * 10 - // hasBlockTimeout = time.Second * 15 - provideTimeout = time.Second * 15 - sizeBatchRequestChan = 32 + provideTimeout = time.Second * 15 + sizeBatchRequestChan = 32 // kMaxPriority is the max priority as defined by the bitswap protocol kMaxPriority = math.MaxInt32 ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 
504c31a75..3229b183b 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -601,14 +601,14 @@ func TestBitswapLedgerTwoWay(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) + _, err = instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) if err != nil { t.Fatal(err) } ctx, cancel = context.WithTimeout(context.Background(), time.Second*5) defer cancel() - blk, err = instances[0].Exchange.GetBlock(ctx, blocks[1].Cid()) + blk, err := instances[0].Exchange.GetBlock(ctx, blocks[1].Cid()) if err != nil { t.Fatal(err) } diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index cb93f0e95..3826b7352 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -27,9 +27,6 @@ type ledger struct { // Accounting tracks bytes sent and recieved. Accounting debtRatio - // firstExchnage is the time of the first data exchange. - firstExchange time.Time - // lastExchange is the time of the last data exchange. lastExchange time.Time diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 3e3bcb474..fa5e7f940 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -88,8 +88,6 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // just a much better idea. 
func Session(ctx context.Context, net tn.Network, p testutil.Identity) Instance { bsdelay := delay.Fixed(0) - // const bloomSize = 512 - // const writeCacheElems = 100 adapter := net.Adapter(p) dstore := ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay)) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 4695256c0..bdb9db636 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -55,16 +55,6 @@ func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantMana } } -/*type msgPair struct { - to peer.ID - msg bsmsg.BitSwapMessage -} - -type cancellation struct { - who peer.ID - blk *cid.Cid -}*/ - type msgQueue struct { p peer.ID From 32d46a0dca52964cc1f0213d5bb6328ba61a7808 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 15 Jun 2017 21:02:21 -0700 Subject: [PATCH 0548/1035] blocks: move block format to it's own repo We need to reference it from outside of this repo. License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@8fadf27672f47f300a3d1fcaa80b4cf54974b8e3 --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/message/message_test.go | 2 +- bitswap/notifications/notifications.go | 2 +- bitswap/notifications/notifications_test.go | 2 +- bitswap/testnet/network_test.go | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 86e53dc2f..eb408c6c9 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -9,7 +9,7 @@ import ( "sync" "time" - blocks "github.com/ipfs/go-ipfs/blocks" + blocks "github.com/ipfs/go-block-format" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" exchange "github.com/ipfs/go-ipfs/exchange" decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 3229b183b..38d5b4056 100644 --- 
a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - blocks "github.com/ipfs/go-ipfs/blocks" + blocks "github.com/ipfs/go-block-format" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 37e370db0..4c8888b71 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -6,7 +6,7 @@ import ( "time" context "context" - blocks "github.com/ipfs/go-ipfs/blocks" + blocks "github.com/ipfs/go-block-format" bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index fdac4eba1..06734cad7 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -9,7 +9,7 @@ import ( "testing" context "context" - blocks "github.com/ipfs/go-ipfs/blocks" + blocks "github.com/ipfs/go-block-format" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" diff --git a/bitswap/message/message.go b/bitswap/message/message.go index a0bc2215a..94a3aecab 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -4,7 +4,7 @@ import ( "fmt" "io" - blocks "github.com/ipfs/go-ipfs/blocks" + blocks "github.com/ipfs/go-block-format" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index ddcba8e17..f945048f7 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -6,7 +6,7 @@ import ( proto 
"gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" - blocks "github.com/ipfs/go-ipfs/blocks" + blocks "github.com/ipfs/go-block-format" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" u "gx/ipfs/QmWbjfz3u6HkAdPh34dgPchGbQjob6LXLhAeCGii2TX69n/go-ipfs-util" cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 43322793b..fb82f8326 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -3,7 +3,7 @@ package notifications import ( "context" - blocks "github.com/ipfs/go-ipfs/blocks" + blocks "github.com/ipfs/go-block-format" pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index ab83015e4..44627d425 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - blocks "github.com/ipfs/go-ipfs/blocks" + blocks "github.com/ipfs/go-block-format" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" ) diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 44f663787..427b95e9e 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -5,7 +5,7 @@ import ( "testing" context "context" - blocks "github.com/ipfs/go-ipfs/blocks" + blocks "github.com/ipfs/go-block-format" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" From e7a4fdd27601c425dcd8d0789a34db7242985bb2 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Sun, 18 Jun 2017 13:07:24 -0700 Subject: [PATCH 
0549/1035] blocks: gx import go-block-format And updated related dependencies. License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@795a1e3569776226429f3cf0ea1bb22e2e668ba2 --- bitswap/bitswap.go | 4 ++-- bitswap/bitswap_test.go | 4 ++-- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/decision/peer_request_queue_test.go | 2 +- bitswap/message/message.go | 4 ++-- bitswap/message/message_test.go | 4 ++-- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 2 +- bitswap/notifications/notifications.go | 4 ++-- bitswap/notifications/notifications_test.go | 4 ++-- bitswap/stat.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/virtual.go | 2 +- bitswap/wantlist/wantlist.go | 2 +- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 2 +- 20 files changed, 26 insertions(+), 26 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index eb408c6c9..a795c6833 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -9,7 +9,6 @@ import ( "sync" "time" - blocks "github.com/ipfs/go-block-format" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" exchange "github.com/ipfs/go-ipfs/exchange" decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" @@ -18,13 +17,14 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" + blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" + cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging 
"gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" loggables "gx/ipfs/QmVesPmqbPp7xRGyY96tnBwzDtVV1nqv4SCVxo5zCqKyH8/go-libp2p-loggables" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 38d5b4056..e3e3682e8 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -8,7 +8,6 @@ import ( "testing" "time" - blocks "github.com/ipfs/go-block-format" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" @@ -16,10 +15,11 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" travis "github.com/ipfs/go-ipfs/thirdparty/testutil/ci/travis" + blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" p2ptestutil "gx/ipfs/Qma2j8dYePrvN5DoNgwh1uAuu3FFtEtrUQFmr737ws8nCp/go-libp2p-netutil" ) diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index f77044f94..52c3cd4e9 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,8 +7,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" + cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" u "gx/ipfs/QmWbjfz3u6HkAdPh34dgPchGbQjob6LXLhAeCGii2TX69n/go-ipfs-util" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 4c8888b71..e4f1d99cd 
100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -6,11 +6,11 @@ import ( "time" context "context" - blocks "github.com/ipfs/go-block-format" bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 06734cad7..ba2cf02bc 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -9,12 +9,12 @@ import ( "testing" context "context" - blocks "github.com/ipfs/go-block-format" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" dssync "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync" + blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 3826b7352..9c3f0cf76 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,7 +6,7 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index d989174a2..cd4f2b9e4 100644 --- a/bitswap/decision/peer_request_queue.go +++ 
b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index f0fa03bb2..8980d65ed 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -10,8 +10,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" + cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" u "gx/ipfs/QmWbjfz3u6HkAdPh34dgPchGbQjob6LXLhAeCGii2TX69n/go-ipfs-util" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 94a3aecab..aa6ace938 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -4,12 +4,12 @@ import ( "fmt" "io" - blocks "github.com/ipfs/go-block-format" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" + cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" inet "gx/ipfs/QmRscs8KxrSmSv4iuevHv8JfuUzHBMoqiaHzxfDRiksd6e/go-libp2p-net" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" ) diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index f945048f7..ce3be7dcd 100644 --- a/bitswap/message/message_test.go +++ 
b/bitswap/message/message_test.go @@ -6,10 +6,10 @@ import ( proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" - blocks "github.com/ipfs/go-block-format" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" + cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" u "gx/ipfs/QmWbjfz3u6HkAdPh34dgPchGbQjob6LXLhAeCGii2TX69n/go-ipfs-util" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" ) func mkFakeCid(s string) *cid.Cid { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 7288024fe..e0d3f8f30 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -4,7 +4,7 @@ import ( "context" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 5b408a18e..c0b909180 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -9,11 +9,11 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" routing "gx/ipfs/QmNdaQ8itUU9jEZUwTsG4gHMaPmRfi6FEe89QjQAFbep3M/go-libp2p-routing" + cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" inet "gx/ipfs/QmRscs8KxrSmSv4iuevHv8JfuUzHBMoqiaHzxfDRiksd6e/go-libp2p-net" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" host "gx/ipfs/QmUywuGNZoUKV8B9iyvup9bPkLiMrhTsyVMkeSXW5VxAfC/go-libp2p-host" pstore "gx/ipfs/QmXZSd1qR5BxZkPyuwfT5jpqQFScZccoZvDneXsKzCNHWX/go-libp2p-peerstore" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" ggio 
"gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" ma "gx/ipfs/QmcyqRMCAXVtYPS4DiBrA7sezL9rRGfW8Ctx7cywL4TXJj/go-multiaddr" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index fb82f8326..fc8f3e61f 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -3,10 +3,10 @@ package notifications import ( "context" - blocks "github.com/ipfs/go-block-format" + blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 44627d425..6f46b79bd 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - blocks "github.com/ipfs/go-block-format" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" + blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/stat.go b/bitswap/stat.go index 8dae9abbf..cf61f1738 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -3,7 +3,7 @@ package bitswap import ( "sort" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" ) type Stat struct { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 427b95e9e..e4c463f03 100644 --- a/bitswap/testnet/network_test.go 
+++ b/bitswap/testnet/network_test.go @@ -5,12 +5,12 @@ import ( "testing" context "context" - blocks "github.com/ipfs/go-block-format" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index da23b88a9..e6bab49fc 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,7 +10,7 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" routing "gx/ipfs/QmNdaQ8itUU9jEZUwTsG4gHMaPmRfi6FEe89QjQAFbep3M/go-libp2p-routing" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 94b8219c3..700e64b60 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" ) type ThreadSafe struct { diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index bdb9db636..3bc24d3b7 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -10,8 +10,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" metrics 
"gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/workers.go b/bitswap/workers.go index 028b9735d..c7c1f9593 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -8,10 +8,10 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) From 47ad7c67d79b09bebdee8274d20a6a07a74c7f44 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 19 Jun 2017 19:11:32 -0700 Subject: [PATCH 0550/1035] gx import/update libp2p/go-libp2p-routing For some reason, this was referenced but wasn't listed in packages.json. 
License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@1c2171f7d43d8abf32a5c537ae5769c21510c5d1 --- bitswap/network/ipfs_impl.go | 2 +- bitswap/testnet/virtual.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index c0b909180..7a4c78615 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,13 +8,13 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - routing "gx/ipfs/QmNdaQ8itUU9jEZUwTsG4gHMaPmRfi6FEe89QjQAFbep3M/go-libp2p-routing" cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" inet "gx/ipfs/QmRscs8KxrSmSv4iuevHv8JfuUzHBMoqiaHzxfDRiksd6e/go-libp2p-net" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" host "gx/ipfs/QmUywuGNZoUKV8B9iyvup9bPkLiMrhTsyVMkeSXW5VxAfC/go-libp2p-host" pstore "gx/ipfs/QmXZSd1qR5BxZkPyuwfT5jpqQFScZccoZvDneXsKzCNHWX/go-libp2p-peerstore" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" + routing "gx/ipfs/QmaNDbaV1wvPRLxTYepVsXrppXNjQ1NbrnG7ibAgKeyaXD/go-libp2p-routing" ma "gx/ipfs/QmcyqRMCAXVtYPS4DiBrA7sezL9rRGfW8Ctx7cywL4TXJj/go-multiaddr" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index e6bab49fc..2ff337f98 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,8 +9,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - routing "gx/ipfs/QmNdaQ8itUU9jEZUwTsG4gHMaPmRfi6FEe89QjQAFbep3M/go-libp2p-routing" cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" + routing "gx/ipfs/QmaNDbaV1wvPRLxTYepVsXrppXNjQ1NbrnG7ibAgKeyaXD/go-libp2p-routing" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) From 3a301705345ea1768e1f1ebe7794952965894fc6 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 3 Jul 2017 20:17:03 +0200 Subject: [PATCH 0551/1035] Update go-datastore to 1.2.1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera This commit was moved from ipfs/go-bitswap@3bfa038581a84df4a571c342bf7a210ca7b10b22 --- bitswap/decision/engine_test.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index ba2cf02bc..1c89ccbce 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -12,8 +12,8 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" - dssync "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync" + ds "gx/ipfs/QmSiN66ybp5udnQnvhb6euiWiiQWdGvwMhAWa95cC1DTCV/go-datastore" + dssync "gx/ipfs/QmSiN66ybp5udnQnvhb6euiWiiQWdGvwMhAWa95cC1DTCV/go-datastore/sync" blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 2ff7a05f9..7e9d11e8a 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -6,7 +6,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" mockpeernet "gx/ipfs/QmQA5mdxru8Bh6dpC9PJfSkumqnmHgJX7knxSgBo5Lpime/go-libp2p/p2p/net/mock" - ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" + ds "gx/ipfs/QmSiN66ybp5udnQnvhb6euiWiiQWdGvwMhAWa95cC1DTCV/go-datastore" peer 
"gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index fa5e7f940..84d13cd8c 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,8 +10,8 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" - ds_sync "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync" + ds "gx/ipfs/QmSiN66ybp5udnQnvhb6euiWiiQWdGvwMhAWa95cC1DTCV/go-datastore" + ds_sync "gx/ipfs/QmSiN66ybp5udnQnvhb6euiWiiQWdGvwMhAWa95cC1DTCV/go-datastore/sync" p2ptestutil "gx/ipfs/Qma2j8dYePrvN5DoNgwh1uAuu3FFtEtrUQFmr737ws8nCp/go-libp2p-netutil" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) From ff1045ca8ddc8786e2774274be241e2a28f8b660 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 4 Jul 2017 20:18:57 +0200 Subject: [PATCH 0552/1035] Update go-datastore to 1.2.2, go-cid to 0.7.16 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera This commit was moved from ipfs/go-bitswap@6ed985a0dd93212974161cd1ea753ab0739d0fc0 --- bitswap/bitswap.go | 4 ++-- bitswap/bitswap_test.go | 4 ++-- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 6 +++--- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/decision/peer_request_queue_test.go | 2 +- bitswap/message/message.go | 4 ++-- bitswap/message/message_test.go | 4 ++-- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/notifications/notifications.go | 4 ++-- bitswap/notifications/notifications_test.go | 4 ++-- bitswap/stat.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testnet/virtual.go | 4 ++-- bitswap/testutils.go | 4 ++-- 
bitswap/wantlist/wantlist.go | 2 +- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 2 +- 22 files changed, 33 insertions(+), 33 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index a795c6833..ce7bd6b26 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -17,14 +17,14 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" - blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" + blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" - cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" loggables "gx/ipfs/QmVesPmqbPp7xRGyY96tnBwzDtVV1nqv4SCVxo5zCqKyH8/go-libp2p-loggables" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index e3e3682e8..770041c9f 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -15,12 +15,12 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" travis "github.com/ipfs/go-ipfs/thirdparty/testutil/ci/travis" - blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" + blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" p2ptestutil 
"gx/ipfs/Qma2j8dYePrvN5DoNgwh1uAuu3FFtEtrUQFmr737ws8nCp/go-libp2p-netutil" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 52c3cd4e9..3016fd07b 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,8 +7,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" u "gx/ipfs/QmWbjfz3u6HkAdPh34dgPchGbQjob6LXLhAeCGii2TX69n/go-ipfs-util" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index e4f1d99cd..a51610e60 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -10,7 +10,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" + blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 1c89ccbce..7c2da018e 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -12,9 +12,9 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - ds "gx/ipfs/QmSiN66ybp5udnQnvhb6euiWiiQWdGvwMhAWa95cC1DTCV/go-datastore" - dssync "gx/ipfs/QmSiN66ybp5udnQnvhb6euiWiiQWdGvwMhAWa95cC1DTCV/go-datastore/sync" - blocks 
"gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" + ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" + dssync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" + blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 9c3f0cf76..6c26439ae 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,7 +6,7 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index cd4f2b9e4..0d37122e9 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 8980d65ed..edacbd065 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -10,8 +10,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" u "gx/ipfs/QmWbjfz3u6HkAdPh34dgPchGbQjob6LXLhAeCGii2TX69n/go-ipfs-util" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" 
) func TestPushPop(t *testing.T) { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index aa6ace938..5c4c31154 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -6,12 +6,12 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" + blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" - cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" inet "gx/ipfs/QmRscs8KxrSmSv4iuevHv8JfuUzHBMoqiaHzxfDRiksd6e/go-libp2p-net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" ) // TODO move message.go into the bitswap package diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index ce3be7dcd..c1f215523 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -7,9 +7,9 @@ import ( proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" - cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" u "gx/ipfs/QmWbjfz3u6HkAdPh34dgPchGbQjob6LXLhAeCGii2TX69n/go-ipfs-util" - blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" + blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" ) func mkFakeCid(s string) *cid.Cid { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index e0d3f8f30..f9289974f 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -4,8 +4,8 @@ import ( "context" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - cid 
"gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 7a4c78615..c7b52bc3a 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,13 +8,13 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" + routing "gx/ipfs/QmP1wMAqk6aZYRZirbaAwmrNeqFRgQrwBt3orUtvSa1UYD/go-libp2p-routing" inet "gx/ipfs/QmRscs8KxrSmSv4iuevHv8JfuUzHBMoqiaHzxfDRiksd6e/go-libp2p-net" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" host "gx/ipfs/QmUywuGNZoUKV8B9iyvup9bPkLiMrhTsyVMkeSXW5VxAfC/go-libp2p-host" pstore "gx/ipfs/QmXZSd1qR5BxZkPyuwfT5jpqQFScZccoZvDneXsKzCNHWX/go-libp2p-peerstore" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - routing "gx/ipfs/QmaNDbaV1wvPRLxTYepVsXrppXNjQ1NbrnG7ibAgKeyaXD/go-libp2p-routing" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" ma "gx/ipfs/QmcyqRMCAXVtYPS4DiBrA7sezL9rRGfW8Ctx7cywL4TXJj/go-multiaddr" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index fc8f3e61f..1999948da 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -3,10 +3,10 @@ package notifications import ( "context" - blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" + blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" - cid 
"gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 6f46b79bd..4312444fc 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -7,8 +7,8 @@ import ( "time" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" - blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" + blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/stat.go b/bitswap/stat.go index cf61f1738..2f95d9e8b 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -3,7 +3,7 @@ package bitswap import ( "sort" - cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" ) type Stat struct { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index e4c463f03..325892a46 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -10,7 +10,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - blocks "gx/ipfs/QmbJUay5h1HtzhJb5QQk2t26yCnJksHynvhcqp18utBPqG/go-block-format" + blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 7e9d11e8a..1e59eb1d4 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -6,7 +6,7 @@ import ( mockrouting 
"github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" mockpeernet "gx/ipfs/QmQA5mdxru8Bh6dpC9PJfSkumqnmHgJX7knxSgBo5Lpime/go-libp2p/p2p/net/mock" - ds "gx/ipfs/QmSiN66ybp5udnQnvhb6euiWiiQWdGvwMhAWa95cC1DTCV/go-datastore" + ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 2ff337f98..8c7db87eb 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,8 +9,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" - routing "gx/ipfs/QmaNDbaV1wvPRLxTYepVsXrppXNjQ1NbrnG7ibAgKeyaXD/go-libp2p-routing" + routing "gx/ipfs/QmP1wMAqk6aZYRZirbaAwmrNeqFRgQrwBt3orUtvSa1UYD/go-libp2p-routing" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 84d13cd8c..4bae29ce3 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,8 +10,8 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - ds "gx/ipfs/QmSiN66ybp5udnQnvhb6euiWiiQWdGvwMhAWa95cC1DTCV/go-datastore" - ds_sync "gx/ipfs/QmSiN66ybp5udnQnvhb6euiWiiQWdGvwMhAWa95cC1DTCV/go-datastore/sync" + ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" + ds_sync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" p2ptestutil "gx/ipfs/Qma2j8dYePrvN5DoNgwh1uAuu3FFtEtrUQFmr737ws8nCp/go-libp2p-netutil" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 
700e64b60..7c77998b3 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" ) type ThreadSafe struct { diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 3bc24d3b7..c6cce7ff7 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -10,8 +10,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) diff --git a/bitswap/workers.go b/bitswap/workers.go index c7c1f9593..648bfa403 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -8,10 +8,10 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - cid "gx/ipfs/QmNw61A6sJoXMeP37mJRtQZdNhj5e3FdjoTN3v4FyE96Gk/go-cid" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) From 85f56a3745ccc9521d21e2b2628f2dd4c7c35733 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 10 Apr 2017 22:05:29 -0700 Subject: [PATCH 0553/1035] track wantlists sent to peers individually License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@7cfa440e53ae4f16c512dc80927a48a27484053a --- bitswap/wantmanager.go | 54 +++++++++++++++++++++++++++++++----------- 1 file changed, 40 insertions(+), 14 
deletions(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index c6cce7ff7..34bf78572 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -17,7 +17,7 @@ import ( type WantManager struct { // sync channels for Run loop - incoming chan []*bsmsg.Entry + incoming chan *wantSet connect chan peer.ID // notification channel for new peers connecting disconnect chan peer.ID // notification channel for peers disconnecting peerReqs chan chan []peer.ID // channel to request connected peers on @@ -41,7 +41,7 @@ func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantMana sentHistogram := metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by"+ " this bitswap").Histogram(metricsBuckets) return &WantManager{ - incoming: make(chan []*bsmsg.Entry, 10), + incoming: make(chan *wantSet, 10), connect: make(chan peer.ID, 10), disconnect: make(chan peer.ID, 10), peerReqs: make(chan chan []peer.ID), @@ -61,6 +61,7 @@ type msgQueue struct { outlk sync.Mutex out bsmsg.BitSwapMessage network bsnet.BitSwapNetwork + wl *wantlist.Wantlist sender bsnet.MessageSender @@ -76,8 +77,12 @@ func (pm *WantManager) WantBlocks(ctx context.Context, ks []*cid.Cid) { } func (pm *WantManager) CancelWants(ks []*cid.Cid) { - log.Infof("cancel wants: %s", ks) - pm.addEntries(context.TODO(), ks, true) + pm.addEntries(context.Background(), ks, true) +} + +type wantSet struct { + entries []*bsmsg.Entry + targets []peer.ID } func (pm *WantManager) addEntries(ctx context.Context, ks []*cid.Cid, cancel bool) { @@ -93,7 +98,7 @@ func (pm *WantManager) addEntries(ctx context.Context, ks []*cid.Cid, cancel boo }) } select { - case pm.incoming <- entries: + case pm.incoming <- &wantSet{entries: entries}: case <-pm.ctx.Done(): case <-ctx.Done(): } @@ -133,6 +138,8 @@ func (pm *WantManager) startPeerHandler(p peer.ID) *msgQueue { // new peer, we will want to give them our full wantlist fullwantlist := bsmsg.New(true) for _, e := range pm.wl.Entries() { + 
ne := *e + mq.wl.AddEntry(&ne) fullwantlist.AddEntry(e.Cid, e.Priority) } mq.out = fullwantlist @@ -278,27 +285,35 @@ func (pm *WantManager) Run() { defer tock.Stop() for { select { - case entries := <-pm.incoming: + case ws := <-pm.incoming: // add changes to our wantlist - var filtered []*bsmsg.Entry - for _, e := range entries { + for _, e := range ws.entries { if e.Cancel { if pm.wl.Remove(e.Cid) { pm.wantlistGauge.Dec() - filtered = append(filtered, e) } } else { if pm.wl.AddEntry(e.Entry) { pm.wantlistGauge.Inc() - filtered = append(filtered, e) } } } // broadcast those wantlist changes - for _, p := range pm.peers { - p.addMessage(filtered) + if len(ws.targets) == 0 { + for _, p := range pm.peers { + p.addMessage(ws.entries) + } + } else { + for _, t := range ws.targets { + p, ok := pm.peers[t] + if !ok { + log.Warning("tried sending wantlist change to non-partner peer") + continue + } + p.addMessage(ws.entries) + } } case <-tock.C: @@ -335,6 +350,7 @@ func (wm *WantManager) newMsgQueue(p peer.ID) *msgQueue { return &msgQueue{ done: make(chan struct{}), work: make(chan struct{}, 1), + wl: wantlist.New(), network: wm.network, p: p, refcnt: 1, @@ -342,9 +358,13 @@ func (wm *WantManager) newMsgQueue(p peer.ID) *msgQueue { } func (mq *msgQueue) addMessage(entries []*bsmsg.Entry) { + var work bool mq.outlk.Lock() defer func() { mq.outlk.Unlock() + if !work { + return + } select { case mq.work <- struct{}{}: default: @@ -361,9 +381,15 @@ func (mq *msgQueue) addMessage(entries []*bsmsg.Entry) { // one passed in for _, e := range entries { if e.Cancel { - mq.out.Cancel(e.Cid) + if mq.wl.Remove(e.Cid) { + work = true + mq.out.Cancel(e.Cid) + } } else { - mq.out.AddEntry(e.Cid, e.Priority) + if mq.wl.Add(e.Cid, e.Priority) { + work = true + mq.out.AddEntry(e.Cid, e.Priority) + } } } } From c45e1a70efabb64f8c7ba71904da3b0d3801c53e Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 3 Apr 2017 19:21:52 -0700 Subject: [PATCH 0554/1035] implement bitswap sessions License: 
MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@3538a30e43c5b02b61d5275f27ae804954b90166 --- bitswap/bitswap.go | 73 +++++------- bitswap/bitswap_test.go | 13 ++- bitswap/decision/engine.go | 2 +- bitswap/get.go | 100 +++++++++++++++++ bitswap/session.go | 221 +++++++++++++++++++++++++++++++++++++ bitswap/session_test.go | 152 +++++++++++++++++++++++++ bitswap/testutils.go | 4 +- bitswap/wantmanager.go | 12 +- bitswap/workers.go | 2 +- 9 files changed, 525 insertions(+), 54 deletions(-) create mode 100644 bitswap/get.go create mode 100644 bitswap/session.go create mode 100644 bitswap/session_test.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index ce7bd6b26..74c70b108 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -7,6 +7,7 @@ import ( "errors" "math" "sync" + "sync/atomic" "time" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" @@ -17,13 +18,13 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" - blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" loggables "gx/ipfs/QmVesPmqbPp7xRGyY96tnBwzDtVV1nqv4SCVxo5zCqKyH8/go-libp2p-loggables" + blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) @@ -159,10 +160,15 @@ type Bitswap struct { blocksSent int dataSent uint64 dataRecvd uint64 + messagesRecvd uint64 // Metrics interface metrics dupMetric metrics.Histogram allMetric metrics.Histogram + + // 
Sessions + sessions []*Session + sessLk sync.Mutex } type blockRequest struct { @@ -173,45 +179,7 @@ type blockRequest struct { // GetBlock attempts to retrieve a particular block from peers within the // deadline enforced by the context. func (bs *Bitswap) GetBlock(parent context.Context, k *cid.Cid) (blocks.Block, error) { - if k == nil { - log.Error("nil cid in GetBlock") - return nil, blockstore.ErrNotFound - } - - // Any async work initiated by this function must end when this function - // returns. To ensure this, derive a new context. Note that it is okay to - // listen on parent in this scope, but NOT okay to pass |parent| to - // functions called by this one. Otherwise those functions won't return - // when this context's cancel func is executed. This is difficult to - // enforce. May this comment keep you safe. - ctx, cancelFunc := context.WithCancel(parent) - - // TODO: this request ID should come in from a higher layer so we can track - // across multiple 'GetBlock' invocations - ctx = logging.ContextWithLoggable(ctx, loggables.Uuid("GetBlockRequest")) - log.Event(ctx, "Bitswap.GetBlockRequest.Start", k) - defer log.Event(ctx, "Bitswap.GetBlockRequest.End", k) - defer cancelFunc() - - promise, err := bs.GetBlocks(ctx, []*cid.Cid{k}) - if err != nil { - return nil, err - } - - select { - case block, ok := <-promise: - if !ok { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - return nil, errors.New("promise channel was closed") - } - } - return block, nil - case <-parent.Done(): - return nil, parent.Err() - } + return getBlock(parent, k, bs.GetBlocks) } func (bs *Bitswap) WantlistForPeer(p peer.ID) []*cid.Cid { @@ -251,7 +219,7 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []*cid.Cid) (<-chan block log.Event(ctx, "Bitswap.GetBlockRequest.Start", k) } - bs.wm.WantBlocks(ctx, keys) + bs.wm.WantBlocks(ctx, keys, nil) // NB: Optimization. Assumes that providers of key[0] are likely to // be able to provide for all keys. 
This currently holds true in most @@ -304,7 +272,7 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []*cid.Cid) (<-chan block // CancelWant removes a given key from the wantlist func (bs *Bitswap) CancelWants(cids []*cid.Cid) { - bs.wm.CancelWants(cids) + bs.wm.CancelWants(context.Background(), cids, nil) } // HasBlock announces the existance of a block to this bitswap service. The @@ -340,7 +308,22 @@ func (bs *Bitswap) HasBlock(blk blocks.Block) error { return nil } +func (bs *Bitswap) SessionsForBlock(c *cid.Cid) []*Session { + bs.sessLk.Lock() + defer bs.sessLk.Unlock() + + var out []*Session + for _, s := range bs.sessions { + if s.InterestedIn(c) { + out = append(out, s) + } + } + return out +} + func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { + atomic.AddUint64(&bs.messagesRecvd, 1) + // This call records changes to wantlists, blocks received, // and number of bytes transfered. bs.engine.MessageReceived(p, incoming) @@ -362,7 +345,8 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg } keys = append(keys, block.Cid()) } - bs.wm.CancelWants(keys) + + bs.wm.CancelWants(context.Background(), keys, nil) wg := sync.WaitGroup{} for _, block := range iblocks { @@ -375,6 +359,9 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg k := b.Cid() log.Event(ctx, "Bitswap.GetBlockRequest.End", k) + for _, ses := range bs.SessionsForBlock(k) { + ses.ReceiveBlock(p, b) + } log.Debugf("got block %s from %s", b, p) if err := bs.HasBlock(b); err != nil { log.Warningf("ReceiveMessage HasBlock error: %s", err) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 770041c9f..76a28d5dc 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -370,6 +370,9 @@ func TestDoubleGet(t *testing.T) { instances := sg.Instances(2) blocks := bg.Blocks(1) + // NOTE: A race condition can happen here where these GetBlocks requests go + // through 
before the peers even get connected. This is okay, bitswap + // *should* be able to handle this. ctx1, cancel1 := context.WithCancel(context.Background()) blkch1, err := instances[1].Exchange.GetBlocks(ctx1, []*cid.Cid{blocks[0].Cid()}) if err != nil { @@ -385,7 +388,7 @@ func TestDoubleGet(t *testing.T) { } // ensure both requests make it into the wantlist at the same time - time.Sleep(time.Millisecond * 100) + time.Sleep(time.Millisecond * 20) cancel1() _, ok := <-blkch1 @@ -405,6 +408,14 @@ func TestDoubleGet(t *testing.T) { } t.Log(blk) case <-time.After(time.Second * 5): + p1wl := instances[0].Exchange.WantlistForPeer(instances[1].Peer) + if len(p1wl) != 1 { + t.Logf("wantlist view didnt have 1 item (had %d)", len(p1wl)) + } else if !p1wl[0].Equals(blocks[0].Cid()) { + t.Logf("had 1 item, it was wrong: %s %s", blocks[0].Cid(), p1wl[0]) + } else { + t.Log("had correct wantlist, somehow") + } t.Fatal("timed out waiting on block") } diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index a51610e60..973a7eb85 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -2,10 +2,10 @@ package decision import ( + "context" "sync" "time" - context "context" bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" diff --git a/bitswap/get.go b/bitswap/get.go new file mode 100644 index 000000000..3a64f5117 --- /dev/null +++ b/bitswap/get.go @@ -0,0 +1,100 @@ +package bitswap + +import ( + "context" + "errors" + + blocks "github.com/ipfs/go-ipfs/blocks" + blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" + notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" + + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" +) + +type getBlocksFunc func(context.Context, []*cid.Cid) (<-chan blocks.Block, error) + +func getBlock(p context.Context, k *cid.Cid, gb getBlocksFunc) (blocks.Block, error) { + if k 
== nil { + log.Error("nil cid in GetBlock") + return nil, blockstore.ErrNotFound + } + + // Any async work initiated by this function must end when this function + // returns. To ensure this, derive a new context. Note that it is okay to + // listen on parent in this scope, but NOT okay to pass |parent| to + // functions called by this one. Otherwise those functions won't return + // when this context's cancel func is executed. This is difficult to + // enforce. May this comment keep you safe. + ctx, cancel := context.WithCancel(p) + defer cancel() + + promise, err := gb(ctx, []*cid.Cid{k}) + if err != nil { + return nil, err + } + + select { + case block, ok := <-promise: + if !ok { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + return nil, errors.New("promise channel was closed") + } + } + return block, nil + case <-p.Done(): + return nil, p.Err() + } +} + +type wantFunc func(context.Context, []*cid.Cid) + +func getBlocksImpl(ctx context.Context, keys []*cid.Cid, notif notifications.PubSub, want wantFunc, cwants func([]*cid.Cid)) (<-chan blocks.Block, error) { + if len(keys) == 0 { + out := make(chan blocks.Block) + close(out) + return out, nil + } + + remaining := cid.NewSet() + promise := notif.Subscribe(ctx, keys...) 
+ for _, k := range keys { + log.Event(ctx, "Bitswap.GetBlockRequest.Start", k) + remaining.Add(k) + } + + want(ctx, keys) + + out := make(chan blocks.Block) + go handleIncoming(ctx, remaining, promise, out, cwants) + return out, nil +} + +func handleIncoming(ctx context.Context, remaining *cid.Set, in <-chan blocks.Block, out chan blocks.Block, cfun func([]*cid.Cid)) { + ctx, cancel := context.WithCancel(ctx) + defer func() { + cancel() + close(out) + // can't just defer this call on its own, arguments are resolved *when* the defer is created + cfun(remaining.Keys()) + }() + for { + select { + case blk, ok := <-in: + if !ok { + return + } + + remaining.Remove(blk.Cid()) + select { + case out <- blk: + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } +} diff --git a/bitswap/session.go b/bitswap/session.go new file mode 100644 index 000000000..84ab680dd --- /dev/null +++ b/bitswap/session.go @@ -0,0 +1,221 @@ +package bitswap + +import ( + "context" + "time" + + blocks "github.com/ipfs/go-ipfs/blocks" + notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" + + logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru" + loggables "gx/ipfs/QmVesPmqbPp7xRGyY96tnBwzDtVV1nqv4SCVxo5zCqKyH8/go-libp2p-loggables" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" + peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" +) + +const activeWantsLimit = 16 + +type Session struct { + ctx context.Context + tofetch []*cid.Cid + activePeers map[peer.ID]struct{} + activePeersArr []peer.ID + + bs *Bitswap + incoming chan blkRecv + newReqs chan []*cid.Cid + cancelKeys chan []*cid.Cid + + interest *lru.Cache + liveWants map[string]time.Time + liveCnt int + + tick *time.Timer + baseTickDelay time.Duration + + latTotal time.Duration + fetchcnt int + + notif notifications.PubSub + + uuid logging.Loggable +} + +func (bs 
*Bitswap) NewSession(ctx context.Context) *Session { + s := &Session{ + activePeers: make(map[peer.ID]struct{}), + liveWants: make(map[string]time.Time), + newReqs: make(chan []*cid.Cid), + cancelKeys: make(chan []*cid.Cid), + ctx: ctx, + bs: bs, + incoming: make(chan blkRecv), + notif: notifications.New(), + uuid: loggables.Uuid("GetBlockRequest"), + baseTickDelay: time.Millisecond * 500, + } + + cache, _ := lru.New(2048) + s.interest = cache + + bs.sessLk.Lock() + bs.sessions = append(bs.sessions, s) + bs.sessLk.Unlock() + + go s.run(ctx) + + return s +} + +type blkRecv struct { + from peer.ID + blk blocks.Block +} + +func (s *Session) ReceiveBlock(from peer.ID, blk blocks.Block) { + s.incoming <- blkRecv{from: from, blk: blk} +} + +func (s *Session) InterestedIn(c *cid.Cid) bool { + return s.interest.Contains(c.KeyString()) +} + +const provSearchDelay = time.Second * 10 + +func (s *Session) addActivePeer(p peer.ID) { + if _, ok := s.activePeers[p]; !ok { + s.activePeers[p] = struct{}{} + s.activePeersArr = append(s.activePeersArr, p) + } +} + +func (s *Session) resetTick() { + if s.latTotal == 0 { + s.tick.Reset(provSearchDelay) + } else { + avLat := s.latTotal / time.Duration(s.fetchcnt) + s.tick.Reset(s.baseTickDelay + (3 * avLat)) + } +} + +func (s *Session) run(ctx context.Context) { + s.tick = time.NewTimer(provSearchDelay) + newpeers := make(chan peer.ID, 16) + for { + select { + case blk := <-s.incoming: + s.tick.Stop() + + s.addActivePeer(blk.from) + + s.receiveBlock(ctx, blk.blk) + + s.resetTick() + case keys := <-s.newReqs: + for _, k := range keys { + s.interest.Add(k.KeyString(), nil) + } + if s.liveCnt < activeWantsLimit { + toadd := activeWantsLimit - s.liveCnt + if toadd > len(keys) { + toadd = len(keys) + } + s.liveCnt += toadd + + now := keys[:toadd] + keys = keys[toadd:] + + s.wantBlocks(ctx, now) + } + s.tofetch = append(s.tofetch, keys...) 
+ case keys := <-s.cancelKeys: + s.cancel(keys) + + case <-s.tick.C: + var live []*cid.Cid + for c, _ := range s.liveWants { + cs, _ := cid.Cast([]byte(c)) + live = append(live, cs) + s.liveWants[c] = time.Now() + } + + // Broadcast these keys to everyone we're connected to + s.bs.wm.WantBlocks(ctx, live, nil) + + if len(live) > 0 { + go func() { + for p := range s.bs.network.FindProvidersAsync(ctx, live[0], 10) { + newpeers <- p + } + }() + } + s.resetTick() + case p := <-newpeers: + s.addActivePeer(p) + case <-ctx.Done(): + return + } + } +} + +func (s *Session) receiveBlock(ctx context.Context, blk blocks.Block) { + ks := blk.Cid().KeyString() + if _, ok := s.liveWants[ks]; ok { + s.liveCnt-- + tval := s.liveWants[ks] + s.latTotal += time.Since(tval) + s.fetchcnt++ + delete(s.liveWants, ks) + s.notif.Publish(blk) + + if len(s.tofetch) > 0 { + next := s.tofetch[0:1] + s.tofetch = s.tofetch[1:] + s.wantBlocks(ctx, next) + } + } +} + +func (s *Session) wantBlocks(ctx context.Context, ks []*cid.Cid) { + for _, c := range ks { + s.liveWants[c.KeyString()] = time.Now() + } + s.bs.wm.WantBlocks(ctx, ks, s.activePeersArr) +} + +func (s *Session) cancel(keys []*cid.Cid) { + sset := cid.NewSet() + for _, c := range keys { + sset.Add(c) + } + var i, j int + for ; j < len(s.tofetch); j++ { + if sset.Has(s.tofetch[j]) { + continue + } + s.tofetch[i] = s.tofetch[j] + i++ + } + s.tofetch = s.tofetch[:i] +} + +func (s *Session) cancelWants(keys []*cid.Cid) { + s.cancelKeys <- keys +} + +func (s *Session) fetch(ctx context.Context, keys []*cid.Cid) { + select { + case s.newReqs <- keys: + case <-ctx.Done(): + } +} + +func (s *Session) GetBlocks(ctx context.Context, keys []*cid.Cid) (<-chan blocks.Block, error) { + ctx = logging.ContextWithLoggable(ctx, s.uuid) + return getBlocksImpl(ctx, keys, s.notif, s.fetch, s.cancelWants) +} + +func (s *Session) GetBlock(parent context.Context, k *cid.Cid) (blocks.Block, error) { + return getBlock(parent, k, s.GetBlocks) +} diff --git 
a/bitswap/session_test.go b/bitswap/session_test.go new file mode 100644 index 000000000..426acd90a --- /dev/null +++ b/bitswap/session_test.go @@ -0,0 +1,152 @@ +package bitswap + +import ( + "context" + "fmt" + "testing" + "time" + + blocks "github.com/ipfs/go-ipfs/blocks" + blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" + + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" +) + +func TestBasicSessions(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := getVirtualNetwork() + sesgen := NewTestSessionGenerator(vnet) + defer sesgen.Close() + bgen := blocksutil.NewBlockGenerator() + + block := bgen.Next() + inst := sesgen.Instances(2) + + a := inst[0] + b := inst[1] + + if err := b.Blockstore().Put(block); err != nil { + t.Fatal(err) + } + + sesa := a.Exchange.NewSession(ctx) + + blkout, err := sesa.GetBlock(ctx, block.Cid()) + if err != nil { + t.Fatal(err) + } + + if !blkout.Cid().Equals(block.Cid()) { + t.Fatal("got wrong block") + } +} + +func assertBlockLists(got, exp []blocks.Block) error { + if len(got) != len(exp) { + return fmt.Errorf("got wrong number of blocks, %d != %d", len(got), len(exp)) + } + + h := cid.NewSet() + for _, b := range got { + h.Add(b.Cid()) + } + for _, b := range exp { + if !h.Has(b.Cid()) { + return fmt.Errorf("didnt have: %s", b.Cid()) + } + } + return nil +} + +func TestSessionBetweenPeers(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := getVirtualNetwork() + sesgen := NewTestSessionGenerator(vnet) + defer sesgen.Close() + bgen := blocksutil.NewBlockGenerator() + + inst := sesgen.Instances(10) + + blks := bgen.Blocks(101) + if err := inst[0].Blockstore().PutMany(blks); err != nil { + t.Fatal(err) + } + + var cids []*cid.Cid + for _, blk := range blks { + cids = append(cids, blk.Cid()) + } + + ses := inst[1].Exchange.NewSession(ctx) + if _, err := ses.GetBlock(ctx, cids[0]); err != nil { + t.Fatal(err) + } 
+ blks = blks[1:] + cids = cids[1:] + + for i := 0; i < 10; i++ { + ch, err := ses.GetBlocks(ctx, cids[i*10:(i+1)*10]) + if err != nil { + t.Fatal(err) + } + + var got []blocks.Block + for b := range ch { + got = append(got, b) + } + if err := assertBlockLists(got, blks[i*10:(i+1)*10]); err != nil { + t.Fatal(err) + } + } + for _, is := range inst[2:] { + if is.Exchange.messagesRecvd > 2 { + t.Fatal("uninvolved nodes should only receive two messages", is.Exchange.messagesRecvd) + } + } +} + +func TestSessionSplitFetch(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := getVirtualNetwork() + sesgen := NewTestSessionGenerator(vnet) + defer sesgen.Close() + bgen := blocksutil.NewBlockGenerator() + + inst := sesgen.Instances(11) + + blks := bgen.Blocks(100) + for i := 0; i < 10; i++ { + if err := inst[i].Blockstore().PutMany(blks[i*10 : (i+1)*10]); err != nil { + t.Fatal(err) + } + } + + var cids []*cid.Cid + for _, blk := range blks { + cids = append(cids, blk.Cid()) + } + + ses := inst[10].Exchange.NewSession(ctx) + ses.baseTickDelay = time.Millisecond * 10 + + for i := 0; i < 10; i++ { + ch, err := ses.GetBlocks(ctx, cids[i*10:(i+1)*10]) + if err != nil { + t.Fatal(err) + } + + var got []blocks.Block + for b := range ch { + got = append(got, b) + } + if err := assertBlockLists(got, blks[i*10:(i+1)*10]); err != nil { + t.Fatal(err) + } + } +} diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 4bae29ce3..d3bb98b0e 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -47,7 +47,7 @@ func (g *SessionGenerator) Next() Instance { if err != nil { panic("FIXME") // TODO change signature } - return Session(g.ctx, g.net, p) + return MkSession(g.ctx, g.net, p) } func (g *SessionGenerator) Instances(n int) []Instance { @@ -86,7 +86,7 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // NB: It's easy make mistakes by providing the same peer ID to two different // sessions. 
To safeguard, use the SessionGenerator to generate sessions. It's // just a much better idea. -func Session(ctx context.Context, net tn.Network, p testutil.Identity) Instance { +func MkSession(ctx context.Context, net tn.Network, p testutil.Identity) Instance { bsdelay := delay.Fixed(0) adapter := net.Adapter(p) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 34bf78572..c8a617724 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -71,13 +71,13 @@ type msgQueue struct { done chan struct{} } -func (pm *WantManager) WantBlocks(ctx context.Context, ks []*cid.Cid) { +func (pm *WantManager) WantBlocks(ctx context.Context, ks []*cid.Cid, peers []peer.ID) { log.Infof("want blocks: %s", ks) - pm.addEntries(ctx, ks, false) + pm.addEntries(ctx, ks, peers, false) } -func (pm *WantManager) CancelWants(ks []*cid.Cid) { - pm.addEntries(context.Background(), ks, true) +func (pm *WantManager) CancelWants(ctx context.Context, ks []*cid.Cid, peers []peer.ID) { + pm.addEntries(context.Background(), ks, peers, true) } type wantSet struct { @@ -85,7 +85,7 @@ type wantSet struct { targets []peer.ID } -func (pm *WantManager) addEntries(ctx context.Context, ks []*cid.Cid, cancel bool) { +func (pm *WantManager) addEntries(ctx context.Context, ks []*cid.Cid, targets []peer.ID, cancel bool) { var entries []*bsmsg.Entry for i, k := range ks { entries = append(entries, &bsmsg.Entry{ @@ -98,7 +98,7 @@ func (pm *WantManager) addEntries(ctx context.Context, ks []*cid.Cid, cancel boo }) } select { - case pm.incoming <- &wantSet{entries: entries}: + case pm.incoming <- &wantSet{entries: entries, targets: targets}: case <-pm.ctx.Done(): case <-ctx.Done(): } diff --git a/bitswap/workers.go b/bitswap/workers.go index 648bfa403..ac1e41eb8 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -49,7 +49,7 @@ func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { func (bs *Bitswap) taskWorker(ctx context.Context, id int) { idmap := 
logging.LoggableMap{"ID": id} - defer log.Info("bitswap task worker shutting down...") + defer log.Debug("bitswap task worker shutting down...") for { log.Event(ctx, "Bitswap.TaskWorker.Loop", idmap) select { From f75ec85f71440dd826b946edb70847b78f5a84bb Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 27 Apr 2017 17:38:46 -0700 Subject: [PATCH 0555/1035] rework how refcounted wantlists work License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@9ec351de30e13e12a5e6f0a1ca5ee932df5c9765 --- bitswap/bitswap.go | 28 +++++++--- bitswap/bitswap_test.go | 5 ++ bitswap/session.go | 22 ++++++-- bitswap/wantlist/wantlist.go | 92 ++++++++++++++++++++++--------- bitswap/wantlist/wantlist_test.go | 87 +++++++++++++++++++++++++++++ bitswap/wantmanager.go | 23 ++++---- 6 files changed, 206 insertions(+), 51 deletions(-) create mode 100644 bitswap/wantlist/wantlist_test.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 74c70b108..065c209a9 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -169,6 +169,9 @@ type Bitswap struct { // Sessions sessions []*Session sessLk sync.Mutex + + sessID uint64 + sessIDLk sync.Mutex } type blockRequest struct { @@ -219,7 +222,9 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []*cid.Cid) (<-chan block log.Event(ctx, "Bitswap.GetBlockRequest.Start", k) } - bs.wm.WantBlocks(ctx, keys, nil) + mses := bs.getNextSessionID() + + bs.wm.WantBlocks(ctx, keys, nil, mses) // NB: Optimization. Assumes that providers of key[0] are likely to // be able to provide for all keys. 
This currently holds true in most @@ -241,7 +246,7 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []*cid.Cid) (<-chan block defer close(out) defer func() { // can't just defer this call on its own, arguments are resolved *when* the defer is created - bs.CancelWants(remaining.Keys()) + bs.CancelWants(remaining.Keys(), mses) }() for { select { @@ -250,6 +255,7 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []*cid.Cid) (<-chan block return } + bs.CancelWants([]*cid.Cid{blk.Cid()}, mses) remaining.Remove(blk.Cid()) select { case out <- blk: @@ -270,9 +276,16 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []*cid.Cid) (<-chan block } } +func (bs *Bitswap) getNextSessionID() uint64 { + bs.sessIDLk.Lock() + defer bs.sessIDLk.Unlock() + bs.sessID++ + return bs.sessID +} + // CancelWant removes a given key from the wantlist -func (bs *Bitswap) CancelWants(cids []*cid.Cid) { - bs.wm.CancelWants(context.Background(), cids, nil) +func (bs *Bitswap) CancelWants(cids []*cid.Cid, ses uint64) { + bs.wm.CancelWants(context.Background(), cids, nil, ses) } // HasBlock announces the existance of a block to this bitswap service. 
The @@ -314,7 +327,7 @@ func (bs *Bitswap) SessionsForBlock(c *cid.Cid) []*Session { var out []*Session for _, s := range bs.sessions { - if s.InterestedIn(c) { + if s.interestedIn(c) { out = append(out, s) } } @@ -346,8 +359,6 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg keys = append(keys, block.Cid()) } - bs.wm.CancelWants(context.Background(), keys, nil) - wg := sync.WaitGroup{} for _, block := range iblocks { wg.Add(1) @@ -360,7 +371,8 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg log.Event(ctx, "Bitswap.GetBlockRequest.End", k) for _, ses := range bs.SessionsForBlock(k) { - ses.ReceiveBlock(p, b) + ses.receiveBlockFrom(p, b) + bs.CancelWants([]*cid.Cid{k}, ses.id) } log.Debugf("got block %s from %s", b, p) if err := bs.HasBlock(b); err != nil { diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 76a28d5dc..e73022f62 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -332,6 +332,11 @@ func TestBasicBitswap(t *testing.T) { t.Fatal(err) } + time.Sleep(time.Millisecond * 20) + if len(instances[1].Exchange.GetWantlist()) != 0 { + t.Fatal("shouldnt have anything in wantlist") + } + st0, err := instances[0].Exchange.Stat() if err != nil { t.Fatal(err) diff --git a/bitswap/session.go b/bitswap/session.go index 84ab680dd..0a5c7426a 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -16,6 +16,9 @@ import ( const activeWantsLimit = 16 +// Session holds state for an individual bitswap transfer operation. 
+// This allows bitswap to make smarter decisions about who to send wantlist +// info to, and who to request blocks from type Session struct { ctx context.Context tofetch []*cid.Cid @@ -40,8 +43,12 @@ type Session struct { notif notifications.PubSub uuid logging.Loggable + + id uint64 } +// NewSession creates a new bitswap session whose lifetime is bounded by the +// given context func (bs *Bitswap) NewSession(ctx context.Context) *Session { s := &Session{ activePeers: make(map[peer.ID]struct{}), @@ -54,6 +61,7 @@ func (bs *Bitswap) NewSession(ctx context.Context) *Session { notif: notifications.New(), uuid: loggables.Uuid("GetBlockRequest"), baseTickDelay: time.Millisecond * 500, + id: bs.getNextSessionID(), } cache, _ := lru.New(2048) @@ -73,11 +81,11 @@ type blkRecv struct { blk blocks.Block } -func (s *Session) ReceiveBlock(from peer.ID, blk blocks.Block) { +func (s *Session) receiveBlockFrom(from peer.ID, blk blocks.Block) { s.incoming <- blkRecv{from: from, blk: blk} } -func (s *Session) InterestedIn(c *cid.Cid) bool { +func (s *Session) interestedIn(c *cid.Cid) bool { return s.interest.Contains(c.KeyString()) } @@ -134,14 +142,14 @@ func (s *Session) run(ctx context.Context) { case <-s.tick.C: var live []*cid.Cid - for c, _ := range s.liveWants { + for c := range s.liveWants { cs, _ := cid.Cast([]byte(c)) live = append(live, cs) s.liveWants[c] = time.Now() } // Broadcast these keys to everyone we're connected to - s.bs.wm.WantBlocks(ctx, live, nil) + s.bs.wm.WantBlocks(ctx, live, nil, s.id) if len(live) > 0 { go func() { @@ -181,7 +189,7 @@ func (s *Session) wantBlocks(ctx context.Context, ks []*cid.Cid) { for _, c := range ks { s.liveWants[c.KeyString()] = time.Now() } - s.bs.wm.WantBlocks(ctx, ks, s.activePeersArr) + s.bs.wm.WantBlocks(ctx, ks, s.activePeersArr, s.id) } func (s *Session) cancel(keys []*cid.Cid) { @@ -211,11 +219,15 @@ func (s *Session) fetch(ctx context.Context, keys []*cid.Cid) { } } +// GetBlocks fetches a set of blocks within the 
context of this session and +// returns a channel that found blocks will be returned on. No order is +// guaranteed on the returned blocks. func (s *Session) GetBlocks(ctx context.Context, keys []*cid.Cid) (<-chan blocks.Block, error) { ctx = logging.ContextWithLoggable(ctx, s.uuid) return getBlocksImpl(ctx, keys, s.notif, s.fetch, s.cancelWants) } +// GetBlock fetches a single block func (s *Session) GetBlock(parent context.Context, k *cid.Cid) (blocks.Block, error) { return getBlock(parent, k, s.GetBlocks) } diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 7c77998b3..06b5b80dc 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -10,8 +10,8 @@ import ( ) type ThreadSafe struct { - lk sync.RWMutex - Wantlist Wantlist + lk sync.RWMutex + set map[string]*Entry } // not threadsafe @@ -23,7 +23,16 @@ type Entry struct { Cid *cid.Cid Priority int - RefCnt int + SesTrk map[uint64]struct{} +} + +// NewRefEntry creates a new reference tracked wantlist entry +func NewRefEntry(c *cid.Cid, p int) *Entry { + return &Entry{ + Cid: c, + Priority: p, + SesTrk: make(map[uint64]struct{}), + } } type entrySlice []*Entry @@ -34,7 +43,7 @@ func (es entrySlice) Less(i, j int) bool { return es[i].Priority > es[j].Priorit func NewThreadSafe() *ThreadSafe { return &ThreadSafe{ - Wantlist: *New(), + set: make(map[string]*Entry), } } @@ -44,46 +53,86 @@ func New() *Wantlist { } } -func (w *ThreadSafe) Add(k *cid.Cid, priority int) bool { +func (w *ThreadSafe) Add(c *cid.Cid, priority int, ses uint64) bool { w.lk.Lock() defer w.lk.Unlock() - return w.Wantlist.Add(k, priority) + k := c.KeyString() + if e, ok := w.set[k]; ok { + e.SesTrk[ses] = struct{}{} + return false + } + + w.set[k] = &Entry{ + Cid: c, + Priority: priority, + SesTrk: map[uint64]struct{}{ses: struct{}{}}, + } + + return true } -func (w *ThreadSafe) AddEntry(e *Entry) bool { +func (w *ThreadSafe) AddEntry(e *Entry, ses uint64) bool { w.lk.Lock() defer w.lk.Unlock() - 
return w.Wantlist.AddEntry(e) + k := e.Cid.KeyString() + if ex, ok := w.set[k]; ok { + ex.SesTrk[ses] = struct{}{} + return false + } + w.set[k] = e + e.SesTrk[ses] = struct{}{} + return true } -func (w *ThreadSafe) Remove(k *cid.Cid) bool { +func (w *ThreadSafe) Remove(c *cid.Cid, ses uint64) bool { w.lk.Lock() defer w.lk.Unlock() - return w.Wantlist.Remove(k) + k := c.KeyString() + e, ok := w.set[k] + if !ok { + return false + } + + delete(e.SesTrk, ses) + if len(e.SesTrk) == 0 { + delete(w.set, k) + return true + } + return false } func (w *ThreadSafe) Contains(k *cid.Cid) (*Entry, bool) { w.lk.RLock() defer w.lk.RUnlock() - return w.Wantlist.Contains(k) + e, ok := w.set[k.KeyString()] + return e, ok } func (w *ThreadSafe) Entries() []*Entry { w.lk.RLock() defer w.lk.RUnlock() - return w.Wantlist.Entries() + var es entrySlice + for _, e := range w.set { + es = append(es, e) + } + return es } func (w *ThreadSafe) SortedEntries() []*Entry { w.lk.RLock() defer w.lk.RUnlock() - return w.Wantlist.SortedEntries() + var es entrySlice + for _, e := range w.set { + es = append(es, e) + } + sort.Sort(es) + return es } func (w *ThreadSafe) Len() int { w.lk.RLock() defer w.lk.RUnlock() - return w.Wantlist.Len() + return len(w.set) } func (w *Wantlist) Len() int { @@ -92,15 +141,13 @@ func (w *Wantlist) Len() int { func (w *Wantlist) Add(c *cid.Cid, priority int) bool { k := c.KeyString() - if e, ok := w.set[k]; ok { - e.RefCnt++ + if _, ok := w.set[k]; ok { return false } w.set[k] = &Entry{ Cid: c, Priority: priority, - RefCnt: 1, } return true @@ -108,8 +155,7 @@ func (w *Wantlist) Add(c *cid.Cid, priority int) bool { func (w *Wantlist) AddEntry(e *Entry) bool { k := e.Cid.KeyString() - if ex, ok := w.set[k]; ok { - ex.RefCnt++ + if _, ok := w.set[k]; ok { return false } w.set[k] = e @@ -118,16 +164,12 @@ func (w *Wantlist) AddEntry(e *Entry) bool { func (w *Wantlist) Remove(c *cid.Cid) bool { k := c.KeyString() - e, ok := w.set[k] + _, ok := w.set[k] if !ok { return false 
} - e.RefCnt-- - if e.RefCnt <= 0 { - delete(w.set, k) - return true - } + delete(w.set, k) return false } diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/wantlist/wantlist_test.go new file mode 100644 index 000000000..a88825dcd --- /dev/null +++ b/bitswap/wantlist/wantlist_test.go @@ -0,0 +1,87 @@ +package wantlist + +import ( + "testing" + + cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" +) + +var testcids []*cid.Cid + +func init() { + strs := []string{ + "QmQL8LqkEgYXaDHdNYCG2mmpow7Sp8Z8Kt3QS688vyBeC7", + "QmcBDsdjgSXU7BP4A4V8LJCXENE5xVwnhrhRGVTJr9YCVj", + "QmQakgd2wDxc3uUF4orGdEm28zUT9Mmimp5pyPG2SFS9Gj", + } + for _, s := range strs { + c, err := cid.Decode(s) + if err != nil { + panic(err) + } + testcids = append(testcids, c) + } + +} + +type wli interface { + Contains(*cid.Cid) (*Entry, bool) +} + +func assertHasCid(t *testing.T, w wli, c *cid.Cid) { + e, ok := w.Contains(c) + if !ok { + t.Fatal("expected to have ", c) + } + if !e.Cid.Equals(c) { + t.Fatal("returned entry had wrong cid value") + } +} + +func assertNotHasCid(t *testing.T, w wli, c *cid.Cid) { + _, ok := w.Contains(c) + if ok { + t.Fatal("expected not to have ", c) + } +} + +func TestBasicWantlist(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 5) + assertHasCid(t, wl, testcids[0]) + wl.Add(testcids[1], 4) + assertHasCid(t, wl, testcids[0]) + assertHasCid(t, wl, testcids[1]) + + if wl.Len() != 2 { + t.Fatal("should have had two items") + } + + wl.Add(testcids[1], 4) + assertHasCid(t, wl, testcids[0]) + assertHasCid(t, wl, testcids[1]) + + if wl.Len() != 2 { + t.Fatal("should have had two items") + } + + wl.Remove(testcids[0]) + assertHasCid(t, wl, testcids[1]) + if _, has := wl.Contains(testcids[0]); has { + t.Fatal("shouldnt have this cid") + } +} + +func TestSesRefWantlist(t *testing.T) { + wl := NewThreadSafe() + + wl.Add(testcids[0], 5, 1) + assertHasCid(t, wl, testcids[0]) + wl.Remove(testcids[0], 2) + assertHasCid(t, wl, testcids[0]) + 
wl.Add(testcids[0], 5, 1) + assertHasCid(t, wl, testcids[0]) + wl.Remove(testcids[0], 1) + assertNotHasCid(t, wl, testcids[0]) +} diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index c8a617724..cb5627b10 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -71,34 +71,31 @@ type msgQueue struct { done chan struct{} } -func (pm *WantManager) WantBlocks(ctx context.Context, ks []*cid.Cid, peers []peer.ID) { +func (pm *WantManager) WantBlocks(ctx context.Context, ks []*cid.Cid, peers []peer.ID, ses uint64) { log.Infof("want blocks: %s", ks) - pm.addEntries(ctx, ks, peers, false) + pm.addEntries(ctx, ks, peers, false, ses) } -func (pm *WantManager) CancelWants(ctx context.Context, ks []*cid.Cid, peers []peer.ID) { - pm.addEntries(context.Background(), ks, peers, true) +func (pm *WantManager) CancelWants(ctx context.Context, ks []*cid.Cid, peers []peer.ID, ses uint64) { + pm.addEntries(context.Background(), ks, peers, true, ses) } type wantSet struct { entries []*bsmsg.Entry targets []peer.ID + from uint64 } -func (pm *WantManager) addEntries(ctx context.Context, ks []*cid.Cid, targets []peer.ID, cancel bool) { +func (pm *WantManager) addEntries(ctx context.Context, ks []*cid.Cid, targets []peer.ID, cancel bool, ses uint64) { var entries []*bsmsg.Entry for i, k := range ks { entries = append(entries, &bsmsg.Entry{ Cancel: cancel, - Entry: &wantlist.Entry{ - Cid: k, - Priority: kMaxPriority - i, - RefCnt: 1, - }, + Entry: wantlist.NewRefEntry(k, kMaxPriority-i), }) } select { - case pm.incoming <- &wantSet{entries: entries, targets: targets}: + case pm.incoming <- &wantSet{entries: entries, targets: targets, from: ses}: case <-pm.ctx.Done(): case <-ctx.Done(): } @@ -290,11 +287,11 @@ func (pm *WantManager) Run() { // add changes to our wantlist for _, e := range ws.entries { if e.Cancel { - if pm.wl.Remove(e.Cid) { + if pm.wl.Remove(e.Cid, ws.from) { pm.wantlistGauge.Dec() } } else { - if pm.wl.AddEntry(e.Entry) { + if pm.wl.AddEntry(e.Entry, 
ws.from) { pm.wantlistGauge.Inc() } } From 787876e684b9db8ad3eb5320c8016eaabec1ee61 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 2 May 2017 22:54:01 -0700 Subject: [PATCH 0556/1035] fix wantlist removal accounting, add tests License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@bd7171ee3836f8d69c3c8f5265252ddbb060745f --- bitswap/bitswap.go | 3 +++ bitswap/bitswap_test.go | 6 +++++- bitswap/decision/engine.go | 11 ++++------- bitswap/wantlist/wantlist.go | 2 +- bitswap/wantlist/wantlist_test.go | 33 +++++++++++++++++++++++-------- 5 files changed, 38 insertions(+), 17 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 065c209a9..85f9a05da 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -285,6 +285,9 @@ func (bs *Bitswap) getNextSessionID() uint64 { // CancelWant removes a given key from the wantlist func (bs *Bitswap) CancelWants(cids []*cid.Cid, ses uint64) { + if len(cids) == 0 { + return + } bs.wm.CancelWants(context.Background(), cids, nil, ses) } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index e73022f62..26ea61f43 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -318,7 +318,7 @@ func TestBasicBitswap(t *testing.T) { t.Log("Test a one node trying to get one block from another") - instances := sg.Instances(2) + instances := sg.Instances(3) blocks := bg.Blocks(1) err := instances[0].Exchange.HasBlock(blocks[0]) if err != nil { @@ -333,6 +333,10 @@ func TestBasicBitswap(t *testing.T) { } time.Sleep(time.Millisecond * 20) + wl := instances[2].Exchange.WantlistForPeer(instances[1].Peer) + if len(wl) != 0 { + t.Fatal("should have no items in other peers wantlist") + } if len(instances[1].Exchange.GetWantlist()) != 0 { t.Fatal("shouldnt have anything in wantlist") } diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 973a7eb85..600df11f2 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -105,13 +105,10 @@ func 
NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { } func (e *Engine) WantlistForPeer(p peer.ID) (out []*wl.Entry) { - e.lock.Lock() - partner, ok := e.ledgerMap[p] - if ok { - out = partner.wantList.SortedEntries() - } - e.lock.Unlock() - return out + partner := e.findOrCreate(p) + partner.lk.Lock() + defer partner.lk.Unlock() + return partner.wantList.SortedEntries() } func (e *Engine) LedgerForPeer(p peer.ID) *Receipt { diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 06b5b80dc..73b45815b 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -170,7 +170,7 @@ func (w *Wantlist) Remove(c *cid.Cid) bool { } delete(w.set, k) - return false + return true } func (w *Wantlist) Contains(k *cid.Cid) (*Entry, bool) { diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/wantlist/wantlist_test.go index a88825dcd..e3aee3060 100644 --- a/bitswap/wantlist/wantlist_test.go +++ b/bitswap/wantlist/wantlist_test.go @@ -48,9 +48,13 @@ func assertNotHasCid(t *testing.T, w wli, c *cid.Cid) { func TestBasicWantlist(t *testing.T) { wl := New() - wl.Add(testcids[0], 5) + if !wl.Add(testcids[0], 5) { + t.Fatal("expected true") + } assertHasCid(t, wl, testcids[0]) - wl.Add(testcids[1], 4) + if !wl.Add(testcids[1], 4) { + t.Fatal("expected true") + } assertHasCid(t, wl, testcids[0]) assertHasCid(t, wl, testcids[1]) @@ -58,7 +62,9 @@ func TestBasicWantlist(t *testing.T) { t.Fatal("should have had two items") } - wl.Add(testcids[1], 4) + if wl.Add(testcids[1], 4) { + t.Fatal("add shouldnt report success on second add") + } assertHasCid(t, wl, testcids[0]) assertHasCid(t, wl, testcids[1]) @@ -66,7 +72,10 @@ func TestBasicWantlist(t *testing.T) { t.Fatal("should have had two items") } - wl.Remove(testcids[0]) + if !wl.Remove(testcids[0]) { + t.Fatal("should have gotten true") + } + assertHasCid(t, wl, testcids[1]) if _, has := wl.Contains(testcids[0]); has { t.Fatal("shouldnt have this cid") @@ -76,12 +85,20 @@ func 
TestBasicWantlist(t *testing.T) { func TestSesRefWantlist(t *testing.T) { wl := NewThreadSafe() - wl.Add(testcids[0], 5, 1) + if !wl.Add(testcids[0], 5, 1) { + t.Fatal("should have added") + } assertHasCid(t, wl, testcids[0]) - wl.Remove(testcids[0], 2) + if wl.Remove(testcids[0], 2) { + t.Fatal("shouldnt have removed") + } assertHasCid(t, wl, testcids[0]) - wl.Add(testcids[0], 5, 1) + if wl.Add(testcids[0], 5, 1) { + t.Fatal("shouldnt have added") + } assertHasCid(t, wl, testcids[0]) - wl.Remove(testcids[0], 1) + if !wl.Remove(testcids[0], 1) { + t.Fatal("should have removed") + } assertNotHasCid(t, wl, testcids[0]) } From 23752cadb9e59bdcbb14a3e1408bd5e3f6366722 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 4 May 2017 18:00:15 -0700 Subject: [PATCH 0557/1035] WIP: wire sessions up through into FetchGraph License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@4e08b46e5edaca7f5832e9914d3e10d31a348cc8 --- bitswap/bitswap.go | 1 - bitswap/get.go | 2 +- bitswap/session.go | 2 +- bitswap/session_test.go | 2 +- bitswap/wantlist/wantlist_test.go | 2 +- 5 files changed, 4 insertions(+), 5 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 85f9a05da..dd58aee7a 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -23,7 +23,6 @@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - loggables "gx/ipfs/QmVesPmqbPp7xRGyY96tnBwzDtVV1nqv4SCVxo5zCqKyH8/go-libp2p-loggables" blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" diff --git a/bitswap/get.go b/bitswap/get.go index 3a64f5117..a72ead83a 100644 --- a/bitswap/get.go +++ b/bitswap/get.go @@ -4,9 +4,9 @@ import ( 
"context" "errors" - blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" + blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" ) diff --git a/bitswap/session.go b/bitswap/session.go index 0a5c7426a..7f1e21d03 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -4,8 +4,8 @@ import ( "context" "time" - blocks "github.com/ipfs/go-ipfs/blocks" notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" + blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru" diff --git a/bitswap/session_test.go b/bitswap/session_test.go index 426acd90a..d7808b89d 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -6,8 +6,8 @@ import ( "testing" "time" - blocks "github.com/ipfs/go-ipfs/blocks" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" + blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" ) diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/wantlist/wantlist_test.go index e3aee3060..d6027a718 100644 --- a/bitswap/wantlist/wantlist_test.go +++ b/bitswap/wantlist/wantlist_test.go @@ -3,7 +3,7 @@ package wantlist import ( "testing" - cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" + cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" ) var testcids []*cid.Cid From 1f4b0c71432a5a16866ed5cb2c826006f708228c Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 19 May 2017 21:04:11 -0700 Subject: [PATCH 0558/1035] track broadcasted wantlist entries License: MIT Signed-off-by: Jeromy This commit was moved from 
ipfs/go-bitswap@110b03f1da25b22e6323c0dfb7bb61aef2b458fd --- bitswap/bitswap.go | 1 + bitswap/bitswap_test.go | 2 +- bitswap/session.go | 54 +++++++++++++++++++++++++++--------- bitswap/session_test.go | 52 ++++++++++++++++++++++++++++++++++ bitswap/wantlist/wantlist.go | 14 ++++++++++ bitswap/wantmanager.go | 49 ++++++++++++++++---------------- 6 files changed, 134 insertions(+), 38 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index dd58aee7a..e0da2477a 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -323,6 +323,7 @@ func (bs *Bitswap) HasBlock(blk blocks.Block) error { return nil } +// SessionsForBlock returns a slice of all sessions that may be interested in the given cid func (bs *Bitswap) SessionsForBlock(c *cid.Cid) []*Session { bs.sessLk.Lock() defer bs.sessLk.Unlock() diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 26ea61f43..7842ae559 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -332,7 +332,7 @@ func TestBasicBitswap(t *testing.T) { t.Fatal(err) } - time.Sleep(time.Millisecond * 20) + time.Sleep(time.Millisecond * 25) wl := instances[2].Exchange.WantlistForPeer(instances[1].Peer) if len(wl) != 0 { t.Fatal("should have no items in other peers wantlist") diff --git a/bitswap/session.go b/bitswap/session.go index 7f1e21d03..128b377d4 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -25,14 +25,14 @@ type Session struct { activePeers map[peer.ID]struct{} activePeersArr []peer.ID - bs *Bitswap - incoming chan blkRecv - newReqs chan []*cid.Cid - cancelKeys chan []*cid.Cid + bs *Bitswap + incoming chan blkRecv + newReqs chan []*cid.Cid + cancelKeys chan []*cid.Cid + interestReqs chan interestReq interest *lru.Cache liveWants map[string]time.Time - liveCnt int tick *time.Timer baseTickDelay time.Duration @@ -55,6 +55,7 @@ func (bs *Bitswap) NewSession(ctx context.Context) *Session { liveWants: make(map[string]time.Time), newReqs: make(chan []*cid.Cid), cancelKeys: make(chan 
[]*cid.Cid), + interestReqs: make(chan interestReq), ctx: ctx, bs: bs, incoming: make(chan blkRecv), @@ -85,8 +86,29 @@ func (s *Session) receiveBlockFrom(from peer.ID, blk blocks.Block) { s.incoming <- blkRecv{from: from, blk: blk} } +type interestReq struct { + c *cid.Cid + resp chan bool +} + +// TODO: PERF: this is using a channel to guard a map access against race +// conditions. This is definitely much slower than a mutex, though its unclear +// if it will actually induce any noticeable slowness. This is implemented this +// way to avoid adding a more complex set of mutexes around the liveWants map. +// note that in the average case (where this session *is* interested in the +// block we received) this function will not be called, as the cid will likely +// still be in the interest cache. +func (s *Session) isLiveWant(c *cid.Cid) bool { + resp := make(chan bool) + s.interestReqs <- interestReq{ + c: c, + resp: resp, + } + return <-resp +} + func (s *Session) interestedIn(c *cid.Cid) bool { - return s.interest.Contains(c.KeyString()) + return s.interest.Contains(c.KeyString()) || s.isLiveWant(c) } const provSearchDelay = time.Second * 10 @@ -124,12 +146,11 @@ func (s *Session) run(ctx context.Context) { for _, k := range keys { s.interest.Add(k.KeyString(), nil) } - if s.liveCnt < activeWantsLimit { - toadd := activeWantsLimit - s.liveCnt + if len(s.liveWants) < activeWantsLimit { + toadd := activeWantsLimit - len(s.liveWants) if toadd > len(keys) { toadd = len(keys) } - s.liveCnt += toadd now := keys[:toadd] keys = keys[toadd:] @@ -152,15 +173,23 @@ func (s *Session) run(ctx context.Context) { s.bs.wm.WantBlocks(ctx, live, nil, s.id) if len(live) > 0 { - go func() { - for p := range s.bs.network.FindProvidersAsync(ctx, live[0], 10) { + go func(k *cid.Cid) { + // TODO: have a task queue setup for this to: + // - rate limit + // - manage timeouts + // - ensure two 'findprovs' calls for the same block don't run concurrently + // - share peers between sessions 
based on interest set + for p := range s.bs.network.FindProvidersAsync(ctx, k, 10) { newpeers <- p } - }() + }(live[0]) } s.resetTick() case p := <-newpeers: s.addActivePeer(p) + case lwchk := <-s.interestReqs: + _, ok := s.liveWants[lwchk.c.KeyString()] + lwchk.resp <- ok case <-ctx.Done(): return } @@ -170,7 +199,6 @@ func (s *Session) run(ctx context.Context) { func (s *Session) receiveBlock(ctx context.Context, blk blocks.Block) { ks := blk.Cid().KeyString() if _, ok := s.liveWants[ks]; ok { - s.liveCnt-- tval := s.liveWants[ks] s.latTotal += time.Since(tval) s.fetchcnt++ diff --git a/bitswap/session_test.go b/bitswap/session_test.go index d7808b89d..e2b959fed 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -150,3 +150,55 @@ func TestSessionSplitFetch(t *testing.T) { } } } + +func TestInterestCacheOverflow(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := getVirtualNetwork() + sesgen := NewTestSessionGenerator(vnet) + defer sesgen.Close() + bgen := blocksutil.NewBlockGenerator() + + blks := bgen.Blocks(2049) + inst := sesgen.Instances(2) + + a := inst[0] + b := inst[1] + + ses := a.Exchange.NewSession(ctx) + zeroch, err := ses.GetBlocks(ctx, []*cid.Cid{blks[0].Cid()}) + if err != nil { + t.Fatal(err) + } + + var restcids []*cid.Cid + for _, blk := range blks[1:] { + restcids = append(restcids, blk.Cid()) + } + + restch, err := ses.GetBlocks(ctx, restcids) + if err != nil { + t.Fatal(err) + } + + // wait to ensure that all the above cids were added to the sessions cache + time.Sleep(time.Millisecond * 50) + + if err := b.Exchange.HasBlock(blks[0]); err != nil { + t.Fatal(err) + } + + select { + case blk, ok := <-zeroch: + if ok && blk.Cid().Equals(blks[0].Cid()) { + // success! 
+ } else { + t.Fatal("failed to get the block") + } + case <-restch: + t.Fatal("should not get anything on restch") + case <-time.After(time.Second * 5): + t.Fatal("timed out waiting for block") + } +} diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 73b45815b..5902442ca 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -53,6 +53,14 @@ func New() *Wantlist { } } +// Add adds the given cid to the wantlist with the specified priority, governed +// by the session ID 'ses'. if a cid is added under multiple session IDs, then +// it must be removed by each of those sessions before it is no longer 'in the +// wantlist'. Calls to Add are idempotent given the same arguments. Subsequent +// calls with different values for priority will not update the priority +// TODO: think through priority changes here +// Add returns true if the cid did not exist in the wantlist before this call +// (even if it was under a different session) func (w *ThreadSafe) Add(c *cid.Cid, priority int, ses uint64) bool { w.lk.Lock() defer w.lk.Unlock() @@ -84,6 +92,10 @@ func (w *ThreadSafe) AddEntry(e *Entry, ses uint64) bool { return true } +// Remove removes the given cid from being tracked by the given session. +// 'true' is returned if this call to Remove removed the final session ID +// tracking the cid. 
(meaning true will be returned iff this call caused the +// value of 'Contains(c)' to change from true to false) func (w *ThreadSafe) Remove(c *cid.Cid, ses uint64) bool { w.lk.Lock() defer w.lk.Unlock() @@ -101,6 +113,8 @@ func (w *ThreadSafe) Remove(c *cid.Cid, ses uint64) bool { return false } +// Contains returns true if the given cid is in the wantlist tracked by one or +// more sessions func (w *ThreadSafe) Contains(k *cid.Cid) (*Entry, bool) { w.lk.RLock() defer w.lk.RUnlock() diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index cb5627b10..800fa1c40 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -25,6 +25,7 @@ type WantManager struct { // synchronized by Run loop, only touch inside there peers map[peer.ID]*msgQueue wl *wantlist.ThreadSafe + bcwl *wantlist.ThreadSafe network bsnet.BitSwapNetwork ctx context.Context @@ -47,6 +48,7 @@ func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantMana peerReqs: make(chan chan []peer.ID), peers: make(map[peer.ID]*msgQueue), wl: wantlist.NewThreadSafe(), + bcwl: wantlist.NewThreadSafe(), network: network, ctx: ctx, cancel: cancel, @@ -61,7 +63,7 @@ type msgQueue struct { outlk sync.Mutex out bsmsg.BitSwapMessage network bsnet.BitSwapNetwork - wl *wantlist.Wantlist + wl *wantlist.ThreadSafe sender bsnet.MessageSender @@ -71,11 +73,13 @@ type msgQueue struct { done chan struct{} } +// WantBlocks adds the given cids to the wantlist, tracked by the given session func (pm *WantManager) WantBlocks(ctx context.Context, ks []*cid.Cid, peers []peer.ID, ses uint64) { log.Infof("want blocks: %s", ks) pm.addEntries(ctx, ks, peers, false, ses) } +// CancelWants removes the given cids from the wantlist, tracked by the given session func (pm *WantManager) CancelWants(ctx context.Context, ks []*cid.Cid, peers []peer.ID, ses uint64) { pm.addEntries(context.Background(), ks, peers, true, ses) } @@ -134,9 +138,10 @@ func (pm *WantManager) startPeerHandler(p peer.ID) *msgQueue { // new 
peer, we will want to give them our full wantlist fullwantlist := bsmsg.New(true) - for _, e := range pm.wl.Entries() { - ne := *e - mq.wl.AddEntry(&ne) + for _, e := range pm.bcwl.Entries() { + for k := range e.SesTrk { + mq.wl.AddEntry(e, k) + } fullwantlist.AddEntry(e.Cid, e.Priority) } mq.out = fullwantlist @@ -284,13 +289,23 @@ func (pm *WantManager) Run() { select { case ws := <-pm.incoming: + // is this a broadcast or not? + brdc := len(ws.targets) == 0 + // add changes to our wantlist for _, e := range ws.entries { if e.Cancel { + if brdc { + pm.bcwl.Remove(e.Cid, ws.from) + } + if pm.wl.Remove(e.Cid, ws.from) { pm.wantlistGauge.Dec() } } else { + if brdc { + pm.bcwl.AddEntry(e.Entry, ws.from) + } if pm.wl.AddEntry(e.Entry, ws.from) { pm.wantlistGauge.Inc() } @@ -300,7 +315,7 @@ func (pm *WantManager) Run() { // broadcast those wantlist changes if len(ws.targets) == 0 { for _, p := range pm.peers { - p.addMessage(ws.entries) + p.addMessage(ws.entries, ws.from) } } else { for _, t := range ws.targets { @@ -309,24 +324,10 @@ func (pm *WantManager) Run() { log.Warning("tried sending wantlist change to non-partner peer") continue } - p.addMessage(ws.entries) + p.addMessage(ws.entries, ws.from) } } - case <-tock.C: - // resend entire wantlist every so often (REALLY SHOULDNT BE NECESSARY) - var es []*bsmsg.Entry - for _, e := range pm.wl.Entries() { - es = append(es, &bsmsg.Entry{Entry: e}) - } - - for _, p := range pm.peers { - p.outlk.Lock() - p.out = bsmsg.New(true) - p.outlk.Unlock() - - p.addMessage(es) - } case p := <-pm.connect: pm.startPeerHandler(p) case p := <-pm.disconnect: @@ -347,14 +348,14 @@ func (wm *WantManager) newMsgQueue(p peer.ID) *msgQueue { return &msgQueue{ done: make(chan struct{}), work: make(chan struct{}, 1), - wl: wantlist.New(), + wl: wantlist.NewThreadSafe(), network: wm.network, p: p, refcnt: 1, } } -func (mq *msgQueue) addMessage(entries []*bsmsg.Entry) { +func (mq *msgQueue) addMessage(entries []*bsmsg.Entry, ses uint64) { var 
work bool mq.outlk.Lock() defer func() { @@ -378,12 +379,12 @@ func (mq *msgQueue) addMessage(entries []*bsmsg.Entry) { // one passed in for _, e := range entries { if e.Cancel { - if mq.wl.Remove(e.Cid) { + if mq.wl.Remove(e.Cid, ses) { work = true mq.out.Cancel(e.Cid) } } else { - if mq.wl.Add(e.Cid, e.Priority) { + if mq.wl.Add(e.Cid, e.Priority, ses) { work = true mq.out.AddEntry(e.Cid, e.Priority) } From a19d8ddf089e381000d323ac8b729a7a94fc5724 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 6 Jul 2017 12:06:57 -0700 Subject: [PATCH 0559/1035] address CR License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@194f8988a0e6b4020364c020470385a733253792 --- bitswap/session.go | 3 ++- bitswap/session_test.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/bitswap/session.go b/bitswap/session.go index 128b377d4..614aa4076 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -5,11 +5,11 @@ import ( "time" notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru" loggables "gx/ipfs/QmVesPmqbPp7xRGyY96tnBwzDtVV1nqv4SCVxo5zCqKyH8/go-libp2p-loggables" + blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) @@ -191,6 +191,7 @@ func (s *Session) run(ctx context.Context) { _, ok := s.liveWants[lwchk.c.KeyString()] lwchk.resp <- ok case <-ctx.Done(): + s.tick.Stop() return } } diff --git a/bitswap/session_test.go b/bitswap/session_test.go index e2b959fed..99a0abd39 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -7,8 +7,8 @@ import ( "time" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - 
blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" + blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" ) From 6593e6ded16aebb3ed50b860b7760ccb214328e3 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 6 Jul 2017 12:17:25 -0700 Subject: [PATCH 0560/1035] extract bitswap metrics to separate struct for 64bit alignment License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@c8f38296a34e7b01c6c08c61c2e831ee57b8e926 --- bitswap/bitswap.go | 32 +++++++++++++++++++------------- bitswap/bitswap_test.go | 2 +- bitswap/session_test.go | 4 ++-- bitswap/stat.go | 19 ++++++++++--------- bitswap/workers.go | 4 ++-- 5 files changed, 34 insertions(+), 27 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index e0da2477a..2ebcd4ae7 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -99,6 +99,7 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, newBlocks: make(chan *cid.Cid, HasBlockBufferSize), provideKeys: make(chan *cid.Cid, provideKeysBufferSize), wm: NewWantManager(ctx, network), + counters: new(counters), dupMetric: dupHist, allMetric: allHist, @@ -152,14 +153,8 @@ type Bitswap struct { process process.Process // Counters for various statistics - counterLk sync.Mutex - blocksRecvd int - dupBlocksRecvd int - dupDataRecvd uint64 - blocksSent int - dataSent uint64 - dataRecvd uint64 - messagesRecvd uint64 + counterLk sync.Mutex + counters *counters // Metrics interface metrics dupMetric metrics.Histogram @@ -173,6 +168,16 @@ type Bitswap struct { sessIDLk sync.Mutex } +type counters struct { + blocksRecvd uint64 + dupBlocksRecvd uint64 + dupDataRecvd uint64 + blocksSent uint64 + dataSent uint64 + dataRecvd uint64 + messagesRecvd uint64 +} + type blockRequest struct { Cid *cid.Cid Ctx context.Context @@ -338,7 +343,7 @@ func (bs *Bitswap) SessionsForBlock(c *cid.Cid) []*Session { } func 
(bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { - atomic.AddUint64(&bs.messagesRecvd, 1) + atomic.AddUint64(&bs.counters.messagesRecvd, 1) // This call records changes to wantlists, blocks received, // and number of bytes transfered. @@ -403,12 +408,13 @@ func (bs *Bitswap) updateReceiveCounters(b blocks.Block) { bs.counterLk.Lock() defer bs.counterLk.Unlock() + c := bs.counters - bs.blocksRecvd++ - bs.dataRecvd += uint64(len(b.RawData())) + c.blocksRecvd++ + c.dataRecvd += uint64(len(b.RawData())) if has { - bs.dupBlocksRecvd++ - bs.dupDataRecvd += uint64(blkLen) + c.dupBlocksRecvd++ + c.dupDataRecvd += uint64(blkLen) } } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 7842ae559..506b8d0c1 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -291,7 +291,7 @@ func TestEmptyKey(t *testing.T) { } } -func assertStat(st *Stat, sblks, rblks int, sdata, rdata uint64) error { +func assertStat(st *Stat, sblks, rblks, sdata, rdata uint64) error { if sblks != st.BlocksSent { return fmt.Errorf("mismatch in blocks sent: %d vs %d", sblks, st.BlocksSent) } diff --git a/bitswap/session_test.go b/bitswap/session_test.go index 99a0abd39..0574bd0c3 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -103,8 +103,8 @@ func TestSessionBetweenPeers(t *testing.T) { } } for _, is := range inst[2:] { - if is.Exchange.messagesRecvd > 2 { - t.Fatal("uninvolved nodes should only receive two messages", is.Exchange.messagesRecvd) + if is.Exchange.counters.messagesRecvd > 2 { + t.Fatal("uninvolved nodes should only receive two messages", is.Exchange.counters.messagesRecvd) } } } diff --git a/bitswap/stat.go b/bitswap/stat.go index 2f95d9e8b..fb5eb5011 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -10,11 +10,11 @@ type Stat struct { ProvideBufLen int Wantlist []*cid.Cid Peers []string - BlocksReceived int + BlocksReceived uint64 DataReceived uint64 - BlocksSent int + BlocksSent uint64 
DataSent uint64 - DupBlksReceived int + DupBlksReceived uint64 DupDataReceived uint64 } @@ -23,12 +23,13 @@ func (bs *Bitswap) Stat() (*Stat, error) { st.ProvideBufLen = len(bs.newBlocks) st.Wantlist = bs.GetWantlist() bs.counterLk.Lock() - st.BlocksReceived = bs.blocksRecvd - st.DupBlksReceived = bs.dupBlocksRecvd - st.DupDataReceived = bs.dupDataRecvd - st.BlocksSent = bs.blocksSent - st.DataSent = bs.dataSent - st.DataReceived = bs.dataRecvd + c := bs.counters + st.BlocksReceived = c.blocksRecvd + st.DupBlksReceived = c.dupBlocksRecvd + st.DupDataReceived = c.dupDataRecvd + st.BlocksSent = c.blocksSent + st.DataSent = c.dataSent + st.DataReceived = c.dataRecvd bs.counterLk.Unlock() for _, p := range bs.engine.Peers() { diff --git a/bitswap/workers.go b/bitswap/workers.go index ac1e41eb8..a899f06bb 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -73,8 +73,8 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { bs.wm.SendBlock(ctx, envelope) bs.counterLk.Lock() - bs.blocksSent++ - bs.dataSent += uint64(len(envelope.Block.RawData())) + bs.counters.blocksSent++ + bs.counters.dataSent += uint64(len(envelope.Block.RawData())) bs.counterLk.Unlock() case <-ctx.Done(): return From a8a29b6414daa8f6a76eec06a3afad0f3c4c1557 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 7 Jul 2017 11:40:41 -0700 Subject: [PATCH 0561/1035] fix issue with sessions not receiving locally added blocks License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@ee8af715926e0c18bdcd8a40e647b635d9960056 --- bitswap/bitswap.go | 10 ++++- bitswap/session.go | 95 ++++++++++++++++++++++++++++++----------- bitswap/session_test.go | 40 +++++++++++++++++ 3 files changed, 120 insertions(+), 25 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 2ebcd4ae7..d9f4fea9a 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -317,6 +317,10 @@ func (bs *Bitswap) HasBlock(blk blocks.Block) error { // it now as it requires more thought and isnt 
causing immediate problems. bs.notifications.Publish(blk) + for _, s := range bs.SessionsForBlock(blk.Cid()) { + s.receiveBlockFrom("", blk) + } + bs.engine.AddBlock(blk) select { @@ -370,7 +374,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg wg := sync.WaitGroup{} for _, block := range iblocks { wg.Add(1) - go func(b blocks.Block) { + go func(b blocks.Block) { // TODO: this probably doesnt need to be a goroutine... defer wg.Done() bs.updateReceiveCounters(b) @@ -382,7 +386,11 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg ses.receiveBlockFrom(p, b) bs.CancelWants([]*cid.Cid{k}, ses.id) } + log.Debugf("got block %s from %s", b, p) + // TODO: rework this to not call 'HasBlock'. 'HasBlock' is really + // designed to be called when blocks are coming in from non-bitswap + // places (like the user manually adding data) if err := bs.HasBlock(b); err != nil { log.Warningf("ReceiveMessage HasBlock error: %s", err) } diff --git a/bitswap/session.go b/bitswap/session.go index 614aa4076..53db1a28a 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -21,7 +21,7 @@ const activeWantsLimit = 16 // info to, and who to request blocks from type Session struct { ctx context.Context - tofetch []*cid.Cid + tofetch *cidQueue activePeers map[peer.ID]struct{} activePeersArr []peer.ID @@ -55,6 +55,7 @@ func (bs *Bitswap) NewSession(ctx context.Context) *Session { liveWants: make(map[string]time.Time), newReqs: make(chan []*cid.Cid), cancelKeys: make(chan []*cid.Cid), + tofetch: newCidQueue(), interestReqs: make(chan interestReq), ctx: ctx, bs: bs, @@ -157,7 +158,9 @@ func (s *Session) run(ctx context.Context) { s.wantBlocks(ctx, now) } - s.tofetch = append(s.tofetch, keys...) 
+ for _, k := range keys { + s.tofetch.Push(k) + } case keys := <-s.cancelKeys: s.cancel(keys) @@ -188,8 +191,7 @@ func (s *Session) run(ctx context.Context) { case p := <-newpeers: s.addActivePeer(p) case lwchk := <-s.interestReqs: - _, ok := s.liveWants[lwchk.c.KeyString()] - lwchk.resp <- ok + lwchk.resp <- s.cidIsWanted(lwchk.c) case <-ctx.Done(): s.tick.Stop() return @@ -197,19 +199,31 @@ func (s *Session) run(ctx context.Context) { } } +func (s *Session) cidIsWanted(c *cid.Cid) bool { + _, ok := s.liveWants[c.KeyString()] + if !ok { + ok = s.tofetch.Has(c) + } + + return ok +} + func (s *Session) receiveBlock(ctx context.Context, blk blocks.Block) { - ks := blk.Cid().KeyString() - if _, ok := s.liveWants[ks]; ok { - tval := s.liveWants[ks] - s.latTotal += time.Since(tval) + c := blk.Cid() + if s.cidIsWanted(c) { + ks := c.KeyString() + tval, ok := s.liveWants[ks] + if ok { + s.latTotal += time.Since(tval) + delete(s.liveWants, ks) + } else { + s.tofetch.Remove(c) + } s.fetchcnt++ - delete(s.liveWants, ks) s.notif.Publish(blk) - if len(s.tofetch) > 0 { - next := s.tofetch[0:1] - s.tofetch = s.tofetch[1:] - s.wantBlocks(ctx, next) + if next := s.tofetch.Pop(); next != nil { + s.wantBlocks(ctx, []*cid.Cid{next}) } } } @@ -222,19 +236,9 @@ func (s *Session) wantBlocks(ctx context.Context, ks []*cid.Cid) { } func (s *Session) cancel(keys []*cid.Cid) { - sset := cid.NewSet() for _, c := range keys { - sset.Add(c) + s.tofetch.Remove(c) } - var i, j int - for ; j < len(s.tofetch); j++ { - if sset.Has(s.tofetch[j]) { - continue - } - s.tofetch[i] = s.tofetch[j] - i++ - } - s.tofetch = s.tofetch[:i] } func (s *Session) cancelWants(keys []*cid.Cid) { @@ -260,3 +264,46 @@ func (s *Session) GetBlocks(ctx context.Context, keys []*cid.Cid) (<-chan blocks func (s *Session) GetBlock(parent context.Context, k *cid.Cid) (blocks.Block, error) { return getBlock(parent, k, s.GetBlocks) } + +type cidQueue struct { + elems []*cid.Cid + eset *cid.Set +} + +func newCidQueue() 
*cidQueue { + return &cidQueue{eset: cid.NewSet()} +} + +func (cq *cidQueue) Pop() *cid.Cid { + for { + if len(cq.elems) == 0 { + return nil + } + + out := cq.elems[0] + cq.elems = cq.elems[1:] + + if cq.eset.Has(out) { + cq.eset.Remove(out) + return out + } + } +} + +func (cq *cidQueue) Push(c *cid.Cid) { + if cq.eset.Visit(c) { + cq.elems = append(cq.elems, c) + } +} + +func (cq *cidQueue) Remove(c *cid.Cid) { + cq.eset.Remove(c) +} + +func (cq *cidQueue) Has(c *cid.Cid) bool { + return cq.eset.Has(c) +} + +func (cq *cidQueue) Len() int { + return cq.eset.Len() +} diff --git a/bitswap/session_test.go b/bitswap/session_test.go index 0574bd0c3..dfdae79cb 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -202,3 +202,43 @@ func TestInterestCacheOverflow(t *testing.T) { t.Fatal("timed out waiting for block") } } + +func TestPutAfterSessionCacheEvict(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := getVirtualNetwork() + sesgen := NewTestSessionGenerator(vnet) + defer sesgen.Close() + bgen := blocksutil.NewBlockGenerator() + + blks := bgen.Blocks(2500) + inst := sesgen.Instances(1) + + a := inst[0] + + ses := a.Exchange.NewSession(ctx) + + var allcids []*cid.Cid + for _, blk := range blks[1:] { + allcids = append(allcids, blk.Cid()) + } + + blkch, err := ses.GetBlocks(ctx, allcids) + if err != nil { + t.Fatal(err) + } + + // wait to ensure that all the above cids were added to the sessions cache + time.Sleep(time.Millisecond * 50) + + if err := a.Exchange.HasBlock(blks[17]); err != nil { + t.Fatal(err) + } + + select { + case <-blkch: + case <-time.After(time.Millisecond * 50): + t.Fatal("timed out waiting for block") + } +} From 44d9d59a1e7d92cf404da25f73e12799ebb10a91 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Fri, 7 Jul 2017 20:54:07 +0200 Subject: [PATCH 0562/1035] bitswap: add few method comments License: MIT Signed-off-by: Jakub Sztandera This commit was moved from 
ipfs/go-bitswap@3f1d5e0552d3e395914d9a0ec25b21a434acd682 --- bitswap/wantlist/wantlist.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 5902442ca..de340ea6a 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -79,6 +79,7 @@ func (w *ThreadSafe) Add(c *cid.Cid, priority int, ses uint64) bool { return true } +// AddEntry adds given Entry to the wantlist. For more information see Add method. func (w *ThreadSafe) AddEntry(e *Entry, ses uint64) bool { w.lk.Lock() defer w.lk.Unlock() From c3e3bff237b02aa8e2b75be5c950dbcd72550de3 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 10 Jul 2017 23:05:37 -0700 Subject: [PATCH 0563/1035] fix closing and removal of sessions License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@a3f0813e58fd1a8ff9a8dd18fce3eeda19ba7b2d --- bitswap/session.go | 26 +++++++++++++++++++++++-- bitswap/session_test.go | 43 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 2 deletions(-) diff --git a/bitswap/session.go b/bitswap/session.go index 53db1a28a..3128cb0a0 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -78,13 +78,28 @@ func (bs *Bitswap) NewSession(ctx context.Context) *Session { return s } +func (bs *Bitswap) removeSession(s *Session) { + bs.sessLk.Lock() + defer bs.sessLk.Unlock() + for i := 0; i < len(bs.sessions); i++ { + if bs.sessions[i] == s { + bs.sessions[i] = bs.sessions[len(bs.sessions)-1] + bs.sessions = bs.sessions[:len(bs.sessions)-1] + return + } + } +} + type blkRecv struct { from peer.ID blk blocks.Block } func (s *Session) receiveBlockFrom(from peer.ID, blk blocks.Block) { - s.incoming <- blkRecv{from: from, blk: blk} + select { + case s.incoming <- blkRecv{from: from, blk: blk}: + case <-s.ctx.Done(): + } } type interestReq struct { @@ -105,7 +120,13 @@ func (s *Session) isLiveWant(c *cid.Cid) bool { c: c, resp: resp, } - return <-resp + + select { + case want := 
<-resp: + return want + case <-s.ctx.Done(): + return false + } } func (s *Session) interestedIn(c *cid.Cid) bool { @@ -194,6 +215,7 @@ func (s *Session) run(ctx context.Context) { lwchk.resp <- s.cidIsWanted(lwchk.c) case <-ctx.Done(): s.tick.Stop() + s.bs.removeSession(s) return } } diff --git a/bitswap/session_test.go b/bitswap/session_test.go index dfdae79cb..6d981eb4b 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -242,3 +242,46 @@ func TestPutAfterSessionCacheEvict(t *testing.T) { t.Fatal("timed out waiting for block") } } + +func TestMultipleSessions(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := getVirtualNetwork() + sesgen := NewTestSessionGenerator(vnet) + defer sesgen.Close() + bgen := blocksutil.NewBlockGenerator() + + blk := bgen.Blocks(1)[0] + inst := sesgen.Instances(2) + + a := inst[0] + b := inst[1] + + ctx1, cancel1 := context.WithCancel(ctx) + ses := a.Exchange.NewSession(ctx1) + + blkch, err := ses.GetBlocks(ctx, []*cid.Cid{blk.Cid()}) + if err != nil { + t.Fatal(err) + } + cancel1() + + ses2 := a.Exchange.NewSession(ctx) + blkch2, err := ses2.GetBlocks(ctx, []*cid.Cid{blk.Cid()}) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 10) + if err := b.Exchange.HasBlock(blk); err != nil { + t.Fatal(err) + } + + select { + case <-blkch2: + case <-time.After(time.Second * 20): + t.Fatal("bad juju") + } + _ = blkch +} From c5eab59819e626c4317601782668899b8efc9f55 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 11 Jul 2017 19:17:51 -0700 Subject: [PATCH 0564/1035] update go-multihash and bubble up changes License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@bf2c4e4e7b77f1f969718465703f1cf368d872ae --- bitswap/bitswap.go | 6 +++--- bitswap/bitswap_test.go | 6 +++--- bitswap/decision/bench_test.go | 6 +++--- bitswap/decision/engine.go | 4 ++-- bitswap/decision/engine_test.go | 4 ++-- bitswap/decision/ledger.go | 4 ++-- 
bitswap/decision/peer_request_queue.go | 4 ++-- bitswap/decision/peer_request_queue_test.go | 4 ++-- bitswap/get.go | 4 ++-- bitswap/message/message.go | 6 +++--- bitswap/message/message_test.go | 6 +++--- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 14 +++++++------- bitswap/notifications/notifications.go | 4 ++-- bitswap/notifications/notifications_test.go | 4 ++-- bitswap/session.go | 8 ++++---- bitswap/session_test.go | 4 ++-- bitswap/stat.go | 2 +- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 4 ++-- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 6 +++--- bitswap/testutils.go | 4 ++-- bitswap/wantlist/wantlist.go | 2 +- bitswap/wantlist/wantlist_test.go | 2 +- bitswap/wantmanager.go | 4 ++-- bitswap/workers.go | 4 ++-- 27 files changed, 63 insertions(+), 63 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d9f4fea9a..1cf9fbd3f 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -23,9 +23,9 @@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" - peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" + peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) var log = logging.Logger("bitswap") diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 506b8d0c1..fae0868c0 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -15,12 +15,12 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay 
"github.com/ipfs/go-ipfs/thirdparty/delay" travis "github.com/ipfs/go-ipfs/thirdparty/testutil/ci/travis" - blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" + blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - p2ptestutil "gx/ipfs/Qma2j8dYePrvN5DoNgwh1uAuu3FFtEtrUQFmr737ws8nCp/go-libp2p-netutil" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + p2ptestutil "gx/ipfs/QmViDDJGzv2TKrheoxckReECc72iRgaYsobG2HYUGWuPVF/go-libp2p-netutil" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 3016fd07b..17e6ea085 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,9 +7,9 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - u "gx/ipfs/QmWbjfz3u6HkAdPh34dgPchGbQjob6LXLhAeCGii2TX69n/go-ipfs-util" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" - "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" + u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 600df11f2..83915afd8 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -10,8 +10,8 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - blocks 
"gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" - peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" + blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" + peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) // TODO consider taking responsibility for other types of requests. For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 7c2da018e..62d8dadd8 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -12,10 +12,10 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" dssync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" - blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" - peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" + peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 6c26439ae..6b249b083 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,8 +6,8 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" - peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) func newLedger(p peer.ID) *ledger { diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 0d37122e9..77d2e8a12 100644 --- 
a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,8 +7,8 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" - peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) type peerRequestQueue interface { diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index edacbd065..6c3e9ce50 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -10,8 +10,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" "github.com/ipfs/go-ipfs/thirdparty/testutil" - u "gx/ipfs/QmWbjfz3u6HkAdPh34dgPchGbQjob6LXLhAeCGii2TX69n/go-ipfs-util" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" + u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/get.go b/bitswap/get.go index a72ead83a..263a6b501 100644 --- a/bitswap/get.go +++ b/bitswap/get.go @@ -6,9 +6,9 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" + blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" ) type getBlocksFunc func(context.Context, []*cid.Cid) (<-chan blocks.Block, error) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 5c4c31154..27631e049 100644 
--- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -6,12 +6,12 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" + blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" - inet "gx/ipfs/QmRscs8KxrSmSv4iuevHv8JfuUzHBMoqiaHzxfDRiksd6e/go-libp2p-net" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" + inet "gx/ipfs/QmahYsGWry85Y7WUe2SX5G4JkH2zifEQAUtJVLZ24aC9DF/go-libp2p-net" ) // TODO move message.go into the bitswap package diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index c1f215523..c4197f9a9 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -7,9 +7,9 @@ import ( proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" - u "gx/ipfs/QmWbjfz3u6HkAdPh34dgPchGbQjob6LXLhAeCGii2TX69n/go-ipfs-util" - blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" + u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" ) func mkFakeCid(s string) *cid.Cid { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index f9289974f..051fccd48 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -4,9 +4,9 @@ import ( "context" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + 
cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" - peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) var ( diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index c7b52bc3a..573b64a4f 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,15 +8,15 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - routing "gx/ipfs/QmP1wMAqk6aZYRZirbaAwmrNeqFRgQrwBt3orUtvSa1UYD/go-libp2p-routing" - inet "gx/ipfs/QmRscs8KxrSmSv4iuevHv8JfuUzHBMoqiaHzxfDRiksd6e/go-libp2p-net" + pstore "gx/ipfs/QmPgDWmTmuzvP7QE5zwo1TmjbJme9pmZHNujB2453jkCTr/go-libp2p-peerstore" + routing "gx/ipfs/QmPjTrrSfE6TzLv6ya6VWhGcCgPrUAdcgrDcQyRDX2VyW1/go-libp2p-routing" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - host "gx/ipfs/QmUywuGNZoUKV8B9iyvup9bPkLiMrhTsyVMkeSXW5VxAfC/go-libp2p-host" - pstore "gx/ipfs/QmXZSd1qR5BxZkPyuwfT5jpqQFScZccoZvDneXsKzCNHWX/go-libp2p-peerstore" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + ma "gx/ipfs/QmXY77cVe7rVRQXZZQRioukUM7aRW3BTcAgJe12MCtb3Ji/go-multiaddr" + peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" - ma "gx/ipfs/QmcyqRMCAXVtYPS4DiBrA7sezL9rRGfW8Ctx7cywL4TXJj/go-multiaddr" - peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" + host "gx/ipfs/QmZy7c24mmkEHpNJndwgsEE3wcVxHd8yB969yTnAJFVw7f/go-libp2p-host" + inet "gx/ipfs/QmahYsGWry85Y7WUe2SX5G4JkH2zifEQAUtJVLZ24aC9DF/go-libp2p-net" ) var log = logging.Logger("bitswap_network") diff --git 
a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 1999948da..3a52ed40b 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -3,10 +3,10 @@ package notifications import ( "context" - blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" + blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 4312444fc..968d9b04b 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -7,8 +7,8 @@ import ( "time" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/session.go b/bitswap/session.go index 3128cb0a0..553549c99 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -7,11 +7,11 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + loggables "gx/ipfs/QmT4PgCNdv73hnFAqzHqwW44q7M9PWpykSswHDxndquZbc/go-libp2p-loggables" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru" - loggables 
"gx/ipfs/QmVesPmqbPp7xRGyY96tnBwzDtVV1nqv4SCVxo5zCqKyH8/go-libp2p-loggables" - blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" - peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" + peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) const activeWantsLimit = 16 diff --git a/bitswap/session_test.go b/bitswap/session_test.go index 6d981eb4b..55a79408d 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -8,8 +8,8 @@ import ( blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" ) func TestBasicSessions(t *testing.T) { diff --git a/bitswap/stat.go b/bitswap/stat.go index fb5eb5011..8e24e3e06 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -3,7 +3,7 @@ package bitswap import ( "sort" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" ) type Stat struct { diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index aaa0d24fd..2b94c45b6 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,7 +3,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" "github.com/ipfs/go-ipfs/thirdparty/testutil" - peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" + peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 325892a46..d4d55a845 100644 --- a/bitswap/testnet/network_test.go +++ 
b/bitswap/testnet/network_test.go @@ -10,8 +10,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - blocks "gx/ipfs/QmXxGS5QsUxpR3iqL5DjmsYPHR1Yz74siRQ4ChJqWFosMh/go-block-format" - peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" + blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" + peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 1e59eb1d4..ef152172e 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,9 +5,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet "gx/ipfs/QmQA5mdxru8Bh6dpC9PJfSkumqnmHgJX7knxSgBo5Lpime/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" - peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" + peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + mockpeernet "gx/ipfs/QmapADMpK4e5kFGBxC2aHreaDqKP9vmMng5f91MA14Ces9/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 8c7db87eb..c41edb554 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,9 +9,9 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - routing "gx/ipfs/QmP1wMAqk6aZYRZirbaAwmrNeqFRgQrwBt3orUtvSa1UYD/go-libp2p-routing" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" - peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" + routing 
"gx/ipfs/QmPjTrrSfE6TzLv6ya6VWhGcCgPrUAdcgrDcQyRDX2VyW1/go-libp2p-routing" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index d3bb98b0e..1b19bdd47 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -12,8 +12,8 @@ import ( ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" ds_sync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" - p2ptestutil "gx/ipfs/Qma2j8dYePrvN5DoNgwh1uAuu3FFtEtrUQFmr737ws8nCp/go-libp2p-netutil" - peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" + p2ptestutil "gx/ipfs/QmViDDJGzv2TKrheoxckReECc72iRgaYsobG2HYUGWuPVF/go-libp2p-netutil" + peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index de340ea6a..c6dbf6cf6 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" ) type ThreadSafe struct { diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/wantlist/wantlist_test.go index d6027a718..053186dc9 100644 --- a/bitswap/wantlist/wantlist_test.go +++ b/bitswap/wantlist/wantlist_test.go @@ -3,7 +3,7 @@ package wantlist import ( "testing" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" ) var testcids []*cid.Cid diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 800fa1c40..780282a74 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -11,8 +11,8 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" - peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" + cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) type WantManager struct { diff --git a/bitswap/workers.go b/bitswap/workers.go index a899f06bb..424a9b211 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -11,8 +11,8 @@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - cid "gx/ipfs/Qma4RJSuh7mMeJQYCqMbKzekn6EwBo7HEs5AQYjVRMQATB/go-cid" - peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" + cid 
"gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) var TaskWorkerCount = 8 From 9b4530a14500c6a35475028f9e030a3038358f3f Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 15 Jul 2017 20:18:17 -0700 Subject: [PATCH 0565/1035] Only open a message sender when we have messages to send License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@f6230f4f97a1b2f237501069cee78b9541e24635 --- bitswap/wantmanager.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 780282a74..4ae12f499 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -187,15 +187,6 @@ func (mq *msgQueue) runQueue(ctx context.Context) { } func (mq *msgQueue) doWork(ctx context.Context) { - if mq.sender == nil { - err := mq.openSender(ctx) - if err != nil { - log.Infof("cant open message sender to peer %s: %s", mq.p, err) - // TODO: cant connect, what now? - return - } - } - // grab outgoing message mq.outlk.Lock() wlm := mq.out @@ -206,6 +197,16 @@ func (mq *msgQueue) doWork(ctx context.Context) { mq.out = nil mq.outlk.Unlock() + // NB: only open a stream if we actually have data to send + if mq.sender == nil { + err := mq.openSender(ctx) + if err != nil { + log.Infof("cant open message sender to peer %s: %s", mq.p, err) + // TODO: cant connect, what now? + return + } + } + // send wantlist updates for { // try to send this message until we fail. 
err := mq.sender.SendMsg(ctx, wlm) From c6044ed8d24701e09b89edc217a840589d587529 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 15 Jul 2017 22:18:02 -0700 Subject: [PATCH 0566/1035] ensure testnet peers get evenly connected mesh License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@bb3d2abca59fa1c29036d5e123938be51aa3cd0b --- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 21 +++++++++++++++++++++ bitswap/testutils.go | 2 +- 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index ef152172e..93429ef4e 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -1,7 +1,7 @@ package bitswap import ( - context "context" + "context" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" @@ -37,4 +37,4 @@ func (pn *peernet) HasPeer(p peer.ID) bool { return false } -var _ Network = &peernet{} +var _ Network = (*peernet)(nil) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index c41edb554..133ea395d 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,16 +9,21 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + routing "gx/ipfs/QmPjTrrSfE6TzLv6ya6VWhGcCgPrUAdcgrDcQyRDX2VyW1/go-libp2p-routing" + logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) +var log = logging.Logger("bstestnet") + func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { return &network{ clients: make(map[peer.ID]bsnet.Receiver), delay: d, routingserver: rs, + conns: make(map[string]struct{}), } } @@ -26,6 +31,7 @@ type network struct { 
clients map[peer.ID]bsnet.Receiver routingserver mockrouting.Server delay delay.D + conns map[string]struct{} } func (n *network) Adapter(p testutil.Identity) bsnet.BitSwapNetwork { @@ -149,7 +155,22 @@ func (nc *networkClient) ConnectTo(_ context.Context, p peer.ID) error { if !nc.network.HasPeer(p) { return errors.New("no such peer in network") } + tag := tagForPeers(nc.local, p) + if _, ok := nc.network.conns[tag]; ok { + log.Warning("ALREADY CONNECTED TO PEER (is this a reconnect? test lib needs fixing)") + return nil + } + nc.network.conns[tag] = struct{}{} + // TODO: add handling for disconnects + nc.network.clients[p].PeerConnected(nc.local) nc.Receiver.PeerConnected(p) return nil } + +func tagForPeers(a, b peer.ID) string { + if a < b { + return string(a + b) + } + return string(b + a) +} diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 1b19bdd47..1b1fcf20a 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -59,7 +59,7 @@ func (g *SessionGenerator) Instances(n int) []Instance { for i, inst := range instances { for j := i + 1; j < len(instances); j++ { oinst := instances[j] - inst.Exchange.PeerConnected(oinst.Peer) + inst.Exchange.network.ConnectTo(context.Background(), oinst.Peer) } } return instances From 8d4aca2a4d5059ed8ec3926ee5f258708b37cbac Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 27 Jul 2017 00:02:03 -0700 Subject: [PATCH 0567/1035] gx: update deps License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@fcdb52ab3301f130b83c7da69abc8c09c05a6af6 --- bitswap/bitswap_test.go | 2 +- bitswap/network/ipfs_impl.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index fae0868c0..316eda279 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -20,7 +20,7 @@ import ( detectrace 
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" - p2ptestutil "gx/ipfs/QmViDDJGzv2TKrheoxckReECc72iRgaYsobG2HYUGWuPVF/go-libp2p-netutil" + p2ptestutil "gx/ipfs/QmV5Ny5H649nHUEYjtZistVPQVqqNVMZC5khmQvnprzdNZ/go-libp2p-netutil" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 573b64a4f..23b421ed3 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -10,12 +10,12 @@ import ( pstore "gx/ipfs/QmPgDWmTmuzvP7QE5zwo1TmjbJme9pmZHNujB2453jkCTr/go-libp2p-peerstore" routing "gx/ipfs/QmPjTrrSfE6TzLv6ya6VWhGcCgPrUAdcgrDcQyRDX2VyW1/go-libp2p-routing" + host "gx/ipfs/QmRNyPNJGNCaZyYonJj7owciWTsMd9gRfEKmZY3o6xwN3h/go-libp2p-host" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" ma "gx/ipfs/QmXY77cVe7rVRQXZZQRioukUM7aRW3BTcAgJe12MCtb3Ji/go-multiaddr" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - host "gx/ipfs/QmZy7c24mmkEHpNJndwgsEE3wcVxHd8yB969yTnAJFVw7f/go-libp2p-host" inet "gx/ipfs/QmahYsGWry85Y7WUe2SX5G4JkH2zifEQAUtJVLZ24aC9DF/go-libp2p-net" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 93429ef4e..8034484a1 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,9 +5,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + mockpeernet "gx/ipfs/QmSatLR9HCrZjPqomt6VdNCoJmHMz8NP34WfpfBznJZ25M/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - 
mockpeernet "gx/ipfs/QmapADMpK4e5kFGBxC2aHreaDqKP9vmMng5f91MA14Ces9/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 1b1fcf20a..2ccf70058 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,9 +10,9 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + p2ptestutil "gx/ipfs/QmV5Ny5H649nHUEYjtZistVPQVqqNVMZC5khmQvnprzdNZ/go-libp2p-netutil" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" ds_sync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" - p2ptestutil "gx/ipfs/QmViDDJGzv2TKrheoxckReECc72iRgaYsobG2HYUGWuPVF/go-libp2p-netutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) From c6d3204959eac5483f43005987fb34b88c509166 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 27 Jul 2017 14:06:27 -0700 Subject: [PATCH 0568/1035] bitswap: serialize connect/disconnect notifications over one channel. Otherwise, we could end up receiving a disconnect notification before a connect notification (and think we have a connection that we don't have). 
License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@9d861423533fea9e72fb25bd94480350479b3cb9 --- bitswap/wantmanager.go | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 4ae12f499..39f0a1bae 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -17,10 +17,9 @@ import ( type WantManager struct { // sync channels for Run loop - incoming chan *wantSet - connect chan peer.ID // notification channel for new peers connecting - disconnect chan peer.ID // notification channel for peers disconnecting - peerReqs chan chan []peer.ID // channel to request connected peers on + incoming chan *wantSet + connectEvent chan peerStatus // notification channel for peers connecting/disconnecting + peerReqs chan chan []peer.ID // channel to request connected peers on // synchronized by Run loop, only touch inside there peers map[peer.ID]*msgQueue @@ -35,6 +34,11 @@ type WantManager struct { sentHistogram metrics.Histogram } +type peerStatus struct { + connect bool + peer peer.ID +} + func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantManager { ctx, cancel := context.WithCancel(ctx) wantlistGauge := metrics.NewCtx(ctx, "wantlist_total", @@ -43,8 +47,7 @@ func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantMana " this bitswap").Histogram(metricsBuckets) return &WantManager{ incoming: make(chan *wantSet, 10), - connect: make(chan peer.ID, 10), - disconnect: make(chan peer.ID, 10), + connectEvent: make(chan peerStatus, 10), peerReqs: make(chan chan []peer.ID), peers: make(map[peer.ID]*msgQueue), wl: wantlist.NewThreadSafe(), @@ -270,22 +273,22 @@ func (mq *msgQueue) openSender(ctx context.Context) error { func (pm *WantManager) Connected(p peer.ID) { select { - case pm.connect <- p: + case pm.connectEvent <- peerStatus{peer: p, connect: true}: case <-pm.ctx.Done(): } } func (pm *WantManager) 
Disconnected(p peer.ID) { select { - case pm.disconnect <- p: + case pm.connectEvent <- peerStatus{peer: p, connect: false}: case <-pm.ctx.Done(): } } // TODO: use goprocess here once i trust it func (pm *WantManager) Run() { - tock := time.NewTicker(rebroadcastDelay.Get()) - defer tock.Stop() + // NOTE: Do not open any streams or connections from anywhere in this + // event loop. Really, just don't do anything likely to block. for { select { case ws := <-pm.incoming: @@ -329,10 +332,12 @@ func (pm *WantManager) Run() { } } - case p := <-pm.connect: - pm.startPeerHandler(p) - case p := <-pm.disconnect: - pm.stopPeerHandler(p) + case p := <-pm.connectEvent: + if p.connect { + pm.startPeerHandler(p.peer) + } else { + pm.stopPeerHandler(p.peer) + } case req := <-pm.peerReqs: var peers []peer.ID for p := range pm.peers { From b8acf58a2959c098fcceca41287afddffa558bda Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 31 Jul 2017 14:04:40 -0700 Subject: [PATCH 0569/1035] gx: update go-libp2p-swarm fixes #4102 (fixed in go-libp2p-swarm) License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@05bed8d0aa4f41b37ea699504898d8aa9aa77d99 --- bitswap/bitswap_test.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 316eda279..3e262849e 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -19,8 +19,8 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" + p2ptestutil "gx/ipfs/QmSTbByZ1rJVn8KANcoiLDiPH2pgDaz33uT6JW6B9nMBW5/go-libp2p-netutil" cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" - p2ptestutil "gx/ipfs/QmV5Ny5H649nHUEYjtZistVPQVqqNVMZC5khmQvnprzdNZ/go-libp2p-netutil" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 8034484a1..fa64042ca 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,9 +5,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - mockpeernet "gx/ipfs/QmSatLR9HCrZjPqomt6VdNCoJmHMz8NP34WfpfBznJZ25M/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + mockpeernet "gx/ipfs/QmZPBrKq6S1fdYaRAzYZivJL12QkUqHwnNzF9wC8VXC4bo/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 2ccf70058..745c60a47 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,7 +10,7 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" - p2ptestutil "gx/ipfs/QmV5Ny5H649nHUEYjtZistVPQVqqNVMZC5khmQvnprzdNZ/go-libp2p-netutil" + p2ptestutil "gx/ipfs/QmSTbByZ1rJVn8KANcoiLDiPH2pgDaz33uT6JW6B9nMBW5/go-libp2p-netutil" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" ds_sync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" From a9aa9e4d07c2c1e5d7f5762bd41898f9bba9e734 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 16 Aug 2017 16:51:18 -0700 Subject: [PATCH 0570/1035] extract update go-testutil License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@24b5a964f497e4448ce62ec0a26eb2bd3b401652 --- bitswap/bitswap_test.go | 4 ++-- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/peer_request_queue_test.go | 2 +- bitswap/network/ipfs_impl.go | 2 +- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go 
| 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 4 ++-- 10 files changed, 13 insertions(+), 13 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 3e262849e..b540bb62e 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -14,13 +14,13 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - travis "github.com/ipfs/go-ipfs/thirdparty/testutil/ci/travis" blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" + travis "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil/ci/travis" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - p2ptestutil "gx/ipfs/QmSTbByZ1rJVn8KANcoiLDiPH2pgDaz33uT6JW6B9nMBW5/go-libp2p-netutil" cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + p2ptestutil "gx/ipfs/QmZG4W8GR9FpC4z69Vab9ENtEoxKjDnTym5oa7Q3Yr7P4o/go-libp2p-netutil" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 17e6ea085..6514faa21 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -6,10 +6,10 @@ import ( "testing" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - "github.com/ipfs/go-ipfs/thirdparty/testutil" u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 62d8dadd8..469fc2648 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -11,11 +11,11 @@ import ( context "context" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" - testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" dssync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + testutil "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" ) type peerAndEngine struct { diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 6c3e9ce50..e07addab6 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -9,9 +9,9 @@ import ( "testing" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - "github.com/ipfs/go-ipfs/thirdparty/testutil" u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" cid 
"gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 23b421ed3..15d43a67b 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -10,9 +10,9 @@ import ( pstore "gx/ipfs/QmPgDWmTmuzvP7QE5zwo1TmjbJme9pmZHNujB2453jkCTr/go-libp2p-peerstore" routing "gx/ipfs/QmPjTrrSfE6TzLv6ya6VWhGcCgPrUAdcgrDcQyRDX2VyW1/go-libp2p-routing" - host "gx/ipfs/QmRNyPNJGNCaZyYonJj7owciWTsMd9gRfEKmZY3o6xwN3h/go-libp2p-host" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + host "gx/ipfs/QmW8Rgju5JrSMHP7RDNdiwwXyenRqAbtSaPfdQKQC7ZdH6/go-libp2p-host" ma "gx/ipfs/QmXY77cVe7rVRQXZZQRioukUM7aRW3BTcAgJe12MCtb3Ji/go-multiaddr" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 2b94c45b6..34d6377cc 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -2,8 +2,8 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - "github.com/ipfs/go-ipfs/thirdparty/testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index d4d55a845..daabe63db 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -9,9 +9,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" 
blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + testutil "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index fa64042ca..2a020ca9c 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -4,10 +4,10 @@ import ( "context" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" - testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - mockpeernet "gx/ipfs/QmZPBrKq6S1fdYaRAzYZivJL12QkUqHwnNzF9wC8VXC4bo/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmXZ6XetFwaDNmszPCux9DaKqMykEJGDtWHSqprn94UXzM/go-libp2p/p2p/net/mock" + testutil "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 133ea395d..ee846fc07 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -8,7 +8,7 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + testutil "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" routing "gx/ipfs/QmPjTrrSfE6TzLv6ya6VWhGcCgPrUAdcgrDcQyRDX2VyW1/go-libp2p-routing" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 745c60a47..722156c17 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -8,12 +8,12 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" datastore2 
"github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "github.com/ipfs/go-ipfs/thirdparty/testutil" + testutil "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" - p2ptestutil "gx/ipfs/QmSTbByZ1rJVn8KANcoiLDiPH2pgDaz33uT6JW6B9nMBW5/go-libp2p-netutil" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" ds_sync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + p2ptestutil "gx/ipfs/QmZG4W8GR9FpC4z69Vab9ENtEoxKjDnTym5oa7Q3Yr7P4o/go-libp2p-netutil" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! From 30ac1e5c9f0bd84d74b76b129d22ca9431e109b0 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Sun, 30 Jul 2017 23:40:25 -0700 Subject: [PATCH 0571/1035] bitswap_test: make racy test less racy fixes #4108 License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@dda5a61fa7ebb0e9e55547b2283eb0d88e547000 --- bitswap/bitswap_test.go | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index b540bb62e..8e51ed540 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -21,6 +21,7 @@ import ( cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" p2ptestutil "gx/ipfs/QmZG4W8GR9FpC4z69Vab9ENtEoxKjDnTym5oa7Q3Yr7P4o/go-libp2p-netutil" + tu "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work @@ -332,13 +333,16 @@ func TestBasicBitswap(t *testing.T) { t.Fatal(err) } - time.Sleep(time.Millisecond * 25) - wl := instances[2].Exchange.WantlistForPeer(instances[1].Peer) - if len(wl) != 0 { - t.Fatal("should have no items in other peers wantlist") - } - if len(instances[1].Exchange.GetWantlist()) != 0 { - t.Fatal("shouldnt have anything in wantlist") + if err = tu.WaitFor(ctx, func() error { + if len(instances[2].Exchange.WantlistForPeer(instances[1].Peer)) != 0 { + return fmt.Errorf("should have no items in other peers wantlist") + } + if len(instances[1].Exchange.GetWantlist()) != 0 { + return fmt.Errorf("shouldnt have anything in wantlist") + } + return nil + }); err != nil { + t.Fatal(err) } st0, err := instances[0].Exchange.Stat() From 6110d0c836deeb8eee4e710425ce7dc818cfd39a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Wed, 23 Aug 2017 16:32:32 +0200 Subject: [PATCH 0572/1035] gx: update go-reuseport MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera This commit was moved from ipfs/go-bitswap@f9bf69edef44c0f226f5f13e36615eca2e4a1334 --- bitswap/bitswap_test.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 8e51ed540..6ae79efe9 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -20,7 +20,7 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" - p2ptestutil "gx/ipfs/QmZG4W8GR9FpC4z69Vab9ENtEoxKjDnTym5oa7Q3Yr7P4o/go-libp2p-netutil" + p2ptestutil "gx/ipfs/QmYdcTdkuCvFXLj2uejJF5aY3HWhtd8JLT4BjPxF9BNPYf/go-libp2p-netutil" tu "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go 
index 2a020ca9c..f9aa2e1ab 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -6,8 +6,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - mockpeernet "gx/ipfs/QmXZ6XetFwaDNmszPCux9DaKqMykEJGDtWHSqprn94UXzM/go-libp2p/p2p/net/mock" testutil "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" + mockpeernet "gx/ipfs/QmZyngpQxUGyx1T2bzEcst6YzERkvVwDzBMbsSQF4f1smE/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 722156c17..5ac4c7847 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -13,7 +13,7 @@ import ( ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" ds_sync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - p2ptestutil "gx/ipfs/QmZG4W8GR9FpC4z69Vab9ENtEoxKjDnTym5oa7Q3Yr7P4o/go-libp2p-netutil" + p2ptestutil "gx/ipfs/QmYdcTdkuCvFXLj2uejJF5aY3HWhtd8JLT4BjPxF9BNPYf/go-libp2p-netutil" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! From e461efeb48136ee588278d3397644f4bb42db357 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 23 Aug 2017 21:02:47 -0700 Subject: [PATCH 0573/1035] add blocks to the blockstore before returning them from blockservice sessions. fixes #4062 (yay!) 
License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@a7be1453b7d6188f9eac95fb5639b16136cbb6a8 --- bitswap/bitswap.go | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 1cf9fbd3f..41d2e9255 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -298,6 +298,14 @@ func (bs *Bitswap) CancelWants(cids []*cid.Cid, ses uint64) { // HasBlock announces the existance of a block to this bitswap service. The // service will potentially notify its peers. func (bs *Bitswap) HasBlock(blk blocks.Block) error { + return bs.receiveBlockFrom(blk, "") +} + +// TODO: Some of this stuff really only needs to be done when adding a block +// from the user, not when receiving it from the network. +// In case you run `git blame` on this comment, I'll save you some time: ask +// @whyrusleeping, I don't know the answers you seek. +func (bs *Bitswap) receiveBlockFrom(blk blocks.Block, from peer.ID) error { select { case <-bs.process.Closing(): return errors.New("bitswap is closed") @@ -317,8 +325,11 @@ func (bs *Bitswap) HasBlock(blk blocks.Block) error { // it now as it requires more thought and isnt causing immediate problems. bs.notifications.Publish(blk) - for _, s := range bs.SessionsForBlock(blk.Cid()) { - s.receiveBlockFrom("", blk) + k := blk.Cid() + ks := []*cid.Cid{k} + for _, s := range bs.SessionsForBlock(k) { + s.receiveBlockFrom(from, blk) + bs.CancelWants(ks, s.id) } bs.engine.AddBlock(blk) @@ -379,21 +390,12 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg bs.updateReceiveCounters(b) - k := b.Cid() - log.Event(ctx, "Bitswap.GetBlockRequest.End", k) - - for _, ses := range bs.SessionsForBlock(k) { - ses.receiveBlockFrom(p, b) - bs.CancelWants([]*cid.Cid{k}, ses.id) - } - log.Debugf("got block %s from %s", b, p) - // TODO: rework this to not call 'HasBlock'. 
'HasBlock' is really - // designed to be called when blocks are coming in from non-bitswap - // places (like the user manually adding data) - if err := bs.HasBlock(b); err != nil { - log.Warningf("ReceiveMessage HasBlock error: %s", err) + + if err := bs.receiveBlockFrom(b, p); err != nil { + log.Warningf("ReceiveMessage recvBlockFrom error: %s", err) } + log.Event(ctx, "Bitswap.GetBlockRequest.End", b.Cid()) }(block) } wg.Wait() From 3c010af47f41a051a33579f574d5ab6248d0ff71 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 28 Aug 2017 20:32:16 -0700 Subject: [PATCH 0574/1035] gx: update go-cid, go-multibase, base32 License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@93c911ace94db429cf3f1bb2b788c6da1e205748 --- bitswap/bitswap.go | 4 ++-- bitswap/bitswap_test.go | 10 +++++----- bitswap/decision/bench_test.go | 4 ++-- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 4 ++-- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/decision/peer_request_queue_test.go | 4 ++-- bitswap/get.go | 4 ++-- bitswap/message/message.go | 4 ++-- bitswap/message/message_test.go | 4 ++-- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 6 +++--- bitswap/notifications/notifications.go | 4 ++-- bitswap/notifications/notifications_test.go | 4 ++-- bitswap/session.go | 4 ++-- bitswap/session_test.go | 4 ++-- bitswap/stat.go | 2 +- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 4 ++-- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 6 +++--- bitswap/testutils.go | 4 ++-- bitswap/wantlist/wantlist.go | 2 +- bitswap/wantlist/wantlist_test.go | 2 +- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 2 +- 27 files changed, 49 insertions(+), 49 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 41d2e9255..35d48a35b 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -19,12 +19,12 @@ import ( flags 
"github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" + blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" - blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 6ae79efe9..1155309d7 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -14,14 +14,14 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" - travis "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil/ci/travis" + blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" + travis "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil/ci/travis" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" - p2ptestutil "gx/ipfs/QmYdcTdkuCvFXLj2uejJF5aY3HWhtd8JLT4BjPxF9BNPYf/go-libp2p-netutil" - tu "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + tu "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" + p2ptestutil 
"gx/ipfs/QmYTeBaLWbFKQAtVTHbxvTbKfgqrGJUupK4UwjeugownfD/go-libp2p-netutil" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 6514faa21..cb005e6ef 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -6,10 +6,10 @@ import ( "testing" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 83915afd8..74d5cf330 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -9,8 +9,8 @@ import ( bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 469fc2648..512548cf5 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -11,11 +11,11 @@ import ( context "context" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" - blocks 
"gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" + blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" dssync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" + testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - testutil "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 6b249b083..5cfdeb18d 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,7 +6,7 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 77d2e8a12..2606e8a4c 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index e07addab6..718da14e4 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -9,9 +9,9 @@ import ( "testing" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + cid 
"gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" - "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" + "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/get.go b/bitswap/get.go index 263a6b501..b22f7e1da 100644 --- a/bitswap/get.go +++ b/bitswap/get.go @@ -6,9 +6,9 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" + blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" ) type getBlocksFunc func(context.Context, []*cid.Cid) (<-chan blocks.Block, error) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 27631e049..273321305 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -6,9 +6,9 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" + blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" inet "gx/ipfs/QmahYsGWry85Y7WUe2SX5G4JkH2zifEQAUtJVLZ24aC9DF/go-libp2p-net" diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 
c4197f9a9..14233bf88 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -7,9 +7,9 @@ import ( proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" - blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" + blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" ) func mkFakeCid(s string) *cid.Cid { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 051fccd48..92d27676c 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -4,7 +4,7 @@ import ( "context" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 15d43a67b..30b5db20b 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,11 +8,11 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + routing "gx/ipfs/QmPR2JzfKd9poHx9XBhzoFeBBC31ZM3W5iUPKJZWyaoZZm/go-libp2p-routing" pstore "gx/ipfs/QmPgDWmTmuzvP7QE5zwo1TmjbJme9pmZHNujB2453jkCTr/go-libp2p-peerstore" - routing "gx/ipfs/QmPjTrrSfE6TzLv6ya6VWhGcCgPrUAdcgrDcQyRDX2VyW1/go-libp2p-routing" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" - host 
"gx/ipfs/QmW8Rgju5JrSMHP7RDNdiwwXyenRqAbtSaPfdQKQC7ZdH6/go-libp2p-host" + host "gx/ipfs/QmUwW8jMQDxXhLD2j4EfWqLEMX3MsvyWcWGvJPVDh1aTmu/go-libp2p-host" ma "gx/ipfs/QmXY77cVe7rVRQXZZQRioukUM7aRW3BTcAgJe12MCtb3Ji/go-multiaddr" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 3a52ed40b..4b1a62eea 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -3,10 +3,10 @@ package notifications import ( "context" - blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" + blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 968d9b04b..d10a0be6b 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -7,8 +7,8 @@ import ( "time" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" - blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/session.go b/bitswap/session.go index 553549c99..7e55bb5e9 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -6,10 +6,10 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" + cid 
"gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" loggables "gx/ipfs/QmT4PgCNdv73hnFAqzHqwW44q7M9PWpykSswHDxndquZbc/go-libp2p-loggables" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" - blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/session_test.go b/bitswap/session_test.go index 55a79408d..9048e59b4 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -8,8 +8,8 @@ import ( blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" - blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" ) func TestBasicSessions(t *testing.T) { diff --git a/bitswap/stat.go b/bitswap/stat.go index 8e24e3e06..39f02c1c9 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -3,7 +3,7 @@ package bitswap import ( "sort" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" ) type Stat struct { diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 34d6377cc..c83b2e78e 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -2,8 +2,8 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" + "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - 
"gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index daabe63db..803248552 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -9,9 +9,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - blocks "gx/ipfs/QmVA4mafxbfH5aEvNz8fyoxC6J1xhAtw88B4GerPznSZBg/go-block-format" + blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" + testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - testutil "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index f9aa2e1ab..7e21b71ee 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,9 +5,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" + testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - testutil "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" - mockpeernet "gx/ipfs/QmZyngpQxUGyx1T2bzEcst6YzERkvVwDzBMbsSQF4f1smE/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmbRT4BwPQEx4CPCd8LKYL46tFWYneGswQnHFdsuiczJRL/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index ee846fc07..a01d4165f 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -8,11 +8,11 @@ import ( bsnet 
"github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" + testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" - routing "gx/ipfs/QmPjTrrSfE6TzLv6ya6VWhGcCgPrUAdcgrDcQyRDX2VyW1/go-libp2p-routing" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + routing "gx/ipfs/QmPR2JzfKd9poHx9XBhzoFeBBC31ZM3W5iUPKJZWyaoZZm/go-libp2p-routing" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 5ac4c7847..85d15c115 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -8,12 +8,12 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "gx/ipfs/QmZJD56ZWLViJAVkvLc7xbbDerHzUMLr2X4fLRYfbxZWDN/go-testutil" + testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" ds_sync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - p2ptestutil "gx/ipfs/QmYdcTdkuCvFXLj2uejJF5aY3HWhtd8JLT4BjPxF9BNPYf/go-libp2p-netutil" + p2ptestutil "gx/ipfs/QmYTeBaLWbFKQAtVTHbxvTbKfgqrGJUupK4UwjeugownfD/go-libp2p-netutil" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index c6dbf6cf6..b55bc9421 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" ) type ThreadSafe struct { diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/wantlist/wantlist_test.go index 053186dc9..07712d98e 100644 --- a/bitswap/wantlist/wantlist_test.go +++ b/bitswap/wantlist/wantlist_test.go @@ -3,7 +3,7 @@ package wantlist import ( "testing" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" ) var testcids []*cid.Cid diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 39f0a1bae..cdc8da868 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -10,8 +10,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/workers.go b/bitswap/workers.go index 424a9b211..3ce4f44c7 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -8,10 +8,10 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - cid "gx/ipfs/QmTprEaAA2A9bst5XH7exuyi5KzNMK3SEDNN8rBDnKWcUS/go-cid" peer 
"gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) From fd70c5ffd0f0b9680172c0520b108c6bfcc060c8 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 4 Sep 2017 23:37:11 -0700 Subject: [PATCH 0575/1035] gx: update go-ws-transport License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@8d7e1d0d59dbaf7fff377d5166d0fe5d175653bf --- bitswap/bitswap_test.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 1155309d7..973ea0c7c 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -20,8 +20,8 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + p2ptestutil "gx/ipfs/QmQ1bJEsmdEiGfTQRoj6CsshWmAKduAEDEbwzbvk5QT5Ui/go-libp2p-netutil" tu "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" - p2ptestutil "gx/ipfs/QmYTeBaLWbFKQAtVTHbxvTbKfgqrGJUupK4UwjeugownfD/go-libp2p-netutil" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 7e21b71ee..2f854eb2b 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -7,7 +7,7 @@ import ( ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - mockpeernet "gx/ipfs/QmbRT4BwPQEx4CPCd8LKYL46tFWYneGswQnHFdsuiczJRL/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmXZyBQMkqSYigxhJResC6fLWDGFhbphK67eZoqMDUvBmK/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 85d15c115..3fa069234 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,10 +10,10 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" + p2ptestutil "gx/ipfs/QmQ1bJEsmdEiGfTQRoj6CsshWmAKduAEDEbwzbvk5QT5Ui/go-libp2p-netutil" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" ds_sync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - p2ptestutil "gx/ipfs/QmYTeBaLWbFKQAtVTHbxvTbKfgqrGJUupK4UwjeugownfD/go-libp2p-netutil" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! From cfc4396280307b7d5ebe7041b4640b1581593bf0 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 14 Sep 2017 11:39:25 -0700 Subject: [PATCH 0576/1035] gx: update go-stream-muxer Introduces a new Reset method on streams that kills both sides of the connection. Close now officially just closes the write side (what it did all along...) * Also pull through shiny new go-multiplexer fixes. * Also pull in go-reuseport update. 
License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@557bef8ca2ef3f8db5d56f9ddb548a4c4792a735 --- bitswap/bitswap_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 973ea0c7c..f01714529 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -20,7 +20,7 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - p2ptestutil "gx/ipfs/QmQ1bJEsmdEiGfTQRoj6CsshWmAKduAEDEbwzbvk5QT5Ui/go-libp2p-netutil" + p2ptestutil "gx/ipfs/QmP4cEjmvf8tC6ykxKXrvmYLo8vqtGsgduMatjbAKnBzv8/go-libp2p-netutil" tu "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" ) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 273321305..f5720006d 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -8,10 +8,10 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" + inet "gx/ipfs/QmNa31VPzC561NWwRsJLE7nGYZYuuD2QfpK2b1q9BK54J1/go-libp2p-net" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" - inet "gx/ipfs/QmahYsGWry85Y7WUe2SX5G4JkH2zifEQAUtJVLZ24aC9DF/go-libp2p-net" ) // TODO move message.go into the bitswap package diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 30b5db20b..505ea4d2e 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,15 +8,15 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + inet 
"gx/ipfs/QmNa31VPzC561NWwRsJLE7nGYZYuuD2QfpK2b1q9BK54J1/go-libp2p-net" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" routing "gx/ipfs/QmPR2JzfKd9poHx9XBhzoFeBBC31ZM3W5iUPKJZWyaoZZm/go-libp2p-routing" pstore "gx/ipfs/QmPgDWmTmuzvP7QE5zwo1TmjbJme9pmZHNujB2453jkCTr/go-libp2p-peerstore" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - host "gx/ipfs/QmUwW8jMQDxXhLD2j4EfWqLEMX3MsvyWcWGvJPVDh1aTmu/go-libp2p-host" ma "gx/ipfs/QmXY77cVe7rVRQXZZQRioukUM7aRW3BTcAgJe12MCtb3Ji/go-multiaddr" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - inet "gx/ipfs/QmahYsGWry85Y7WUe2SX5G4JkH2zifEQAUtJVLZ24aC9DF/go-libp2p-net" + host "gx/ipfs/QmaSxYRuMq4pkpBBG2CYaRrPx2z7NmMVEs34b9g61biQA6/go-libp2p-host" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 2f854eb2b..7c9857182 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -7,7 +7,7 @@ import ( ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - mockpeernet "gx/ipfs/QmXZyBQMkqSYigxhJResC6fLWDGFhbphK67eZoqMDUvBmK/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmZ3ma9g2NTg7GNF1ntWNRa1GW9jVzGq8UE9cKCwRKv6dS/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 3fa069234..b9545ea28 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,7 +10,7 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" - p2ptestutil "gx/ipfs/QmQ1bJEsmdEiGfTQRoj6CsshWmAKduAEDEbwzbvk5QT5Ui/go-libp2p-netutil" + p2ptestutil "gx/ipfs/QmP4cEjmvf8tC6ykxKXrvmYLo8vqtGsgduMatjbAKnBzv8/go-libp2p-netutil" 
ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" ds_sync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" From c488d9e21bd92cb3c439682e12e0a4307f529650 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 14 Sep 2017 11:52:14 -0700 Subject: [PATCH 0577/1035] use stream.Reset where appropriate License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@6cd6f553c47a27212fa6023d8cde5e567efdef35 --- bitswap/network/interface.go | 1 + bitswap/network/ipfs_impl.go | 15 +++++++++++++-- bitswap/testnet/virtual.go | 4 ++++ bitswap/wantmanager.go | 13 +++++++------ 4 files changed, 25 insertions(+), 8 deletions(-) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 92d27676c..2ec1c639b 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -40,6 +40,7 @@ type BitSwapNetwork interface { type MessageSender interface { SendMsg(context.Context, bsmsg.BitSwapMessage) error Close() error + Reset() error } // Implement Receiver to receive messages from the BitSwapNetwork diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 505ea4d2e..8e18527aa 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -56,6 +56,10 @@ func (s *streamMessageSender) Close() error { return s.s.Close() } +func (s *streamMessageSender) Reset() error { + return s.s.Reset() +} + func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { return msgToStream(ctx, s.s, msg) } @@ -121,9 +125,14 @@ func (bsnet *impl) SendMessage( if err != nil { return err } - defer s.Close() - return msgToStream(ctx, s, outgoing) + err = msgToStream(ctx, s, outgoing) + if err != nil { + s.Reset() + } else { + s.Close() + } + return err } func (bsnet *impl) SetDelegate(r Receiver) { @@ -180,6 +189,7 @@ func (bsnet *impl) handleNewStream(s 
inet.Stream) { defer s.Close() if bsnet.receiver == nil { + s.Reset() return } @@ -188,6 +198,7 @@ func (bsnet *impl) handleNewStream(s inet.Stream) { received, err := bsmsg.FromPBReader(reader) if err != nil { if err != io.EOF { + s.Reset() go bsnet.receiver.ReceiveError(err) log.Debugf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err) } diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index a01d4165f..37ae23b54 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -133,6 +133,10 @@ func (mp *messagePasser) Close() error { return nil } +func (mp *messagePasser) Reset() error { + return nil +} + func (n *networkClient) NewMessageSender(ctx context.Context, p peer.ID) (bsnet.MessageSender, error) { return &messagePasser{ net: n.network, diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index cdc8da868..e2859a292 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -172,18 +172,19 @@ func (pm *WantManager) stopPeerHandler(p peer.ID) { } func (mq *msgQueue) runQueue(ctx context.Context) { - defer func() { - if mq.sender != nil { - mq.sender.Close() - } - }() for { select { case <-mq.work: // there is work to be done mq.doWork(ctx) case <-mq.done: + if mq.sender != nil { + mq.sender.Close() + } return case <-ctx.Done(): + if mq.sender != nil { + mq.sender.Reset() + } return } } @@ -218,7 +219,7 @@ func (mq *msgQueue) doWork(ctx context.Context) { } log.Infof("bitswap send error: %s", err) - mq.sender.Close() + mq.sender.Reset() mq.sender = nil select { From 3d45531f55d1061a1daae2c20570b17682968748 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 15 Sep 2017 18:56:44 -0700 Subject: [PATCH 0578/1035] update yamux We need to cancel out all readers/writers on stream reset. 
License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@707819589b1fa9accf6c7295a8af99342e664286 --- bitswap/bitswap_test.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index f01714529..88b510fed 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -20,8 +20,8 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - p2ptestutil "gx/ipfs/QmP4cEjmvf8tC6ykxKXrvmYLo8vqtGsgduMatjbAKnBzv8/go-libp2p-netutil" tu "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" + p2ptestutil "gx/ipfs/QmdzuGp4a9pahgXuBeReHdYGUzdVX3FUCwfmWVo5mQfkTi/go-libp2p-netutil" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 7c9857182..0263d61a6 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -4,10 +4,10 @@ import ( "context" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" + mockpeernet "gx/ipfs/QmRQ76P5dgvxTujhfPsCRAG83rC15jgb1G9bKLuomuC6dQ/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - mockpeernet "gx/ipfs/QmZ3ma9g2NTg7GNF1ntWNRa1GW9jVzGq8UE9cKCwRKv6dS/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index b9545ea28..2ff5bc173 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,10 +10,10 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil 
"gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" - p2ptestutil "gx/ipfs/QmP4cEjmvf8tC6ykxKXrvmYLo8vqtGsgduMatjbAKnBzv8/go-libp2p-netutil" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" ds_sync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + p2ptestutil "gx/ipfs/QmdzuGp4a9pahgXuBeReHdYGUzdVX3FUCwfmWVo5mQfkTi/go-libp2p-netutil" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! From e7f7cedefd32a01683f82e9bfd673cf543f6c778 Mon Sep 17 00:00:00 2001 From: vyzo Date: Thu, 5 Oct 2017 17:10:16 +0300 Subject: [PATCH 0579/1035] update go-testutil to 1.1.12 License: MIT Signed-off-by: vyzo This commit was moved from ipfs/go-bitswap@80625126c528554977629435b3ce47fd3a191075 --- bitswap/bitswap_test.go | 4 ++-- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/peer_request_queue_test.go | 2 +- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 2 +- 9 files changed, 10 insertions(+), 10 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 88b510fed..09d44ba3b 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -14,13 +14,13 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" + travis "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil/ci/travis" blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" - travis "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil/ci/travis" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - tu 
"gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" + tu "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" p2ptestutil "gx/ipfs/QmdzuGp4a9pahgXuBeReHdYGUzdVX3FUCwfmWVo5mQfkTi/go-libp2p-netutil" ) diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index cb005e6ef..5ffb2aa3c 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,8 +7,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" - "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 512548cf5..ac35c7122 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -11,10 +11,10 @@ import ( context "context" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" + testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" dssync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" - testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 718da14e4..32efd763b 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -10,8 +10,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" cid 
"gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" - "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index c83b2e78e..69cdbf0cc 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -2,7 +2,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" + "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 803248552..7fcecc909 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -9,8 +9,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" + testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" - testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 0263d61a6..e1f9f0c54 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -4,9 +4,9 @@ import ( "context" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" + testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" mockpeernet "gx/ipfs/QmRQ76P5dgvxTujhfPsCRAG83rC15jgb1G9bKLuomuC6dQ/go-libp2p/p2p/net/mock" ds 
"gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" - testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 37ae23b54..586f12f65 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -8,7 +8,7 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" + testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" routing "gx/ipfs/QmPR2JzfKd9poHx9XBhzoFeBBC31ZM3W5iUPKJZWyaoZZm/go-libp2p-routing" diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 2ff5bc173..b62375e83 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -8,7 +8,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" + testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" ds_sync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" From 426e427fff939453f8df922effbd811c3dc99d9e Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 6 Oct 2017 08:42:59 -0700 Subject: [PATCH 0580/1035] update deps for new connmgr code License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@8c2a566e0fa7aaac3349c4650d03744c4f36328a --- bitswap/bitswap_test.go | 4 ++-- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/peer_request_queue_test.go | 
2 +- bitswap/network/ipfs_impl.go | 2 +- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 2 +- 10 files changed, 12 insertions(+), 12 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 09d44ba3b..88b510fed 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -14,13 +14,13 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - travis "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil/ci/travis" blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" + travis "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil/ci/travis" detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - tu "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" + tu "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" p2ptestutil "gx/ipfs/QmdzuGp4a9pahgXuBeReHdYGUzdVX3FUCwfmWVo5mQfkTi/go-libp2p-netutil" ) diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 5ffb2aa3c..cb005e6ef 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,8 +7,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" + "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index ac35c7122..512548cf5 100644 --- 
a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -11,10 +11,10 @@ import ( context "context" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" - testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" dssync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" + testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 32efd763b..718da14e4 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -10,8 +10,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" + "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 8e18527aa..e96d74447 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -16,7 +16,7 @@ import ( ma "gx/ipfs/QmXY77cVe7rVRQXZZQRioukUM7aRW3BTcAgJe12MCtb3Ji/go-multiaddr" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - host "gx/ipfs/QmaSxYRuMq4pkpBBG2CYaRrPx2z7NmMVEs34b9g61biQA6/go-libp2p-host" + host "gx/ipfs/Qmc1XhrFEiSeBNn3mpfg6gEuYCt5im2gYmNVmncsvmpeAk/go-libp2p-host" ) var log = logging.Logger("bitswap_network") diff --git 
a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 69cdbf0cc..c83b2e78e 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -2,7 +2,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" + "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 7fcecc909..803248552 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -9,8 +9,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" + testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index e1f9f0c54..7557542be 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -4,10 +4,10 @@ import ( "context" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" - testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" - mockpeernet "gx/ipfs/QmRQ76P5dgvxTujhfPsCRAG83rC15jgb1G9bKLuomuC6dQ/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" + testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + mockpeernet "gx/ipfs/Qmbgce14YTWE2qhE49JVvTBPaHTyz3FaFmqQPyuZAz6C28/go-libp2p/p2p/net/mock" ) type 
peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 586f12f65..37ae23b54 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -8,7 +8,7 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" + testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" routing "gx/ipfs/QmPR2JzfKd9poHx9XBhzoFeBBC31ZM3W5iUPKJZWyaoZZm/go-libp2p-routing" diff --git a/bitswap/testutils.go b/bitswap/testutils.go index b62375e83..2ff5bc173 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -8,7 +8,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" + testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" ds_sync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" From 3eacc843f76fcf7275eac9f660d38f1215ffc069 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sat, 14 Oct 2017 08:33:50 -0700 Subject: [PATCH 0581/1035] tag peers associated with a bitswap session License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@f60995ea48db3fc7bdb52efe78d171758b968770 --- bitswap/network/interface.go | 4 ++++ bitswap/network/ipfs_impl.go | 5 +++++ bitswap/session.go | 14 +++++++++++++- bitswap/testnet/virtual.go | 7 ++++++- 4 files changed, 28 insertions(+), 2 deletions(-) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 2ec1c639b..fa0437bbe 100644 --- a/bitswap/network/interface.go +++ 
b/bitswap/network/interface.go @@ -4,8 +4,10 @@ import ( "context" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + ifconnmgr "gx/ipfs/QmYkCrTwivapqdB3JbwvwvxymseahVkcm46ThRMAA24zCr/go-libp2p-interface-connmgr" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" ) @@ -34,6 +36,8 @@ type BitSwapNetwork interface { NewMessageSender(context.Context, peer.ID) (MessageSender, error) + ConnectionManager() ifconnmgr.ConnManager + Routing } diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index e96d74447..a6fc904a2 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -15,6 +15,7 @@ import ( logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" ma "gx/ipfs/QmXY77cVe7rVRQXZZQRioukUM7aRW3BTcAgJe12MCtb3Ji/go-multiaddr" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + ifconnmgr "gx/ipfs/QmYkCrTwivapqdB3JbwvwvxymseahVkcm46ThRMAA24zCr/go-libp2p-interface-connmgr" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" host "gx/ipfs/Qmc1XhrFEiSeBNn3mpfg6gEuYCt5im2gYmNVmncsvmpeAk/go-libp2p-host" ) @@ -212,6 +213,10 @@ func (bsnet *impl) handleNewStream(s inet.Stream) { } } +func (bsnet *impl) ConnectionManager() ifconnmgr.ConnManager { + return bsnet.host.ConnManager() +} + type netNotifiee impl func (nn *netNotifiee) impl() *impl { diff --git a/bitswap/session.go b/bitswap/session.go index 7e55bb5e9..09b778622 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -2,6 +2,7 @@ package bitswap import ( "context" + "fmt" "time" notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" @@ -44,7 +45,8 @@ type Session struct { uuid logging.Loggable - id uint64 + id uint64 + tag string } // NewSession creates a new bitswap session whose lifetime is bounded by the 
@@ -66,6 +68,8 @@ func (bs *Bitswap) NewSession(ctx context.Context) *Session { id: bs.getNextSessionID(), } + s.tag = fmt.Sprint("bs-ses-", s.id) + cache, _ := lru.New(2048) s.interest = cache @@ -139,6 +143,9 @@ func (s *Session) addActivePeer(p peer.ID) { if _, ok := s.activePeers[p]; !ok { s.activePeers[p] = struct{}{} s.activePeersArr = append(s.activePeersArr, p) + + cmgr := s.bs.network.ConnectionManager() + cmgr.TagPeer(p, s.tag, 10) } } @@ -216,6 +223,11 @@ func (s *Session) run(ctx context.Context) { case <-ctx.Done(): s.tick.Stop() s.bs.removeSession(s) + + cmgr := s.bs.network.ConnectionManager() + for _, p := range s.activePeersArr { + cmgr.UntagPeer(p, s.tag) + } return } } diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 37ae23b54..217d43552 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -8,12 +8,13 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" routing "gx/ipfs/QmPR2JzfKd9poHx9XBhzoFeBBC31ZM3W5iUPKJZWyaoZZm/go-libp2p-routing" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + ifconnmgr "gx/ipfs/QmYkCrTwivapqdB3JbwvwvxymseahVkcm46ThRMAA24zCr/go-libp2p-interface-connmgr" ) var log = logging.Logger("bstestnet") @@ -118,6 +119,10 @@ func (nc *networkClient) FindProvidersAsync(ctx context.Context, k *cid.Cid, max return out } +func (nc *networkClient) ConnectionManager() ifconnmgr.ConnManager { + return &ifconnmgr.NullConnMgr{} +} + type messagePasser struct { net *network target peer.ID From 72c08457e6da97ed2d356c7559e065a5b58be462 Mon Sep 17 00:00:00 2001 
From: Steven Allen Date: Tue, 17 Oct 2017 15:37:46 -0700 Subject: [PATCH 0582/1035] filter out "" from active peers in bitswap sessions We use "" to indicate that the block came from the local node. There's no reason to record "" as an active peer (doesn't really *hurt* but still...). License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@42f8fed05f3ef162fee86e05a91e79d180e23d4c --- bitswap/session.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bitswap/session.go b/bitswap/session.go index 7e55bb5e9..e2236eda6 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -159,7 +159,9 @@ func (s *Session) run(ctx context.Context) { case blk := <-s.incoming: s.tick.Stop() - s.addActivePeer(blk.from) + if blk.from != "" { + s.addActivePeer(blk.from) + } s.receiveBlock(ctx, blk.blk) From 91c37d6ee2fe37fc15e54392051b21ea3edf6f28 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 17 Oct 2017 15:58:27 -0700 Subject: [PATCH 0583/1035] NewStream now creates a connection if necessary License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@bec48890f47487957aaf91f7ea2af3843f5a707f --- bitswap/network/ipfs_impl.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index e96d74447..3b7c87312 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -105,14 +105,6 @@ func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID) (MessageSend } func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (inet.Stream, error) { - - // first, make sure we're connected. - // if this fails, we cannot connect to given peer. - //TODO(jbenet) move this into host.NewStream? 
- if err := bsnet.host.Connect(ctx, pstore.PeerInfo{ID: p}); err != nil { - return nil, err - } - return bsnet.host.NewStream(ctx, p, ProtocolBitswap, ProtocolBitswapOne, ProtocolBitswapNoVers) } From dc49be3e0dcc7b9ba8fc48f5c13ca9897c19d8cc Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 19 Oct 2017 07:51:55 -0700 Subject: [PATCH 0584/1035] gx update go-peerstream, go-libp2p-floodsub License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@fc37b2f652dbefc728a034d314e09f68c6374195 --- bitswap/bitswap_test.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 88b510fed..d68858eef 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -20,8 +20,8 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + p2ptestutil "gx/ipfs/QmQGX417WoxKxDJeHqouMEmmH4G1RCENNSzkZYHrXy3Xb3/go-libp2p-netutil" tu "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" - p2ptestutil "gx/ipfs/QmdzuGp4a9pahgXuBeReHdYGUzdVX3FUCwfmWVo5mQfkTi/go-libp2p-netutil" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 7557542be..32438508a 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -7,7 +7,7 @@ import ( ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - mockpeernet "gx/ipfs/Qmbgce14YTWE2qhE49JVvTBPaHTyz3FaFmqQPyuZAz6C28/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmefgzMbKZYsmHFkLqxgaTBG9ypeEjrdWRD5WXH4j1cWDL/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 2ff5bc173..ca7b9a60b 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -10,10 +10,10 @@ import ( delay "github.com/ipfs/go-ipfs/thirdparty/delay" testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" + p2ptestutil "gx/ipfs/QmQGX417WoxKxDJeHqouMEmmH4G1RCENNSzkZYHrXy3Xb3/go-libp2p-netutil" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" ds_sync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - p2ptestutil "gx/ipfs/QmdzuGp4a9pahgXuBeReHdYGUzdVX3FUCwfmWVo5mQfkTi/go-libp2p-netutil" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
From 847e95c1b529a8946ba1934463da6eb7b77300ec Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 12 Nov 2017 19:21:56 -0800 Subject: [PATCH 0585/1035] Buffer response channel to prevent deadlock License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@44e13806ec868e13a0fcce3c39c83ff36032454a --- bitswap/session.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/session.go b/bitswap/session.go index 11d1ea4ff..987ab30f6 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -119,7 +119,7 @@ type interestReq struct { // block we received) this function will not be called, as the cid will likely // still be in the interest cache. func (s *Session) isLiveWant(c *cid.Cid) bool { - resp := make(chan bool) + resp := make(chan bool, 1) s.interestReqs <- interestReq{ c: c, resp: resp, From b5bd5d862db255296a81e68d373743f917dd6b5a Mon Sep 17 00:00:00 2001 From: Jan Winkelmann Date: Sat, 1 Apr 2017 16:58:17 +0200 Subject: [PATCH 0586/1035] cmd: use go-ipfs-cmds License: MIT Signed-off-by: keks This commit was moved from ipfs/go-bitswap@290fff923659bcf0aaca731a2b9d4327f9c35d4a --- bitswap/bitswap_test.go | 6 +++--- bitswap/decision/engine_test.go | 2 +- bitswap/message/message_test.go | 4 ++-- bitswap/message/pb/Makefile | 8 ++++++++ bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 1 + 6 files changed, 16 insertions(+), 7 deletions(-) create mode 100644 bitswap/message/pb/Makefile diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index d68858eef..e35461780 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -14,14 +14,14 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" - travis "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil/ci/travis" detectrace 
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - p2ptestutil "gx/ipfs/QmQGX417WoxKxDJeHqouMEmmH4G1RCENNSzkZYHrXy3Xb3/go-libp2p-netutil" tu "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" + travis "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil/ci/travis" + blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" + p2ptestutil "gx/ipfs/QmQGX417WoxKxDJeHqouMEmmH4G1RCENNSzkZYHrXy3Xb3/go-libp2p-netutil" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 512548cf5..65ca05a71 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -1,6 +1,7 @@ package decision import ( + "context" "errors" "fmt" "math" @@ -8,7 +9,6 @@ import ( "sync" "testing" - context "context" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 14233bf88..465953fbd 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -4,12 +4,12 @@ import ( "bytes" "testing" - proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" - pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" + proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" ) func mkFakeCid(s string) *cid.Cid { diff --git a/bitswap/message/pb/Makefile b/bitswap/message/pb/Makefile new file mode 100644 index 
000000000..5bbebea07 --- /dev/null +++ b/bitswap/message/pb/Makefile @@ -0,0 +1,8 @@ +# TODO(brian): add proto tasks +all: message.pb.go + +message.pb.go: message.proto + protoc --gogo_out=. --proto_path=../../../../../:/usr/local/opt/protobuf/include:. $< + +clean: + rm message.pb.go diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 803248552..5f14427ab 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -1,10 +1,10 @@ package bitswap import ( + "context" "sync" "testing" - context "context" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 32438508a..5aed6e24d 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -2,6 +2,7 @@ package bitswap import ( "context" + bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" From 7f6347a7334c1d6b86508a705f1b453d538125eb Mon Sep 17 00:00:00 2001 From: keks Date: Mon, 23 Oct 2017 16:50:39 +0200 Subject: [PATCH 0587/1035] compatible to js-ipfs-api License: MIT Signed-off-by: keks This commit was moved from ipfs/go-bitswap@f2018cd7e076f274eea3060d758b4aaf715013f9 --- bitswap/bitswap_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index e35461780..5abc37527 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -18,10 +18,10 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + p2ptestutil "gx/ipfs/QmQGX417WoxKxDJeHqouMEmmH4G1RCENNSzkZYHrXy3Xb3/go-libp2p-netutil" + blocks 
"gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" tu "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" travis "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil/ci/travis" - blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" - p2ptestutil "gx/ipfs/QmQGX417WoxKxDJeHqouMEmmH4G1RCENNSzkZYHrXy3Xb3/go-libp2p-netutil" ) // FIXME the tests are really sensitive to the network delay. fix them to work From 1450580da34388a85331c0c5c7bb1ec3116d9295 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 20 Nov 2017 16:25:06 -0800 Subject: [PATCH 0588/1035] gx: massive update Note: This commit is technically broken. However, I need to make a bunch of cmds changes to make this work and I'd rather not bundle both changes into a single commit. License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@55dbaead5f3d5e92bbc72ae50fe171ece9d9495e --- bitswap/bitswap_test.go | 6 +++--- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/peer_request_queue_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 6 +++--- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 4 ++-- bitswap/testutils.go | 4 ++-- 12 files changed, 19 insertions(+), 19 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 5abc37527..8f6ce439d 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -18,10 +18,10 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - p2ptestutil "gx/ipfs/QmQGX417WoxKxDJeHqouMEmmH4G1RCENNSzkZYHrXy3Xb3/go-libp2p-netutil" + tu "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" + travis 
"gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil/ci/travis" blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" - tu "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" - travis "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil/ci/travis" + p2ptestutil "gx/ipfs/QmUUNDRYXgfqdjxTg79ogkciczU5y4WY1tKMU2vEX9CRN7/go-libp2p-netutil" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index cb005e6ef..5ffb2aa3c 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,8 +7,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" - "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 65ca05a71..66db73e6e 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -11,10 +11,10 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" + testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" dssync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" - testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue_test.go 
b/bitswap/decision/peer_request_queue_test.go index 718da14e4..32efd763b 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -10,8 +10,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" - "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index f5720006d..dca3d0b17 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -8,10 +8,10 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" - inet "gx/ipfs/QmNa31VPzC561NWwRsJLE7nGYZYuuD2QfpK2b1q9BK54J1/go-libp2p-net" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" + inet "gx/ipfs/QmbD5yKbXahNvoMqzeuNyKQA9vAs9fUvJg2GXeWU1fVqY5/go-libp2p-net" ) // TODO move message.go into the bitswap package diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index fa0437bbe..9be82e6de 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -6,8 +6,8 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + ifconnmgr "gx/ipfs/QmWfkNorhirGE1Qp3VwBWcnGaj4adv4hNqCYwabMrEYc21/go-libp2p-interface-connmgr" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - ifconnmgr "gx/ipfs/QmYkCrTwivapqdB3JbwvwvxymseahVkcm46ThRMAA24zCr/go-libp2p-interface-connmgr" protocol 
"gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index d1dcbfe0f..a9a8dc8c5 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,16 +8,16 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - inet "gx/ipfs/QmNa31VPzC561NWwRsJLE7nGYZYuuD2QfpK2b1q9BK54J1/go-libp2p-net" cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" routing "gx/ipfs/QmPR2JzfKd9poHx9XBhzoFeBBC31ZM3W5iUPKJZWyaoZZm/go-libp2p-routing" pstore "gx/ipfs/QmPgDWmTmuzvP7QE5zwo1TmjbJme9pmZHNujB2453jkCTr/go-libp2p-peerstore" + host "gx/ipfs/QmRS46AyqtpJBsf1zmQdeizSDEzo1qkWR7rdEuPFAv8237/go-libp2p-host" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + ifconnmgr "gx/ipfs/QmWfkNorhirGE1Qp3VwBWcnGaj4adv4hNqCYwabMrEYc21/go-libp2p-interface-connmgr" ma "gx/ipfs/QmXY77cVe7rVRQXZZQRioukUM7aRW3BTcAgJe12MCtb3Ji/go-multiaddr" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - ifconnmgr "gx/ipfs/QmYkCrTwivapqdB3JbwvwvxymseahVkcm46ThRMAA24zCr/go-libp2p-interface-connmgr" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - host "gx/ipfs/Qmc1XhrFEiSeBNn3mpfg6gEuYCt5im2gYmNVmncsvmpeAk/go-libp2p-host" + inet "gx/ipfs/QmbD5yKbXahNvoMqzeuNyKQA9vAs9fUvJg2GXeWU1fVqY5/go-libp2p-net" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index c83b2e78e..69cdbf0cc 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -2,7 +2,7 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" + "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/testnet/network_test.go 
b/bitswap/testnet/network_test.go index 5f14427ab..88aa6d8dc 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -9,8 +9,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" + testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" - testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 5aed6e24d..e40b49104 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,10 +5,10 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" + testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" + mockpeernet "gx/ipfs/QmTzs3Gp2rU3HuNayjBVG7qBgbaKWE8bgtwJ7faRxAe9UP/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" - testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - mockpeernet "gx/ipfs/QmefgzMbKZYsmHFkLqxgaTBG9ypeEjrdWRD5WXH4j1cWDL/go-libp2p/p2p/net/mock" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 217d43552..d2b7bd87d 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -11,10 +11,10 @@ import ( cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" routing "gx/ipfs/QmPR2JzfKd9poHx9XBhzoFeBBC31ZM3W5iUPKJZWyaoZZm/go-libp2p-routing" + testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - testutil 
"gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" + ifconnmgr "gx/ipfs/QmWfkNorhirGE1Qp3VwBWcnGaj4adv4hNqCYwabMrEYc21/go-libp2p-interface-connmgr" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" - ifconnmgr "gx/ipfs/QmYkCrTwivapqdB3JbwvwvxymseahVkcm46ThRMAA24zCr/go-libp2p-interface-connmgr" ) var log = logging.Logger("bstestnet") diff --git a/bitswap/testutils.go b/bitswap/testutils.go index ca7b9a60b..20a1b0dbb 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -8,9 +8,9 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "gx/ipfs/QmWRCn8vruNAzHx8i6SAXinuheRitKEGu8c7m26stKvsYx/go-testutil" + testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" - p2ptestutil "gx/ipfs/QmQGX417WoxKxDJeHqouMEmmH4G1RCENNSzkZYHrXy3Xb3/go-libp2p-netutil" + p2ptestutil "gx/ipfs/QmUUNDRYXgfqdjxTg79ogkciczU5y4WY1tKMU2vEX9CRN7/go-libp2p-netutil" ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" ds_sync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" From efdd01234f3e5a96586fc4e54c05b2df1aaaea02 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 20 Nov 2017 22:13:34 -0800 Subject: [PATCH 0589/1035] fix deadlock in bitswap sessions This deadlock would happen when calling SessionsForBlock (holding bitswap.sessLk) while the session's main loop was trying to deregister the session (taking bitswap.sessLk). I've also defensively added selects on contexts for two other channel writes just in case. fixes #4394 ...well, it fixes *a* deadlock showing up in that issue, there may be more. 
License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@a6f4f7d464a1ec524b3e99ba9ea18969a491d441 --- bitswap/session.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/bitswap/session.go b/bitswap/session.go index 987ab30f6..9c7f85b30 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -120,9 +120,13 @@ type interestReq struct { // still be in the interest cache. func (s *Session) isLiveWant(c *cid.Cid) bool { resp := make(chan bool, 1) - s.interestReqs <- interestReq{ + select { + case s.interestReqs <- interestReq{ c: c, resp: resp, + }: + case <-s.ctx.Done(): + return false } select { @@ -278,13 +282,17 @@ func (s *Session) cancel(keys []*cid.Cid) { } func (s *Session) cancelWants(keys []*cid.Cid) { - s.cancelKeys <- keys + select { + case s.cancelKeys <- keys: + case <-s.ctx.Done(): + } } func (s *Session) fetch(ctx context.Context, keys []*cid.Cid) { select { case s.newReqs <- keys: case <-ctx.Done(): + case <-s.ctx.Done(): } } From 977a690aa30db0d29f96dc8ff7acd8ec99326ebc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Sun, 19 Nov 2017 04:32:55 +0100 Subject: [PATCH 0590/1035] gx: Update go-datastore to 1.4.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera This commit was moved from ipfs/go-bitswap@25520f34dac0cdf7773ece9d16a1a4dc6e9ce385 --- bitswap/decision/engine_test.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 66db73e6e..06c2a2bd2 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -13,9 +13,9 @@ import ( message "github.com/ipfs/go-ipfs/exchange/bitswap/message" testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" blocks 
"gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" - ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" - dssync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + ds "gx/ipfs/QmdHG8MAuARdGHxx4rPQASLcvhz24fzjSQq7AJRAQEorq5/go-datastore" + dssync "gx/ipfs/QmdHG8MAuARdGHxx4rPQASLcvhz24fzjSQq7AJRAQEorq5/go-datastore/sync" ) type peerAndEngine struct { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index e40b49104..6ff543d57 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -7,8 +7,8 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" mockpeernet "gx/ipfs/QmTzs3Gp2rU3HuNayjBVG7qBgbaKWE8bgtwJ7faRxAe9UP/go-libp2p/p2p/net/mock" - ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + ds "gx/ipfs/QmdHG8MAuARdGHxx4rPQASLcvhz24fzjSQq7AJRAQEorq5/go-datastore" ) type peernet struct { diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 20a1b0dbb..3b0bec59e 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -11,9 +11,9 @@ import ( testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" p2ptestutil "gx/ipfs/QmUUNDRYXgfqdjxTg79ogkciczU5y4WY1tKMU2vEX9CRN7/go-libp2p-netutil" - ds "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore" - ds_sync "gx/ipfs/QmVSase1JP7cq9QkPT46oNwdp9pT6kBkG3oqS14y3QcZjG/go-datastore/sync" peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + ds "gx/ipfs/QmdHG8MAuARdGHxx4rPQASLcvhz24fzjSQq7AJRAQEorq5/go-datastore" + ds_sync "gx/ipfs/QmdHG8MAuARdGHxx4rPQASLcvhz24fzjSQq7AJRAQEorq5/go-datastore/sync" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
From 36eff23dcc59fbb8cf62c91df2fd50701cbda48f Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 5 Dec 2017 09:08:23 -0800 Subject: [PATCH 0591/1035] bitswap: preallocate peers array on bitswap stat Avoids lots of reallocations under a lock. License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@84389103dac77ecae7a0c5efc4213a1e84280e0d --- bitswap/decision/engine.go | 3 ++- bitswap/stat.go | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 74d5cf330..3ebadda39 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -201,7 +201,8 @@ func (e *Engine) Peers() []peer.ID { e.lock.Lock() defer e.lock.Unlock() - response := make([]peer.ID, 0) + response := make([]peer.ID, 0, len(e.ledgerMap)) + for _, ledger := range e.ledgerMap { response = append(response, ledger.Partner) } diff --git a/bitswap/stat.go b/bitswap/stat.go index 39f02c1c9..1c7f3f3e8 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -32,7 +32,10 @@ func (bs *Bitswap) Stat() (*Stat, error) { st.DataReceived = c.dataRecvd bs.counterLk.Unlock() - for _, p := range bs.engine.Peers() { + peers := bs.engine.Peers() + st.Peers = make([]string, 0, len(peers)) + + for _, p := range peers { st.Peers = append(st.Peers, p.Pretty()) } sort.Strings(st.Peers) From 045e97e370ad4568035bd3204e88ee48d689fa82 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 5 Dec 2017 09:09:32 -0800 Subject: [PATCH 0592/1035] bitswap: defer unlock when possible License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@75294d229ff5b724762ab27780bb477161ec70bf --- bitswap/decision/engine.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 3ebadda39..6770b535d 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -298,15 +298,15 @@ func (e *Engine) MessageSent(p peer.ID, 
m bsmsg.BitSwapMessage) error { func (e *Engine) PeerConnected(p peer.ID) { e.lock.Lock() + defer e.lock.Unlock() l, ok := e.ledgerMap[p] if !ok { l = newLedger(p) e.ledgerMap[p] = l } l.lk.Lock() + defer l.lk.Unlock() l.ref++ - l.lk.Unlock() - e.lock.Unlock() } func (e *Engine) PeerDisconnected(p peer.ID) { @@ -317,11 +317,11 @@ func (e *Engine) PeerDisconnected(p peer.ID) { return } l.lk.Lock() + defer l.lk.Unlock() l.ref-- if l.ref <= 0 { delete(e.ledgerMap, p) } - l.lk.Unlock() } func (e *Engine) numBytesSentTo(p peer.ID) uint64 { @@ -337,12 +337,12 @@ func (e *Engine) numBytesReceivedFrom(p peer.ID) uint64 { // ledger lazily instantiates a ledger func (e *Engine) findOrCreate(p peer.ID) *ledger { e.lock.Lock() + defer e.lock.Unlock() l, ok := e.ledgerMap[p] if !ok { l = newLedger(p) e.ledgerMap[p] = l } - e.lock.Unlock() return l } From 5cfcfd35e513da89c59f9697c11dd9b0771e52c3 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 5 Dec 2017 09:43:23 -0800 Subject: [PATCH 0593/1035] bitswap: better wantlist allocation patterns License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@bd024d24c1a97bdf35908d8c3a78779fcc34d65a --- bitswap/bitswap.go | 5 +++-- bitswap/wantlist/wantlist.go | 20 ++++++-------------- 2 files changed, 9 insertions(+), 16 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 35d48a35b..e1d6da61c 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -451,8 +451,9 @@ func (bs *Bitswap) Close() error { } func (bs *Bitswap) GetWantlist() []*cid.Cid { - var out []*cid.Cid - for _, e := range bs.wm.wl.Entries() { + entries := bs.wm.wl.Entries() + out := make([]*cid.Cid, 0, len(entries)) + for _, e := range entries { out = append(out, e.Cid) } return out diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index b55bc9421..00c7ce303 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -126,7 +126,7 @@ func (w *ThreadSafe) Contains(k *cid.Cid) 
(*Entry, bool) { func (w *ThreadSafe) Entries() []*Entry { w.lk.RLock() defer w.lk.RUnlock() - var es entrySlice + es := make([]*Entry, 0, len(w.set)) for _, e := range w.set { es = append(es, e) } @@ -134,13 +134,8 @@ func (w *ThreadSafe) Entries() []*Entry { } func (w *ThreadSafe) SortedEntries() []*Entry { - w.lk.RLock() - defer w.lk.RUnlock() - var es entrySlice - for _, e := range w.set { - es = append(es, e) - } - sort.Sort(es) + es := w.Entries() + sort.Sort(entrySlice(es)) return es } @@ -194,7 +189,7 @@ func (w *Wantlist) Contains(k *cid.Cid) (*Entry, bool) { } func (w *Wantlist) Entries() []*Entry { - var es entrySlice + es := make([]*Entry, 0, len(w.set)) for _, e := range w.set { es = append(es, e) } @@ -202,10 +197,7 @@ func (w *Wantlist) Entries() []*Entry { } func (w *Wantlist) SortedEntries() []*Entry { - var es entrySlice - for _, e := range w.set { - es = append(es, e) - } - sort.Sort(es) + es := w.Entries() + sort.Sort(entrySlice(es)) return es } From 54e9a2e49580e13d21c718fa81104fbe223e9fb3 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 5 Dec 2017 09:48:55 -0800 Subject: [PATCH 0594/1035] bitswap: remove useless code License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@39d168d680a18647af4d716cc383d80b65fcbf30 --- bitswap/bitswap.go | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index e1d6da61c..b3325d6ca 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -372,16 +372,6 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg return } - // quickly send out cancels, reduces chances of duplicate block receives - var keys []*cid.Cid - for _, block := range iblocks { - if _, found := bs.wm.wl.Contains(block.Cid()); !found { - log.Infof("received un-asked-for %s from %s", block, p) - continue - } - keys = append(keys, block.Cid()) - } - wg := sync.WaitGroup{} for _, block := range iblocks { wg.Add(1) From 
bf44a31a05894119dd5bd65ce984969cd4aa4b99 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 5 Dec 2017 09:56:26 -0800 Subject: [PATCH 0595/1035] bitswap: better allocation patters in message License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@90d17a674fc24c84b81475d8c3f03790935a898a --- bitswap/message/message.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index dca3d0b17..93a0b9f7b 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -120,7 +120,7 @@ func (m *impl) Empty() bool { } func (m *impl) Wantlist() []Entry { - var out []Entry + out := make([]Entry, 0, len(m.wantlist)) for _, e := range m.wantlist { out = append(out, e) } @@ -182,6 +182,7 @@ func FromPBReader(pbr ggio.Reader) (BitSwapMessage, error) { func (m *impl) ToProtoV0() *pb.Message { pbm := new(pb.Message) pbm.Wantlist = new(pb.Message_Wantlist) + pbm.Wantlist.Entries = make([]*pb.Message_Wantlist_Entry, 0, len(m.wantlist)) for _, e := range m.wantlist { pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, &pb.Message_Wantlist_Entry{ Block: proto.String(e.Cid.KeyString()), @@ -190,7 +191,10 @@ func (m *impl) ToProtoV0() *pb.Message { }) } pbm.Wantlist.Full = proto.Bool(m.full) - for _, b := range m.Blocks() { + + blocks := m.Blocks() + pbm.Blocks = make([][]byte, 0, len(blocks)) + for _, b := range blocks { pbm.Blocks = append(pbm.Blocks, b.RawData()) } return pbm @@ -199,6 +203,7 @@ func (m *impl) ToProtoV0() *pb.Message { func (m *impl) ToProtoV1() *pb.Message { pbm := new(pb.Message) pbm.Wantlist = new(pb.Message_Wantlist) + pbm.Wantlist.Entries = make([]*pb.Message_Wantlist_Entry, 0, len(m.wantlist)) for _, e := range m.wantlist { pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, &pb.Message_Wantlist_Entry{ Block: proto.String(e.Cid.KeyString()), @@ -207,7 +212,10 @@ func (m *impl) ToProtoV1() *pb.Message { }) } pbm.Wantlist.Full = 
proto.Bool(m.full) - for _, b := range m.Blocks() { + + blocks := m.Blocks() + pbm.Payload = make([]*pb.Message_Block, 0, len(blocks)) + for _, b := range blocks { blk := &pb.Message_Block{ Data: b.RawData(), Prefix: b.Cid().Prefix().Bytes(), @@ -230,7 +238,7 @@ func (m *impl) ToNetV1(w io.Writer) error { } func (m *impl) Loggable() map[string]interface{} { - var blocks []string + blocks := make([]string, 0, len(m.blocks)) for _, v := range m.blocks { blocks = append(blocks, v.Cid().String()) } From e876714d626261997e7f2c29ca4c97d24a1567d9 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 5 Dec 2017 10:01:17 -0800 Subject: [PATCH 0596/1035] bitswap: preallocate cid string array License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@bf9f36e366d16e6a5829fd51109b473e274ebec5 --- bitswap/notifications/notifications.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 4b1a62eea..6d1e11801 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -73,7 +73,7 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...*cid.Cid) <-chan blocks.B } func toStrings(keys []*cid.Cid) []string { - strs := make([]string, 0) + strs := make([]string, 0, len(keys)) for _, key := range keys { strs = append(strs, key.KeyString()) } From 171e3cf9c9deee614e9701e63b50b54c5a24083a Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 5 Dec 2017 10:08:07 -0800 Subject: [PATCH 0597/1035] bitswap: better wantmanager allocation patterns License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@3b80bf49380701934b7c67f326e21c9894b4553d --- bitswap/wantmanager.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index e2859a292..d74b836a7 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -94,7 +94,7 @@ type 
wantSet struct { } func (pm *WantManager) addEntries(ctx context.Context, ks []*cid.Cid, targets []peer.ID, cancel bool, ses uint64) { - var entries []*bsmsg.Entry + entries := make([]*bsmsg.Entry, 0, len(ks)) for i, k := range ks { entries = append(entries, &bsmsg.Entry{ Cancel: cancel, @@ -340,7 +340,7 @@ func (pm *WantManager) Run() { pm.stopPeerHandler(p.peer) } case req := <-pm.peerReqs: - var peers []peer.ID + peers := make([]peer.ID, 0, len(pm.peers)) for p := range pm.peers { peers = append(peers, p) } From 8bbad8a705fd4107e3dcc55d2caf8ed645a7e92f Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 5 Dec 2017 10:10:18 -0800 Subject: [PATCH 0598/1035] bitswap: fewer allocations in bitswap sessions Also, don't call time.Now in a loop. License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@5b77b6604bfa44d855949e0154ab93f428da0ab5 --- bitswap/session.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/bitswap/session.go b/bitswap/session.go index 9c7f85b30..9c8e6a96e 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -199,11 +199,12 @@ func (s *Session) run(ctx context.Context) { s.cancel(keys) case <-s.tick.C: - var live []*cid.Cid + live := make([]*cid.Cid, 0, len(s.liveWants)) + now := time.Now() for c := range s.liveWants { cs, _ := cid.Cast([]byte(c)) live = append(live, cs) - s.liveWants[c] = time.Now() + s.liveWants[c] = now } // Broadcast these keys to everyone we're connected to From f6611fb26880c32a2e572383e6e9e712e99b59de Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 8 Dec 2017 14:04:34 -0800 Subject: [PATCH 0599/1035] Demote bitswap error to an info Not being able to dial a peer we used to be connected to is interesting but definitely not an error. 
License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@bdfed2e66260488043818f2c72fe4ede886666cb --- bitswap/wantmanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index e2859a292..d8e55bea3 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -234,7 +234,7 @@ func (mq *msgQueue) doWork(ctx context.Context) { err = mq.openSender(ctx) if err != nil { - log.Errorf("couldnt open sender again after SendMsg(%s) failed: %s", mq.p, err) + log.Infof("couldnt open sender again after SendMsg(%s) failed: %s", mq.p, err) // TODO(why): what do we do now? // I think the *right* answer is to probably put the message we're // trying to send back, and then return to waiting for new work or From 0c2b8d693357c19228559a814ed78e34c0daa520 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Sun, 3 Dec 2017 21:34:29 -0800 Subject: [PATCH 0600/1035] gx: update go-multihash License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@b13118ca209c1a76838199f9fccccafe96d9f993 --- bitswap/bitswap.go | 6 +++--- bitswap/bitswap_test.go | 10 +++++----- bitswap/decision/bench_test.go | 8 ++++---- bitswap/decision/engine.go | 4 ++-- bitswap/decision/engine_test.go | 6 +++--- bitswap/decision/ledger.go | 4 ++-- bitswap/decision/peer_request_queue.go | 4 ++-- bitswap/decision/peer_request_queue_test.go | 6 +++--- bitswap/get.go | 4 ++-- bitswap/message/message.go | 6 +++--- bitswap/message/message_test.go | 6 +++--- bitswap/network/interface.go | 6 +++--- bitswap/network/ipfs_impl.go | 16 ++++++++-------- bitswap/notifications/notifications.go | 4 ++-- bitswap/notifications/notifications_test.go | 4 ++-- bitswap/session.go | 8 ++++---- bitswap/session_test.go | 4 ++-- bitswap/stat.go | 2 +- bitswap/testnet/interface.go | 4 ++-- bitswap/testnet/network_test.go | 6 +++--- bitswap/testnet/peernet.go | 6 +++--- bitswap/testnet/virtual.go | 10 +++++----- 
bitswap/testutils.go | 6 +++--- bitswap/wantlist/wantlist.go | 2 +- bitswap/wantlist/wantlist_test.go | 2 +- bitswap/wantmanager.go | 4 ++-- bitswap/workers.go | 4 ++-- 27 files changed, 76 insertions(+), 76 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b3325d6ca..ec12e7be3 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -19,13 +19,13 @@ import ( flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" - blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" + blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) var log = logging.Logger("bitswap") diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 8f6ce439d..a66ff452c 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -17,11 +17,11 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - tu "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" - travis "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil/ci/travis" - blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" - p2ptestutil "gx/ipfs/QmUUNDRYXgfqdjxTg79ogkciczU5y4WY1tKMU2vEX9CRN7/go-libp2p-netutil" + blocks 
"gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" + p2ptestutil "gx/ipfs/QmZTcPxK6VqrwY94JpKZPvEqAZ6tEr1rLrpcqJbbRZbg2V/go-libp2p-netutil" + tu "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" + travis "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil/ci/travis" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 5ffb2aa3c..288bb7e7d 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -6,10 +6,10 @@ import ( "testing" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" - u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" - "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + u "gx/ipfs/QmPsAfmDBnZN3kZGSuNwvCNDZiHneERSKmRcFyG3UkvcT3/go-ipfs-util" + "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" + "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 6770b535d..bad932b7e 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -9,9 +9,9 @@ import ( bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + 
peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" + blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" ) // TODO consider taking responsibility for other types of requests. For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 06c2a2bd2..1a12d019b 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -11,11 +11,11 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" - testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" - blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" - peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" + blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" ds "gx/ipfs/QmdHG8MAuARdGHxx4rPQASLcvhz24fzjSQq7AJRAQEorq5/go-datastore" dssync "gx/ipfs/QmdHG8MAuARdGHxx4rPQASLcvhz24fzjSQq7AJRAQEorq5/go-datastore/sync" + testutil "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 5cfdeb18d..e3ce24df6 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,8 +6,8 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) func newLedger(p peer.ID) *ledger { diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 2606e8a4c..00123ac8a 100644 --- a/bitswap/decision/peer_request_queue.go +++ 
b/bitswap/decision/peer_request_queue.go @@ -7,8 +7,8 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) type peerRequestQueue interface { diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 32efd763b..3416a5ca1 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -9,9 +9,9 @@ import ( "testing" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" - u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" + u "gx/ipfs/QmPsAfmDBnZN3kZGSuNwvCNDZiHneERSKmRcFyG3UkvcT3/go-ipfs-util" + "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/get.go b/bitswap/get.go index b22f7e1da..aa26de4ef 100644 --- a/bitswap/get.go +++ b/bitswap/get.go @@ -6,9 +6,9 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" + blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) type getBlocksFunc func(context.Context, []*cid.Cid) (<-chan blocks.Block, error) diff --git a/bitswap/message/message.go 
b/bitswap/message/message.go index 93a0b9f7b..de5c92696 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -6,12 +6,12 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" + blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + inet "gx/ipfs/QmU4vCDZTPLDqSDKguWbHCiUe46mZUtmM2g2suBZ9NE8ko/go-libp2p-net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" - inet "gx/ipfs/QmbD5yKbXahNvoMqzeuNyKQA9vAs9fUvJg2GXeWU1fVqY5/go-libp2p-net" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) // TODO move message.go into the bitswap package diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 465953fbd..7e0eb48b7 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -6,10 +6,10 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" - blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" + u "gx/ipfs/QmPsAfmDBnZN3kZGSuNwvCNDZiHneERSKmRcFyG3UkvcT3/go-ipfs-util" + blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) func mkFakeCid(s string) *cid.Cid { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 9be82e6de..d2cd1fd6c 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -5,10 +5,10 @@ import ( 
bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - ifconnmgr "gx/ipfs/QmWfkNorhirGE1Qp3VwBWcnGaj4adv4hNqCYwabMrEYc21/go-libp2p-interface-connmgr" - peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + ifconnmgr "gx/ipfs/QmSAJm4QdTJ3EGF2cvgNcQyXTEbxqWSW1x4kCVV1aJQUQr/go-libp2p-interface-connmgr" + peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) var ( diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index a9a8dc8c5..241da4e6e 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,16 +8,16 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - routing "gx/ipfs/QmPR2JzfKd9poHx9XBhzoFeBBC31ZM3W5iUPKJZWyaoZZm/go-libp2p-routing" - pstore "gx/ipfs/QmPgDWmTmuzvP7QE5zwo1TmjbJme9pmZHNujB2453jkCTr/go-libp2p-peerstore" - host "gx/ipfs/QmRS46AyqtpJBsf1zmQdeizSDEzo1qkWR7rdEuPFAv8237/go-libp2p-host" + host "gx/ipfs/QmP46LGWhzVZTMmt5akNNLfoV8qL4h5wTwmzQxLyDafggd/go-libp2p-host" + routing "gx/ipfs/QmPCGUjMRuBcPybZFpjhzpifwPP9wPRoiy5geTQKU4vqWA/go-libp2p-routing" + ifconnmgr "gx/ipfs/QmSAJm4QdTJ3EGF2cvgNcQyXTEbxqWSW1x4kCVV1aJQUQr/go-libp2p-interface-connmgr" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - ifconnmgr "gx/ipfs/QmWfkNorhirGE1Qp3VwBWcnGaj4adv4hNqCYwabMrEYc21/go-libp2p-interface-connmgr" - ma "gx/ipfs/QmXY77cVe7rVRQXZZQRioukUM7aRW3BTcAgJe12MCtb3Ji/go-multiaddr" - peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + inet "gx/ipfs/QmU4vCDZTPLDqSDKguWbHCiUe46mZUtmM2g2suBZ9NE8ko/go-libp2p-net" + ma "gx/ipfs/QmW8s4zTsUoX1Q6CeYxVKPyqSKbF7H1YDUyTostBtZ8DaG/go-multiaddr" + peer 
"gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" + pstore "gx/ipfs/QmYijbtjCxFEjSXaudaQAUz3LN5VKLssm8WCUsRoqzXmQR/go-libp2p-peerstore" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - inet "gx/ipfs/QmbD5yKbXahNvoMqzeuNyKQA9vAs9fUvJg2GXeWU1fVqY5/go-libp2p-net" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 6d1e11801..f5ed52962 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -3,10 +3,10 @@ package notifications import ( "context" - blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" + blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index d10a0be6b..9373d7097 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -7,8 +7,8 @@ import ( "time" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" + blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/session.go b/bitswap/session.go index 9c8e6a96e..73d9fd1f4 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -7,12 +7,12 @@ import ( notifications 
"github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - loggables "gx/ipfs/QmT4PgCNdv73hnFAqzHqwW44q7M9PWpykSswHDxndquZbc/go-libp2p-loggables" + loggables "gx/ipfs/QmSvcDkiRwB8LuMhUtnvhum2C851Mproo75ZDD19jx43tD/go-libp2p-loggables" lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru" - peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" + blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) const activeWantsLimit = 16 diff --git a/bitswap/session_test.go b/bitswap/session_test.go index 9048e59b4..2536ff0e7 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -8,8 +8,8 @@ import ( blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" + blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) func TestBasicSessions(t *testing.T) { diff --git a/bitswap/stat.go b/bitswap/stat.go index 1c7f3f3e8..2c82c7cae 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -3,7 +3,7 @@ package bitswap import ( "sort" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) type Stat struct { diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 69cdbf0cc..6bc3bf188 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -2,8 +2,8 @@ package bitswap 
import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" - peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" + "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 88aa6d8dc..ee10af3ce 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -9,9 +9,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" - blocks "gx/ipfs/QmSn9Td7xgxm9EV7iEjTckpUWmWApggzPxu7eFGWkkpwin/go-block-format" - peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" + blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" + testutil "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 6ff543d57..6d1ea8ad9 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,10 +5,10 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" - testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" - mockpeernet "gx/ipfs/QmTzs3Gp2rU3HuNayjBVG7qBgbaKWE8bgtwJ7faRxAe9UP/go-libp2p/p2p/net/mock" - peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" + mockpeernet 
"gx/ipfs/Qma23bpHwQrQyvKeBemaeJh7sAoRHggPkgnge1B9489ff5/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmdHG8MAuARdGHxx4rPQASLcvhz24fzjSQq7AJRAQEorq5/go-datastore" + testutil "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index d2b7bd87d..c3debc90d 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,12 +9,12 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" - routing "gx/ipfs/QmPR2JzfKd9poHx9XBhzoFeBBC31ZM3W5iUPKJZWyaoZZm/go-libp2p-routing" - testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" + routing "gx/ipfs/QmPCGUjMRuBcPybZFpjhzpifwPP9wPRoiy5geTQKU4vqWA/go-libp2p-routing" + ifconnmgr "gx/ipfs/QmSAJm4QdTJ3EGF2cvgNcQyXTEbxqWSW1x4kCVV1aJQUQr/go-libp2p-interface-connmgr" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - ifconnmgr "gx/ipfs/QmWfkNorhirGE1Qp3VwBWcnGaj4adv4hNqCYwabMrEYc21/go-libp2p-interface-connmgr" - peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" + testutil "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) var log = logging.Logger("bstestnet") diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 3b0bec59e..0ad9ef773 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -8,10 +8,10 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "gx/ipfs/QmQgLZP9haZheimMHqqAjJh2LhRmNfEoZDfbtkpeMhi9xK/go-testutil" + testutil "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" - p2ptestutil 
"gx/ipfs/QmUUNDRYXgfqdjxTg79ogkciczU5y4WY1tKMU2vEX9CRN7/go-libp2p-netutil" - peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" + p2ptestutil "gx/ipfs/QmZTcPxK6VqrwY94JpKZPvEqAZ6tEr1rLrpcqJbbRZbg2V/go-libp2p-netutil" ds "gx/ipfs/QmdHG8MAuARdGHxx4rPQASLcvhz24fzjSQq7AJRAQEorq5/go-datastore" ds_sync "gx/ipfs/QmdHG8MAuARdGHxx4rPQASLcvhz24fzjSQq7AJRAQEorq5/go-datastore/sync" ) diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 00c7ce303..9a1412785 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) type ThreadSafe struct { diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/wantlist/wantlist_test.go index 07712d98e..3c400f9bf 100644 --- a/bitswap/wantlist/wantlist_test.go +++ b/bitswap/wantlist/wantlist_test.go @@ -3,7 +3,7 @@ package wantlist import ( "testing" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) var testcids []*cid.Cid diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index b4c0458aa..e89d7ef66 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -10,9 +10,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" - peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) type WantManager struct { diff --git 
a/bitswap/workers.go b/bitswap/workers.go index 3ce4f44c7..00710d0af 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -8,11 +8,11 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmXYjuNuxVzXKJCfWasQk1RqkhVLDM9jtUKhqc2WPQmFSB/go-libp2p-peer" + peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" + cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" ) var TaskWorkerCount = 8 From 60b3c996e51508bae4c632f2695d4ba36330969e Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 15 Dec 2017 13:06:30 -0800 Subject: [PATCH 0601/1035] improve basic bitswap test License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@96a00227ac6474c304ac22661a8876674e7eb526 --- bitswap/bitswap_test.go | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index a66ff452c..a3d64557b 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -292,23 +292,22 @@ func TestEmptyKey(t *testing.T) { } } -func assertStat(st *Stat, sblks, rblks, sdata, rdata uint64) error { +func assertStat(t *testing.T, st *Stat, sblks, rblks, sdata, rdata uint64) { if sblks != st.BlocksSent { - return fmt.Errorf("mismatch in blocks sent: %d vs %d", sblks, st.BlocksSent) + t.Errorf("mismatch in blocks sent: %d vs %d", sblks, st.BlocksSent) } if rblks != st.BlocksReceived { - return fmt.Errorf("mismatch in blocks recvd: %d vs %d", rblks, st.BlocksReceived) + t.Errorf("mismatch in blocks recvd: %d vs %d", rblks, st.BlocksReceived) } if sdata != st.DataSent { - return fmt.Errorf("mismatch in data sent: %d vs %d", 
sdata, st.DataSent) + t.Errorf("mismatch in data sent: %d vs %d", sdata, st.DataSent) } if rdata != st.DataReceived { - return fmt.Errorf("mismatch in data recvd: %d vs %d", rdata, st.DataReceived) + t.Errorf("mismatch in data recvd: %d vs %d", rdata, st.DataReceived) } - return nil } func TestBasicBitswap(t *testing.T) { @@ -355,12 +354,20 @@ func TestBasicBitswap(t *testing.T) { t.Fatal(err) } - if err := assertStat(st0, 1, 0, 1, 0); err != nil { + st2, err := instances[2].Exchange.Stat() + if err != nil { t.Fatal(err) } - if err := assertStat(st1, 0, 1, 0, 1); err != nil { - t.Fatal(err) + t.Log("stat node 0") + assertStat(t, st0, 1, 0, uint64(len(blk.RawData())), 0) + t.Log("stat node 1") + assertStat(t, st1, 0, 1, 0, uint64(len(blk.RawData()))) + t.Log("stat node 2") + assertStat(t, st2, 0, 0, 0, 0) + + if !bytes.Equal(blk.RawData(), blocks[0].RawData()) { + t.Errorf("blocks aren't equal: expected %v, actual %v", blocks[0].RawData(), blk.RawData()) } t.Log(blk) From 55911ff3cc65eb9b789857103ba3ea8c51833cc3 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 15 Dec 2017 13:41:53 -0800 Subject: [PATCH 0602/1035] fix races in testnet ConnectTo can be called concurrently from within bitswap. 
License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@9d56edba4b7daa2781fc78c046e28c979e45b98e --- bitswap/testnet/virtual.go | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index c3debc90d..97d251992 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -3,6 +3,7 @@ package bitswap import ( "context" "errors" + "sync" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" @@ -29,6 +30,7 @@ func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { } type network struct { + mu sync.Mutex clients map[peer.ID]bsnet.Receiver routingserver mockrouting.Server delay delay.D @@ -36,6 +38,9 @@ type network struct { } func (n *network) Adapter(p testutil.Identity) bsnet.BitSwapNetwork { + n.mu.Lock() + defer n.mu.Unlock() + client := &networkClient{ local: p.ID(), network: n, @@ -46,6 +51,9 @@ func (n *network) Adapter(p testutil.Identity) bsnet.BitSwapNetwork { } func (n *network) HasPeer(p peer.ID) bool { + n.mu.Lock() + defer n.mu.Unlock() + _, found := n.clients[p] return found } @@ -58,6 +66,9 @@ func (n *network) SendMessage( to peer.ID, message bsmsg.BitSwapMessage) error { + n.mu.Lock() + defer n.mu.Unlock() + receiver, ok := n.clients[to] if !ok { return errors.New("Cannot locate peer on network") @@ -161,18 +172,26 @@ func (nc *networkClient) SetDelegate(r bsnet.Receiver) { } func (nc *networkClient) ConnectTo(_ context.Context, p peer.ID) error { - if !nc.network.HasPeer(p) { + nc.network.mu.Lock() + + otherClient, ok := nc.network.clients[p] + if !ok { + nc.network.mu.Unlock() return errors.New("no such peer in network") } + tag := tagForPeers(nc.local, p) if _, ok := nc.network.conns[tag]; ok { + nc.network.mu.Unlock() log.Warning("ALREADY CONNECTED TO PEER (is this a reconnect? 
test lib needs fixing)") return nil } nc.network.conns[tag] = struct{}{} + nc.network.mu.Unlock() + // TODO: add handling for disconnects - nc.network.clients[p].PeerConnected(nc.local) + otherClient.PeerConnected(nc.local) nc.Receiver.PeerConnected(p) return nil } From 4ee3e6d6ab8114fad585237b8a89a375fd1f8a17 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 15 Dec 2017 15:46:33 -0800 Subject: [PATCH 0603/1035] make bitswap tests pass again with the race detector enabled fixes #2444 License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@3e7d3b57e7cba43e333054dbd9da20f3e55a756a --- bitswap/bitswap_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index a3d64557b..c0b13cabe 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -108,7 +108,7 @@ func TestLargeSwarm(t *testing.T) { if detectrace.WithRace() { // when running with the race detector, 500 instances launches // well over 8k goroutines. This hits a race detector limit. - numInstances = 100 + numInstances = 75 } else if travis.IsRunning() { numInstances = 200 } else { From 73b63db11490c7d67dab40ce231fc55482f03734 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 29 Dec 2017 13:00:24 -0800 Subject: [PATCH 0604/1035] only construct bitswap event loggable if necessary Base58 encoding cids/peerIDs isn't exactly fast. 
License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@f765b1502f75c31a8f931064bc3fbf077f6392e8 --- bitswap/workers.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index 00710d0af..8a1f420bd 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -59,11 +59,13 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { if !ok { continue } - log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableMap{ - "ID": id, - "Target": envelope.Peer.Pretty(), - "Block": envelope.Block.Cid().String(), - }) + log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableF(func() map[string]interface{} { + return logging.LoggableMap{ + "ID": id, + "Target": envelope.Peer.Pretty(), + "Block": envelope.Block.Cid().String(), + } + })) // update the BS ledger to reflect sent message // TODO: Should only track *useful* messages in ledger From 5be3e19b149b240938fe6648dfc10b71b6c7b279 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 24 Jan 2018 15:55:28 -0800 Subject: [PATCH 0605/1035] gx: mass update License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@ab1eda3e8b2a53c525a5328148dd49f6a0285ce6 --- bitswap/bitswap.go | 6 +++--- bitswap/bitswap_test.go | 10 +++++----- bitswap/decision/bench_test.go | 8 ++++---- bitswap/decision/engine.go | 4 ++-- bitswap/decision/engine_test.go | 10 +++++----- bitswap/decision/ledger.go | 4 ++-- bitswap/decision/peer_request_queue.go | 4 ++-- bitswap/decision/peer_request_queue_test.go | 6 +++--- bitswap/get.go | 4 ++-- bitswap/message/message.go | 6 +++--- bitswap/message/message_test.go | 6 +++--- bitswap/network/interface.go | 6 +++--- bitswap/network/ipfs_impl.go | 16 ++++++++-------- bitswap/notifications/notifications.go | 4 ++-- bitswap/notifications/notifications_test.go | 4 ++-- bitswap/session.go | 8 ++++---- bitswap/session_test.go | 4 ++-- bitswap/stat.go | 2 +- bitswap/testnet/interface.go | 4 ++-- 
bitswap/testnet/network_test.go | 6 +++--- bitswap/testnet/peernet.go | 8 ++++---- bitswap/testnet/virtual.go | 10 +++++----- bitswap/testutils.go | 10 +++++----- bitswap/wantlist/wantlist.go | 2 +- bitswap/wantlist/wantlist_test.go | 2 +- bitswap/wantmanager.go | 4 ++-- bitswap/workers.go | 4 ++-- 27 files changed, 81 insertions(+), 81 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index ec12e7be3..e74438c44 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -23,9 +23,9 @@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" - blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) var log = logging.Logger("bitswap") diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index c0b13cabe..23cce9303 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -17,11 +17,11 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" - p2ptestutil "gx/ipfs/QmZTcPxK6VqrwY94JpKZPvEqAZ6tEr1rLrpcqJbbRZbg2V/go-libp2p-netutil" - tu "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" - travis "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil/ci/travis" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + p2ptestutil 
"gx/ipfs/QmV1axkk86DDkYwS269AvPy9eV5h7mUyHveJkSVHPjrQtY/go-libp2p-netutil" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" + tu "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" + travis "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil/ci/travis" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 288bb7e7d..5f06bcfec 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -6,10 +6,10 @@ import ( "testing" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - u "gx/ipfs/QmPsAfmDBnZN3kZGSuNwvCNDZiHneERSKmRcFyG3UkvcT3/go-ipfs-util" - "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" - "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + u "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util" + "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index bad932b7e..d81db4cb2 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -10,8 +10,8 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" - blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" + peer 
"gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) // TODO consider taking responsibility for other types of requests. For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 1a12d019b..eea38a6f4 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -11,11 +11,11 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" - peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" - blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" - ds "gx/ipfs/QmdHG8MAuARdGHxx4rPQASLcvhz24fzjSQq7AJRAQEorq5/go-datastore" - dssync "gx/ipfs/QmdHG8MAuARdGHxx4rPQASLcvhz24fzjSQq7AJRAQEorq5/go-datastore/sync" - testutil "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" + ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore" + dssync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync" + peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" + testutil "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index e3ce24df6..210a9ffe3 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,8 +6,8 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) func newLedger(p peer.ID) *ledger { diff --git 
a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 00123ac8a..46606eabf 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,8 +7,8 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) type peerRequestQueue interface { diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 3416a5ca1..fdd8eb666 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -9,9 +9,9 @@ import ( "testing" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - u "gx/ipfs/QmPsAfmDBnZN3kZGSuNwvCNDZiHneERSKmRcFyG3UkvcT3/go-ipfs-util" - "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + u "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/get.go b/bitswap/get.go index aa26de4ef..f10a62d68 100644 --- a/bitswap/get.go +++ b/bitswap/get.go @@ -6,9 +6,9 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" + blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + cid 
"gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) type getBlocksFunc func(context.Context, []*cid.Cid) (<-chan blocks.Block, error) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index de5c92696..cb1fb562c 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -6,12 +6,12 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" + blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" - inet "gx/ipfs/QmU4vCDZTPLDqSDKguWbHCiUe46mZUtmM2g2suBZ9NE8ko/go-libp2p-net" + inet "gx/ipfs/QmQm7WmgYCa4RSz76tKEYpRjApjnRw8ZTUVQC15b8JM4a2/go-libp2p-net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) // TODO move message.go into the bitswap package diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 7e0eb48b7..1ab0a9c40 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -6,10 +6,10 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" - u "gx/ipfs/QmPsAfmDBnZN3kZGSuNwvCNDZiHneERSKmRcFyG3UkvcT3/go-ipfs-util" - blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" + u "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) func mkFakeCid(s string) *cid.Cid { diff --git 
a/bitswap/network/interface.go b/bitswap/network/interface.go index d2cd1fd6c..d111f499c 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -5,10 +5,10 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - ifconnmgr "gx/ipfs/QmSAJm4QdTJ3EGF2cvgNcQyXTEbxqWSW1x4kCVV1aJQUQr/go-libp2p-interface-connmgr" - peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + ifconnmgr "gx/ipfs/QmZdqgq4h6AdodSmPwb5FZzhwnmhchu1hhJgv8tnFdod1o/go-libp2p-interface-connmgr" + peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) var ( diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 241da4e6e..e0e6649d5 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,16 +8,16 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - host "gx/ipfs/QmP46LGWhzVZTMmt5akNNLfoV8qL4h5wTwmzQxLyDafggd/go-libp2p-host" - routing "gx/ipfs/QmPCGUjMRuBcPybZFpjhzpifwPP9wPRoiy5geTQKU4vqWA/go-libp2p-routing" - ifconnmgr "gx/ipfs/QmSAJm4QdTJ3EGF2cvgNcQyXTEbxqWSW1x4kCVV1aJQUQr/go-libp2p-interface-connmgr" + inet "gx/ipfs/QmQm7WmgYCa4RSz76tKEYpRjApjnRw8ZTUVQC15b8JM4a2/go-libp2p-net" + routing "gx/ipfs/QmRijoA6zGS98ELTDbGsLWPZbVotYsGbjp3RbXcKCYBeon/go-libp2p-routing" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - inet "gx/ipfs/QmU4vCDZTPLDqSDKguWbHCiUe46mZUtmM2g2suBZ9NE8ko/go-libp2p-net" - ma "gx/ipfs/QmW8s4zTsUoX1Q6CeYxVKPyqSKbF7H1YDUyTostBtZ8DaG/go-multiaddr" - peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" - pstore "gx/ipfs/QmYijbtjCxFEjSXaudaQAUz3LN5VKLssm8WCUsRoqzXmQR/go-libp2p-peerstore" + ma "gx/ipfs/QmWWQ2Txc2c6tqjsBpzg5Ar652cHPGNsQQp2SejkNmkUMb/go-multiaddr" ggio 
"gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + ifconnmgr "gx/ipfs/QmZdqgq4h6AdodSmPwb5FZzhwnmhchu1hhJgv8tnFdod1o/go-libp2p-interface-connmgr" + peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + pstore "gx/ipfs/QmeZVQzUrXqaszo24DAoHfGzcmCptN9JyngLkGAiEfk2x7/go-libp2p-peerstore" + host "gx/ipfs/QmfCtHMCd9xFvehvHeVxtKVXJTMVTuHhyPRVHEXetn87vL/go-libp2p-host" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index f5ed52962..ba5b379ec 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -3,10 +3,10 @@ package notifications import ( "context" - blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" + blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 9373d7097..0377c307d 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -7,8 +7,8 @@ import ( "time" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/session.go 
b/bitswap/session.go index 73d9fd1f4..33875f069 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -8,11 +8,11 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - loggables "gx/ipfs/QmSvcDkiRwB8LuMhUtnvhum2C851Mproo75ZDD19jx43tD/go-libp2p-loggables" lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru" - peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" - blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + loggables "gx/ipfs/QmaDoQyTYCS3DrPLBLXMixXfuCstBVVR81J3UY1vMxghpT/go-libp2p-loggables" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) const activeWantsLimit = 16 diff --git a/bitswap/session_test.go b/bitswap/session_test.go index 2536ff0e7..645890454 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -8,8 +8,8 @@ import ( blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) func TestBasicSessions(t *testing.T) { diff --git a/bitswap/stat.go b/bitswap/stat.go index 2c82c7cae..825888abc 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -3,7 +3,7 @@ package bitswap import ( "sort" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) type Stat struct { diff --git a/bitswap/testnet/interface.go 
b/bitswap/testnet/interface.go index 6bc3bf188..53eb6ea62 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -2,8 +2,8 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" - "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" + peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index ee10af3ce..4e54e4eb8 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -9,9 +9,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" - blocks "gx/ipfs/QmYsEQydGrsxNZfAiskvQ76N2xE9hDQtSAkRSynwMiUK3c/go-block-format" - testutil "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" + peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" + testutil "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 6d1ea8ad9..af7b05940 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,10 +5,10 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" - peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" - mockpeernet "gx/ipfs/Qma23bpHwQrQyvKeBemaeJh7sAoRHggPkgnge1B9489ff5/go-libp2p/p2p/net/mock" - ds 
"gx/ipfs/QmdHG8MAuARdGHxx4rPQASLcvhz24fzjSQq7AJRAQEorq5/go-datastore" - testutil "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" + mockpeernet "gx/ipfs/QmNRN4eZGmY89CRC4T5PC4xDYRx6GkDKEfRnvrT65fVeio/go-libp2p/p2p/net/mock" + ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore" + peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + testutil "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 97d251992..643eebad6 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,12 +10,12 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - routing "gx/ipfs/QmPCGUjMRuBcPybZFpjhzpifwPP9wPRoiy5geTQKU4vqWA/go-libp2p-routing" - ifconnmgr "gx/ipfs/QmSAJm4QdTJ3EGF2cvgNcQyXTEbxqWSW1x4kCVV1aJQUQr/go-libp2p-interface-connmgr" + routing "gx/ipfs/QmRijoA6zGS98ELTDbGsLWPZbVotYsGbjp3RbXcKCYBeon/go-libp2p-routing" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" - testutil "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + ifconnmgr "gx/ipfs/QmZdqgq4h6AdodSmPwb5FZzhwnmhchu1hhJgv8tnFdod1o/go-libp2p-interface-connmgr" + peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + testutil "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" ) var log = logging.Logger("bstestnet") diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 0ad9ef773..b361e53f3 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -8,12 +8,12 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" datastore2 
"github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "gx/ipfs/QmeDA8gNhvRTsbrjEieay5wezupJDiky8xvCzDABbsGzmp/go-testutil" + testutil "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" - peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" - p2ptestutil "gx/ipfs/QmZTcPxK6VqrwY94JpKZPvEqAZ6tEr1rLrpcqJbbRZbg2V/go-libp2p-netutil" - ds "gx/ipfs/QmdHG8MAuARdGHxx4rPQASLcvhz24fzjSQq7AJRAQEorq5/go-datastore" - ds_sync "gx/ipfs/QmdHG8MAuARdGHxx4rPQASLcvhz24fzjSQq7AJRAQEorq5/go-datastore/sync" + ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore" + ds_sync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync" + p2ptestutil "gx/ipfs/QmV1axkk86DDkYwS269AvPy9eV5h7mUyHveJkSVHPjrQtY/go-libp2p-netutil" + peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 9a1412785..c2225b88d 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) type ThreadSafe struct { diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/wantlist/wantlist_test.go index 3c400f9bf..37c5c91c6 100644 --- a/bitswap/wantlist/wantlist_test.go +++ b/bitswap/wantlist/wantlist_test.go @@ -3,7 +3,7 @@ package wantlist import ( "testing" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) var testcids []*cid.Cid diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index e89d7ef66..c4cc7ea35 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -11,8 +11,8 @@ import ( wantlist 
"github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" - peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) type WantManager struct { diff --git a/bitswap/workers.go b/bitswap/workers.go index 8a1f420bd..11b9b2d82 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -11,8 +11,8 @@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmWNY7dV54ZDYmTA1ykVdwNCqC11mpU4zSUp6XDpLTH9eG/go-libp2p-peer" - cid "gx/ipfs/QmeSrf6pzut73u6zLQkRFQ3ygt3k6XFT2kjdYP8Tnkwwyg/go-cid" + peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) var TaskWorkerCount = 8 From 981d464e05bbc50ab866d2e3ae346e859e1a5f39 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Sat, 27 Jan 2018 18:03:59 -0800 Subject: [PATCH 0606/1035] update go-lib2p-loggables fixes a UUID bug I introduced (UUIDs were always an error value) License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@defd3665e9e2c228bd3977b0b2dc6db7e81963fe --- bitswap/bitswap_test.go | 2 +- bitswap/session.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 23cce9303..0504991fd 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -17,7 +17,7 @@ import ( detectrace 
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - p2ptestutil "gx/ipfs/QmV1axkk86DDkYwS269AvPy9eV5h7mUyHveJkSVHPjrQtY/go-libp2p-netutil" + p2ptestutil "gx/ipfs/QmWUugnJBbcuin8qdfiCYKAsNkG8NeDLhzoBqRaqXhAHd4/go-libp2p-netutil" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" tu "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" diff --git a/bitswap/session.go b/bitswap/session.go index 33875f069..d562ac235 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -7,10 +7,10 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" + loggables "gx/ipfs/QmQ3c5AP6yjqD3E4get5atkvfaUU4rubWquoL2e8ycjUSu/go-libp2p-loggables" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru" peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" - loggables "gx/ipfs/QmaDoQyTYCS3DrPLBLXMixXfuCstBVVR81J3UY1vMxghpT/go-libp2p-loggables" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index af7b05940..7f768e137 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,7 +5,7 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" - mockpeernet "gx/ipfs/QmNRN4eZGmY89CRC4T5PC4xDYRx6GkDKEfRnvrT65fVeio/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmPd5qhppUqewTQMfStvNNCFtcxiWGsnE6Vs3va6788gsX/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore" peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" testutil "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" diff --git 
a/bitswap/testutils.go b/bitswap/testutils.go index b361e53f3..48dd35653 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -12,7 +12,7 @@ import ( ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore" ds_sync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync" - p2ptestutil "gx/ipfs/QmV1axkk86DDkYwS269AvPy9eV5h7mUyHveJkSVHPjrQtY/go-libp2p-netutil" + p2ptestutil "gx/ipfs/QmWUugnJBbcuin8qdfiCYKAsNkG8NeDLhzoBqRaqXhAHd4/go-libp2p-netutil" peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" ) From c8ccf3ca6336b7f2167f72a6d0218a76a6740077 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 31 Jan 2018 18:54:57 -0800 Subject: [PATCH 0607/1035] gx: update go-log License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@c26feecfaa7d0dff2bb22158b3d84ec1eb50bccc --- bitswap/bitswap.go | 4 ++-- bitswap/bitswap_test.go | 6 +++--- bitswap/decision/bench_test.go | 4 ++-- bitswap/decision/engine.go | 4 ++-- bitswap/decision/engine_test.go | 4 ++-- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/decision/peer_request_queue_test.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 14 +++++++------- bitswap/session.go | 6 +++--- bitswap/testnet/interface.go | 4 ++-- bitswap/testnet/network_test.go | 4 ++-- bitswap/testnet/peernet.go | 6 +++--- bitswap/testnet/virtual.go | 10 +++++----- bitswap/testutils.go | 6 +++--- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 4 ++-- 19 files changed, 45 insertions(+), 45 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index e74438c44..235233304 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -19,11 +19,11 @@ import ( flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" + logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" metrics 
"gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" - logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 0504991fd..6558dce23 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -17,11 +17,11 @@ import ( detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - p2ptestutil "gx/ipfs/QmWUugnJBbcuin8qdfiCYKAsNkG8NeDLhzoBqRaqXhAHd4/go-libp2p-netutil" + tu "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" + travis "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil/ci/travis" + p2ptestutil "gx/ipfs/QmYVR3C8DWPHdHxvLtNFYfjsXgaRAdh6hPMNH3KiwCgu4o/go-libp2p-netutil" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" - tu "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" - travis "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil/ci/travis" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 5f06bcfec..062eb20ff 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,9 +7,9 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" u "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util" - "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" + "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" - "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index d81db4cb2..295078e72 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -9,8 +9,8 @@ import ( bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" + peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index eea38a6f4..faa0a3e2a 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -13,9 +13,9 @@ import ( message "github.com/ipfs/go-ipfs/exchange/bitswap/message" ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore" dssync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync" - peer 
"gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" + peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" - testutil "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 210a9ffe3..c4679cd1f 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,7 +6,7 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 46606eabf..64762f23b 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "github.com/ipfs/go-ipfs/thirdparty/pq" - peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index fdd8eb666..c21116ae6 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -10,8 +10,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" u "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util" + "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" 
- "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index cb1fb562c..7ede57f87 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -8,7 +8,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" - inet "gx/ipfs/QmQm7WmgYCa4RSz76tKEYpRjApjnRw8ZTUVQC15b8JM4a2/go-libp2p-net" + inet "gx/ipfs/QmXfkENeeBvh3zYA51MaSdGUdBjhQ99cP5WQe8zgr6wchG/go-libp2p-net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index d111f499c..1f63c6c22 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -6,8 +6,8 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" - ifconnmgr "gx/ipfs/QmZdqgq4h6AdodSmPwb5FZzhwnmhchu1hhJgv8tnFdod1o/go-libp2p-interface-connmgr" - peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" + ifconnmgr "gx/ipfs/Qmax8X1Kfahf5WfSB68EWDG3d3qyS3Sqs1v412fjPTfRwx/go-libp2p-interface-connmgr" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index e0e6649d5..2a2a1ea47 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,16 +8,16 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - inet "gx/ipfs/QmQm7WmgYCa4RSz76tKEYpRjApjnRw8ZTUVQC15b8JM4a2/go-libp2p-net" - routing 
"gx/ipfs/QmRijoA6zGS98ELTDbGsLWPZbVotYsGbjp3RbXcKCYBeon/go-libp2p-routing" - logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + host "gx/ipfs/QmNmJZL7FQySMtE2BQuLMuZg2EB2CLEunJJUSVSc9YnnbV/go-libp2p-host" + logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" + routing "gx/ipfs/QmTiWLZ6Fo5j4KcTVutZJ5KWRRJrbxzmxA4td8NfEdrPh7/go-libp2p-routing" ma "gx/ipfs/QmWWQ2Txc2c6tqjsBpzg5Ar652cHPGNsQQp2SejkNmkUMb/go-multiaddr" + pstore "gx/ipfs/QmXauCuJzmzapetmC6W4TuDJLL1yFFrVzSHoWv8YdbmnxH/go-libp2p-peerstore" + inet "gx/ipfs/QmXfkENeeBvh3zYA51MaSdGUdBjhQ99cP5WQe8zgr6wchG/go-libp2p-net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - ifconnmgr "gx/ipfs/QmZdqgq4h6AdodSmPwb5FZzhwnmhchu1hhJgv8tnFdod1o/go-libp2p-interface-connmgr" - peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" + ifconnmgr "gx/ipfs/Qmax8X1Kfahf5WfSB68EWDG3d3qyS3Sqs1v412fjPTfRwx/go-libp2p-interface-connmgr" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" - pstore "gx/ipfs/QmeZVQzUrXqaszo24DAoHfGzcmCptN9JyngLkGAiEfk2x7/go-libp2p-peerstore" - host "gx/ipfs/QmfCtHMCd9xFvehvHeVxtKVXJTMVTuHhyPRVHEXetn87vL/go-libp2p-host" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/session.go b/bitswap/session.go index d562ac235..07444ad36 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -7,12 +7,12 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - loggables "gx/ipfs/QmQ3c5AP6yjqD3E4get5atkvfaUU4rubWquoL2e8ycjUSu/go-libp2p-loggables" - logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru" - peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + peer 
"gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" + loggables "gx/ipfs/Qmf9JgVLz46pxPXwG2eWSJpkqVCcjD4rp7zCRi2KP6GTNB/go-libp2p-loggables" ) const activeWantsLimit = 16 diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 53eb6ea62..334bf9809 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -2,8 +2,8 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" - "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" + "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" + peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 4e54e4eb8..90c510813 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -9,9 +9,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" + peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" - testutil "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 7f768e137..effe1bfac 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,10 +5,10 @@ import ( bsnet 
"github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" - mockpeernet "gx/ipfs/QmPd5qhppUqewTQMfStvNNCFtcxiWGsnE6Vs3va6788gsX/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmNh1kGFFdsPu79KNSaL4NUKUPb4Eiz4KHdMtFY6664RDp/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore" - peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" - testutil "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" + testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" + peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 643eebad6..c5ba6e0ae 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,12 +10,12 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - routing "gx/ipfs/QmRijoA6zGS98ELTDbGsLWPZbVotYsGbjp3RbXcKCYBeon/go-libp2p-routing" - logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - ifconnmgr "gx/ipfs/QmZdqgq4h6AdodSmPwb5FZzhwnmhchu1hhJgv8tnFdod1o/go-libp2p-interface-connmgr" - peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" + routing "gx/ipfs/QmTiWLZ6Fo5j4KcTVutZJ5KWRRJrbxzmxA4td8NfEdrPh7/go-libp2p-routing" + testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" + peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" + ifconnmgr "gx/ipfs/Qmax8X1Kfahf5WfSB68EWDG3d3qyS3Sqs1v412fjPTfRwx/go-libp2p-interface-connmgr" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" - testutil "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" ) var log = logging.Logger("bstestnet") diff --git a/bitswap/testutils.go b/bitswap/testutils.go 
index 48dd35653..3f9c04084 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -8,12 +8,12 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - testutil "gx/ipfs/QmfB65MYJqaKzBiMvW47fquCRhmEeXW6AhrJSGM7TeY5eG/go-testutil" + testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore" ds_sync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync" - p2ptestutil "gx/ipfs/QmWUugnJBbcuin8qdfiCYKAsNkG8NeDLhzoBqRaqXhAHd4/go-libp2p-netutil" - peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + p2ptestutil "gx/ipfs/QmYVR3C8DWPHdHxvLtNFYfjsXgaRAdh6hPMNH3KiwCgu4o/go-libp2p-netutil" + peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index c4cc7ea35..0e6453d6b 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -11,7 +11,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" - peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) diff --git a/bitswap/workers.go b/bitswap/workers.go index 11b9b2d82..38a5df9d1 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -8,10 +8,10 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx 
"gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" - logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/Qma7H6RW8wRrfZpNSXwxYGcd1E149s42FpWNpDNieSVrnU/go-libp2p-peer" + peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) From 3ed739d4ed4353378bae9ad4af1b1712d40b1900 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Sun, 4 Feb 2018 15:09:03 -0800 Subject: [PATCH 0608/1035] shutdown notifications engine when closing a bitswap session License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@8028bc49c32d72a67b24cc5c54b0ee4e0f4ac39d --- bitswap/session.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bitswap/session.go b/bitswap/session.go index 07444ad36..049be4e9e 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -83,6 +83,7 @@ func (bs *Bitswap) NewSession(ctx context.Context) *Session { } func (bs *Bitswap) removeSession(s *Session) { + s.notif.Shutdown() bs.sessLk.Lock() defer bs.sessLk.Unlock() for i := 0; i < len(bs.sessions); i++ { From 53a979953864455eaf8166bf0d09b0ba60d7903a Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 5 Feb 2018 12:14:35 -0800 Subject: [PATCH 0609/1035] WIP: fix wantlist clearing by closing down session License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@7787e3d17ea61f03272c4cf4553023f75d6df57a --- bitswap/session.go | 8 ++++++++ bitswap/session_test.go | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/bitswap/session.go b/bitswap/session.go index 049be4e9e..bc824dbee 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -84,6 +84,14 @@ func (bs *Bitswap) NewSession(ctx context.Context) *Session { func (bs *Bitswap) removeSession(s *Session) { s.notif.Shutdown() + + live := make([]*cid.Cid, 0, len(s.liveWants)) + for c := range s.liveWants { + cs, _ := cid.Cast([]byte(c)) + live = 
append(live, cs) + } + bs.CancelWants(live, s.id) + bs.sessLk.Lock() defer bs.sessLk.Unlock() for i := 0; i < len(bs.sessions); i++ { diff --git a/bitswap/session_test.go b/bitswap/session_test.go index 645890454..2fe4672b0 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -285,3 +285,36 @@ func TestMultipleSessions(t *testing.T) { } _ = blkch } + +func TestWantlistClearsOnCancel(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := getVirtualNetwork() + sesgen := NewTestSessionGenerator(vnet) + defer sesgen.Close() + bgen := blocksutil.NewBlockGenerator() + + blks := bgen.Blocks(10) + var cids []*cid.Cid + for _, blk := range blks { + cids = append(cids, blk.Cid()) + } + + inst := sesgen.Instances(1) + + a := inst[0] + + ctx1, cancel1 := context.WithCancel(ctx) + ses := a.Exchange.NewSession(ctx1) + + _, err := ses.GetBlocks(ctx, cids) + if err != nil { + t.Fatal(err) + } + cancel1() + + if len(a.Exchange.GetWantlist()) > 0 { + t.Fatal("expected empty wantlist") + } +} From e4649c3914f1e3cb96ba60a491e438df98b4c5fc Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 8 Feb 2018 17:48:22 -0800 Subject: [PATCH 0610/1035] remove excessive time.Now() calls from bitswap sessions License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@2f6fa4f50761c85b74ac50b4a33cd28f1c60b365 --- bitswap/session.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bitswap/session.go b/bitswap/session.go index bc824dbee..937376723 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -279,8 +279,9 @@ func (s *Session) receiveBlock(ctx context.Context, blk blocks.Block) { } func (s *Session) wantBlocks(ctx context.Context, ks []*cid.Cid) { + now := time.Now() for _, c := range ks { - s.liveWants[c.KeyString()] = time.Now() + s.liveWants[c.KeyString()] = now } s.bs.wm.WantBlocks(ctx, ks, s.activePeersArr, s.id) } From a9b5aaef0720aa67f319f76443b5552af3edf623 Mon Sep 17 
00:00:00 2001 From: Steven Allen Date: Fri, 9 Feb 2018 12:19:21 -0800 Subject: [PATCH 0611/1035] bitswap: finish unsubscribing from the pubsub instance before shutting it down Otherwise, we'll deadlock and leak a goroutine. This fix is kind of crappy but modifying the pubsub library would have been worse (and, really, it *is* reasonable to say "don't use the pubsub instance after shutting it down"). License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@e39ba627b1c4a461af278fb82a7d28ab730a596c --- bitswap/notifications/notifications.go | 46 ++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 3 deletions(-) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index ba5b379ec..defea700a 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -2,6 +2,7 @@ package notifications import ( "context" + "sync" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" @@ -18,18 +19,33 @@ type PubSub interface { } func New() PubSub { - return &impl{*pubsub.New(bufferSize)} + return &impl{ + wrapped: *pubsub.New(bufferSize), + cancel: make(chan struct{}), + } } type impl struct { wrapped pubsub.PubSub + + // These two fields make up a shutdown "lock". + // We need them as calling, e.g., `Unsubscribe` after calling `Shutdown` + // blocks forever and fixing this in pubsub would be rather invasive. + cancel chan struct{} + wg sync.WaitGroup } func (ps *impl) Publish(block blocks.Block) { ps.wrapped.Pub(block, block.Cid().KeyString()) } +// Not safe to call more than once. func (ps *impl) Shutdown() { + // Interrupt in-progress subscriptions. + close(ps.cancel) + // Wait for them to finish. + ps.wg.Wait() + // shutdown the pubsub. 
ps.wrapped.Shutdown() } @@ -44,12 +60,34 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...*cid.Cid) <-chan blocks.B close(blocksCh) return blocksCh } + + // prevent shutdown + ps.wg.Add(1) + + // check if shutdown *after* preventing shutdowns. + select { + case <-ps.cancel: + // abort, allow shutdown to continue. + ps.wg.Done() + close(blocksCh) + return blocksCh + default: + } + ps.wrapped.AddSubOnceEach(valuesCh, toStrings(keys)...) go func() { - defer close(blocksCh) - defer ps.wrapped.Unsub(valuesCh) // with a len(keys) buffer, this is an optimization + defer func() { + ps.wrapped.Unsub(valuesCh) + close(blocksCh) + + // Unblock shutdown. + ps.wg.Done() + }() + for { select { + case <-ps.cancel: + return case <-ctx.Done(): return case val, ok := <-valuesCh: @@ -61,6 +99,8 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...*cid.Cid) <-chan blocks.B return } select { + case <-ps.cancel: + return case <-ctx.Done(): return case blocksCh <- block: // continue From e61dc73bfef6aa5f55e4b9ddcb5dac02be25de0f Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 9 Feb 2018 17:33:57 -0800 Subject: [PATCH 0612/1035] bitswap: test canceling subscription context after shutting down License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@ab65a1849c9e9ccaa760c2a85ded065fbd1bbb43 --- bitswap/notifications/notifications_test.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 0377c307d..a70a0755a 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -100,6 +100,25 @@ func TestDuplicateSubscribe(t *testing.T) { assertBlocksEqual(t, e1, r2) } +func TestShutdownBeforeUnsubscribe(t *testing.T) { + e1 := blocks.NewBlock([]byte("1")) + + n := New() + ctx, cancel := context.WithCancel(context.Background()) + ch := n.Subscribe(ctx, e1.Cid()) // no keys provided + 
n.Shutdown() + cancel() + + select { + case _, ok := <-ch: + if ok { + t.Fatal("channel should have been closed") + } + default: + t.Fatal("channel should have been closed") + } +} + func TestSubscribeIsANoopWhenCalledWithNoKeys(t *testing.T) { n := New() defer n.Shutdown() From 35f1f237d6d41275a18016c7fe60620e2dfe00d2 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Sun, 11 Feb 2018 12:51:50 -0800 Subject: [PATCH 0613/1035] avoid publishing if notification system has been shut down (will deadlock) License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@af6e6f0b4378d6aaa3827afee1c3f4dac004f813 --- bitswap/notifications/notifications.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index defea700a..9a6f10b52 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -36,6 +36,16 @@ type impl struct { } func (ps *impl) Publish(block blocks.Block) { + ps.wg.Add(1) + defer ps.wg.Done() + + select { + case <-ps.cancel: + // Already shutdown, bail. 
+ return + default: + } + ps.wrapped.Pub(block, block.Cid().KeyString()) } From 92eb2643c94af09f509cb11fd47a995f29becd95 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Mon, 12 Feb 2018 12:35:34 +0100 Subject: [PATCH 0614/1035] Extract go-detect-race from Godeps I have forked it, put it under ipfs namespace, published to gx License: MIT Signed-off-by: Hector Sanjuan This commit was moved from ipfs/go-bitswap@3633c0b111dbcba045f8e60a594207c18f61ffe5 --- bitswap/bitswap_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 6558dce23..1a5771a8c 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -15,13 +15,12 @@ import ( mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "github.com/ipfs/go-ipfs/thirdparty/delay" - detectrace "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race" - tu "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" travis "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil/ci/travis" p2ptestutil "gx/ipfs/QmYVR3C8DWPHdHxvLtNFYfjsXgaRAdh6hPMNH3KiwCgu4o/go-libp2p-netutil" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" + detectrace "gx/ipfs/Qmf7HqcW7LtCi1W8y2bdx2eJpze74jkbKqpByxgXikdbLF/go-detect-race" ) // FIXME the tests are really sensitive to the network delay. fix them to work From b2d0d328b98ee27fcf4bb0acf41ea6fb3b3dabbd Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Fri, 9 Feb 2018 14:36:19 +0100 Subject: [PATCH 0615/1035] Extract: flags and thirdparty/delay submodules They have been moved to their own repositories: * github.com/ipfs/go-ipfs-delay * github.com/ipfs/go-ipfs-flags History has been preserved. They have been published with gx'ed. Imports have been updated and re-ordered accordingly. 
License: MIT Signed-off-by: Hector Sanjuan This commit was moved from ipfs/go-bitswap@bd7ae31c856ccb8c990286ec06755b1c17b036fa --- bitswap/bitswap.go | 5 +++-- bitswap/bitswap_test.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 2 +- 5 files changed, 7 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 235233304..cdd4f633a 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -10,15 +10,16 @@ import ( "sync/atomic" "time" + "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" + blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" exchange "github.com/ipfs/go-ipfs/exchange" decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - flags "github.com/ipfs/go-ipfs/flags" - "github.com/ipfs/go-ipfs/thirdparty/delay" + flags "gx/ipfs/QmRMGdC6HKdLsPDABL9aXPDidrpmEHzJqFWSvshkbn9Hj8/go-ipfs-flags" logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 1a5771a8c..4df657068 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -13,7 +13,7 @@ import ( decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" - delay "github.com/ipfs/go-ipfs/thirdparty/delay" + delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" tu "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" travis "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil/ci/travis" diff --git 
a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 90c510813..0be2a9266 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -8,7 +8,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" - delay "github.com/ipfs/go-ipfs/thirdparty/delay" + delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index c5ba6e0ae..c7589cd90 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -8,7 +8,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" - delay "github.com/ipfs/go-ipfs/thirdparty/delay" + delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" routing "gx/ipfs/QmTiWLZ6Fo5j4KcTVutZJ5KWRRJrbxzmxA4td8NfEdrPh7/go-libp2p-routing" diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 3f9c04084..7ddf08030 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -7,7 +7,7 @@ import ( blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" - delay "github.com/ipfs/go-ipfs/thirdparty/delay" + delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore" 
From f6748c1571e2708179115920d7dab11058222b22 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Mon, 12 Feb 2018 11:55:03 +0100 Subject: [PATCH 0616/1035] Import re-ordering License: MIT Signed-off-by: Hector Sanjuan This commit was moved from ipfs/go-bitswap@c35e6cbc7cfcafcbad243f52f7be474f63641fc6 --- bitswap/bitswap.go | 3 +-- bitswap/bitswap_test.go | 2 +- bitswap/testnet/network_test.go | 1 + bitswap/testnet/virtual.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index cdd4f633a..081bbf067 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -10,8 +10,6 @@ import ( "sync/atomic" "time" - "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" exchange "github.com/ipfs/go-ipfs/exchange" decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" @@ -19,6 +17,7 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" + "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" flags "gx/ipfs/QmRMGdC6HKdLsPDABL9aXPDidrpmEHzJqFWSvshkbn9Hj8/go-ipfs-flags" logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 4df657068..26fed27d1 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -13,8 +13,8 @@ import ( decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" mockrouting "github.com/ipfs/go-ipfs/routing/mock" - delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" + delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" tu "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" travis 
"gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil/ci/travis" p2ptestutil "gx/ipfs/QmYVR3C8DWPHdHxvLtNFYfjsXgaRAdh6hPMNH3KiwCgu4o/go-libp2p-netutil" diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 0be2a9266..4cb7551db 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -8,6 +8,7 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" + delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index c7589cd90..bcb00d14e 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -8,8 +8,8 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockrouting "github.com/ipfs/go-ipfs/routing/mock" - delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" + delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" routing "gx/ipfs/QmTiWLZ6Fo5j4KcTVutZJ5KWRRJrbxzmxA4td8NfEdrPh7/go-libp2p-routing" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" From b50aaf7293528ab599ab664d59c3153640a1f5b5 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 12 Feb 2018 21:02:19 -0800 Subject: [PATCH 0617/1035] bitswap: actually *update* wantlist entries in outbound wantlist messages Before, we weren't using a pointer so we were throwing away the update. 
License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@30cc892def4a2d58e94378deb2153ddb8c61871d --- bitswap/message/message.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 7ede57f87..9a166c942 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -50,7 +50,7 @@ type Exportable interface { type impl struct { full bool - wantlist map[string]Entry + wantlist map[string]*Entry blocks map[string]blocks.Block } @@ -61,7 +61,7 @@ func New(full bool) BitSwapMessage { func newMsg(full bool) *impl { return &impl{ blocks: make(map[string]blocks.Block), - wantlist: make(map[string]Entry), + wantlist: make(map[string]*Entry), full: full, } } @@ -122,7 +122,7 @@ func (m *impl) Empty() bool { func (m *impl) Wantlist() []Entry { out := make([]Entry, 0, len(m.wantlist)) for _, e := range m.wantlist { - out = append(out, e) + out = append(out, *e) } return out } @@ -151,7 +151,7 @@ func (m *impl) addEntry(c *cid.Cid, priority int, cancel bool) { e.Priority = priority e.Cancel = cancel } else { - m.wantlist[k] = Entry{ + m.wantlist[k] = &Entry{ Entry: &wantlist.Entry{ Cid: c, Priority: priority, From fb225e7856c31eeb9a91e0f1d452a79be695877b Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 12 Feb 2018 23:40:15 -0800 Subject: [PATCH 0618/1035] bitswap virtual test net code should send messages in order License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@0df75e410b8387b27a723081418ad622bab83fd9 --- bitswap/testnet/virtual.go | 61 ++++++++++++++++++++++++++++++++++---- 1 file changed, 55 insertions(+), 6 deletions(-) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index c5ba6e0ae..0524d17c5 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -4,6 +4,7 @@ import ( "context" "errors" "sync" + "time" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet 
"github.com/ipfs/go-ipfs/exchange/bitswap/network" @@ -22,7 +23,7 @@ var log = logging.Logger("bstestnet") func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { return &network{ - clients: make(map[peer.ID]bsnet.Receiver), + clients: make(map[peer.ID]*receiverQueue), delay: d, routingserver: rs, conns: make(map[string]struct{}), @@ -31,12 +32,28 @@ func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { type network struct { mu sync.Mutex - clients map[peer.ID]bsnet.Receiver + clients map[peer.ID]*receiverQueue routingserver mockrouting.Server delay delay.D conns map[string]struct{} } +type message struct { + from peer.ID + msg bsmsg.BitSwapMessage + shouldSend time.Time +} + +// receiverQueue queues up a set of messages to be sent, and sends them *in +// order* with their delays respected as much as sending them in order allows +// for +type receiverQueue struct { + receiver bsnet.Receiver + queue []*message + active bool + lk sync.Mutex +} + func (n *network) Adapter(p testutil.Identity) bsnet.BitSwapNetwork { n.mu.Lock() defer n.mu.Unlock() @@ -46,7 +63,7 @@ func (n *network) Adapter(p testutil.Identity) bsnet.BitSwapNetwork { network: n, routing: n.routingserver.Client(p), } - n.clients[p.ID()] = client + n.clients[p.ID()] = &receiverQueue{receiver: client} return client } @@ -64,7 +81,7 @@ func (n *network) SendMessage( ctx context.Context, from peer.ID, to peer.ID, - message bsmsg.BitSwapMessage) error { + mes bsmsg.BitSwapMessage) error { n.mu.Lock() defer n.mu.Unlock() @@ -77,7 +94,12 @@ func (n *network) SendMessage( // nb: terminate the context since the context wouldn't actually be passed // over the network in a real scenario - go n.deliver(receiver, from, message) + msg := &message{ + from: from, + msg: mes, + shouldSend: time.Now().Add(n.delay.Get()), + } + receiver.enqueue(msg) return nil } @@ -191,11 +213,38 @@ func (nc *networkClient) ConnectTo(_ context.Context, p peer.ID) error { // TODO: add handling for disconnects - 
otherClient.PeerConnected(nc.local) + otherClient.receiver.PeerConnected(nc.local) nc.Receiver.PeerConnected(p) return nil } +func (rq *receiverQueue) enqueue(m *message) { + rq.lk.Lock() + defer rq.lk.Unlock() + rq.queue = append(rq.queue, m) + if !rq.active { + rq.active = true + go rq.process() + } +} + +func (rq *receiverQueue) process() { + for { + rq.lk.Lock() + if len(rq.queue) == 0 { + rq.active = false + rq.lk.Unlock() + return + } + m := rq.queue[0] + rq.queue = rq.queue[1:] + rq.lk.Unlock() + + time.Sleep(time.Until(m.shouldSend)) + rq.receiver.ReceiveMessage(context.TODO(), m.from, m.msg) + } +} + func tagForPeers(a, b peer.ID) string { if a < b { return string(a + b) From eba3bf66b0eeba02f9b1811c18977367f241ad09 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Tue, 13 Feb 2018 11:29:32 +0100 Subject: [PATCH 0619/1035] More consistency in imports Per @magik6k comments. License: MIT Signed-off-by: Hector Sanjuan This commit was moved from ipfs/go-bitswap@c9341aeb3db625df7e2a9137898455b76fdbe4f5 --- bitswap/bitswap.go | 2 +- bitswap/testutils.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 081bbf067..5feb8cb59 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -17,7 +17,7 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" + delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" flags "gx/ipfs/QmRMGdC6HKdLsPDABL9aXPDidrpmEHzJqFWSvshkbn9Hj8/go-ipfs-flags" logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 7ddf08030..3611f4bb7 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -7,11 +7,11 @@ import ( blockstore 
"github.com/ipfs/go-ipfs/blocks/blockstore" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" - delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore" ds_sync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync" + delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" + testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" p2ptestutil "gx/ipfs/QmYVR3C8DWPHdHxvLtNFYfjsXgaRAdh6hPMNH3KiwCgu4o/go-libp2p-netutil" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" ) From bd277db5186f133fc11250dad229be60f2e970e3 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Fri, 9 Feb 2018 15:06:31 +0100 Subject: [PATCH 0620/1035] Extract: routing package to github.com/ipfs/go-ipfs-routing This extracts the routing package to its own repository (https://github.com/ipfs/go-ipfs-routing). History has been preserved. The new module has been gx'ed and published. Imports have been rewritten and re-ordered accordingly. An internal dependency to go-ipfs/repo has been removed by substituting it with the go-datastore.Batching interface. 
License: MIT Signed-off-by: Hector Sanjuan This commit was moved from ipfs/go-bitswap@6d7ed78ed712ff859799b491017f7f2e8aa70460 --- bitswap/bitswap_test.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 3 ++- bitswap/testnet/virtual.go | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 26fed27d1..854661670 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -12,12 +12,12 @@ import ( blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" - mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" tu "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" travis "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil/ci/travis" p2ptestutil "gx/ipfs/QmYVR3C8DWPHdHxvLtNFYfjsXgaRAdh6hPMNH3KiwCgu4o/go-libp2p-netutil" + mockrouting "gx/ipfs/QmZRcGYvxdauCd7hHnMYLYqcZRaDjv24c7eUNyJojAcdBb/go-ipfs-routing/mock" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" detectrace "gx/ipfs/Qmf7HqcW7LtCi1W8y2bdx2eJpze74jkbKqpByxgXikdbLF/go-detect-race" diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 4cb7551db..27f7edc69 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -7,10 +7,10 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" + mockrouting 
"gx/ipfs/QmZRcGYvxdauCd7hHnMYLYqcZRaDjv24c7eUNyJojAcdBb/go-ipfs-routing/mock" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index effe1bfac..9997c4403 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -4,10 +4,11 @@ import ( "context" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - mockrouting "github.com/ipfs/go-ipfs/routing/mock" + mockpeernet "gx/ipfs/QmNh1kGFFdsPu79KNSaL4NUKUPb4Eiz4KHdMtFY6664RDp/go-libp2p/p2p/net/mock" ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" + mockrouting "gx/ipfs/QmZRcGYvxdauCd7hHnMYLYqcZRaDjv24c7eUNyJojAcdBb/go-ipfs-routing/mock" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 7e7ee185c..b8237a1b6 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -8,12 +8,12 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - mockrouting "github.com/ipfs/go-ipfs/routing/mock" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" routing "gx/ipfs/QmTiWLZ6Fo5j4KcTVutZJ5KWRRJrbxzmxA4td8NfEdrPh7/go-libp2p-routing" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" + mockrouting "gx/ipfs/QmZRcGYvxdauCd7hHnMYLYqcZRaDjv24c7eUNyJojAcdBb/go-ipfs-routing/mock" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" ifconnmgr "gx/ipfs/Qmax8X1Kfahf5WfSB68EWDG3d3qyS3Sqs1v412fjPTfRwx/go-libp2p-interface-connmgr" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" From 
b68b930da001b25c28cbd07a1c28c6234e87a548 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Thu, 15 Feb 2018 18:03:41 +0100 Subject: [PATCH 0621/1035] Extract blocks/blockstore package to go-ipfs-blockstore This extracts the blocks/blockstore package and renames the blocks/blockstore/util package to /blocks/blockstoreutil (because util depends on Pin and I don't plan to extract Pin and its depedencies). The history of blocks/blockstore has been preserved. It has been gx'ed and imported. Imports have been rewritten accordingly and re-ordered. License: MIT Signed-off-by: Hector Sanjuan This commit was moved from ipfs/go-bitswap@93f5fecda96e8d257bfd47f508b531a4d8e84bcb --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 2 +- bitswap/decision/engine.go | 3 ++- bitswap/decision/engine_test.go | 3 ++- bitswap/get.go | 4 ++-- bitswap/testutils.go | 2 +- 6 files changed, 9 insertions(+), 7 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 5feb8cb59..a1404a8de 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -10,7 +10,6 @@ import ( "sync/atomic" "time" - blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" exchange "github.com/ipfs/go-ipfs/exchange" decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" @@ -23,6 +22,7 @@ import ( metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" + blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 854661670..7e99f72f9 100644 
--- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -8,12 +8,12 @@ import ( "testing" "time" - blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" + blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore" tu "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" travis "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil/ci/travis" p2ptestutil "gx/ipfs/QmYVR3C8DWPHdHxvLtNFYfjsXgaRAdh6hPMNH3KiwCgu4o/go-libp2p-netutil" diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 295078e72..dfeeaa8ce 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -6,10 +6,11 @@ import ( "sync" "time" - bstore "github.com/ipfs/go-ipfs/blocks/blockstore" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" + bstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index faa0a3e2a..c003a6efb 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -9,10 +9,11 @@ import ( "sync" "testing" - blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" message "github.com/ipfs/go-ipfs/exchange/bitswap/message" + ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore" dssync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync" + blockstore 
"gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" diff --git a/bitswap/get.go b/bitswap/get.go index f10a62d68..0ebed665c 100644 --- a/bitswap/get.go +++ b/bitswap/get.go @@ -4,11 +4,11 @@ import ( "context" "errors" - blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" + blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) type getBlocksFunc func(context.Context, []*cid.Cid) (<-chan blocks.Block, error) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 3611f4bb7..1c0979af5 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -4,13 +4,13 @@ import ( "context" "time" - blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore" ds_sync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" + blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" p2ptestutil "gx/ipfs/QmYVR3C8DWPHdHxvLtNFYfjsXgaRAdh6hPMNH3KiwCgu4o/go-libp2p-netutil" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" From 05f5d3395bb5312ed67243c02998da63ad051d33 Mon Sep 17 00:00:00 
2001 From: Hector Sanjuan Date: Thu, 15 Feb 2018 23:03:01 +0100 Subject: [PATCH 0622/1035] Point briantigerchow/pubsub GoDep'ed module to the gx'ed version This removes briantigerchow/pubsub from Godeps and uses our gx'ed version instead. License: MIT Signed-off-by: Hector Sanjuan This commit was moved from ipfs/go-bitswap@a7934af53e3343546912a6929b978628e91e8650 --- bitswap/notifications/notifications.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 9a6f10b52..be0f11c5a 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -4,10 +4,9 @@ import ( "context" "sync" - blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" - - pubsub "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/pubsub" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + pubsub "gx/ipfs/QmdbxjQWogRCHRaxhhGnYdT1oQJzL9GdqSKzCdqWr85AP2/pubsub" + blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) const bufferSize = 16 From 1c90d9ffcb9c1d064ecf74a916529e25f4dcdb44 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Thu, 15 Feb 2018 22:53:13 +0100 Subject: [PATCH 0623/1035] Extract thirdparty/pq to go-ipfs-pq This moves the `thirdparty/pq` package to https://github.com/ipfs/go-ipfs-pq . History has been retained. The new package has been gx'ed and published. Imports have been updated accordingly. 
License: MIT Signed-off-by: Hector Sanjuan This commit was moved from ipfs/go-bitswap@70d57e22ad693a8d7aa165045df70c237b53022a --- bitswap/decision/peer_request_queue.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 64762f23b..5c116fd69 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -5,8 +5,8 @@ import ( "time" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - pq "github.com/ipfs/go-ipfs/thirdparty/pq" + pq "gx/ipfs/QmZUbTDJ39JpvtFCSubiWeUTQRvMA1tVE5RZCJrY4oeAsC/go-ipfs-pq" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) From 9106687fee3014dc0c61ac85676b69632606d93f Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 21 Feb 2018 12:35:56 -0800 Subject: [PATCH 0624/1035] fix race in TestWantlistClearsOnCancel fixes #4726 License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@461bdd1de82d7f87f2111886c548e481ceded11b --- bitswap/session_test.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/bitswap/session_test.go b/bitswap/session_test.go index 2fe4672b0..75e4da038 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -8,6 +8,7 @@ import ( blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" + tu "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) @@ -287,7 +288,7 @@ func TestMultipleSessions(t *testing.T) { } func TestWantlistClearsOnCancel(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() vnet := getVirtualNetwork() @@ -314,7 +315,12 @@ func 
TestWantlistClearsOnCancel(t *testing.T) { } cancel1() - if len(a.Exchange.GetWantlist()) > 0 { - t.Fatal("expected empty wantlist") + if err := tu.WaitFor(ctx, func() error { + if len(a.Exchange.GetWantlist()) > 0 { + return fmt.Errorf("expected empty wantlist") + } + return nil + }); err != nil { + t.Fatal(err) } } From c30ff62ed8ec388d0cf7c40215706941746e8758 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 29 Nov 2017 15:39:57 -0800 Subject: [PATCH 0625/1035] don't warn when trying to send wantlist to disconnected peers fixes #4439 License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@cc43783d9b6a1cc73621aece9db0fc23e9f9146a --- bitswap/wantmanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 0e6453d6b..650618c23 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -326,7 +326,7 @@ func (pm *WantManager) Run() { for _, t := range ws.targets { p, ok := pm.peers[t] if !ok { - log.Warning("tried sending wantlist change to non-partner peer") + // No longer connected. continue } p.addMessage(ws.entries, ws.from) From dc7de62ce9a84e6a9994ff2bbe881c567d478b0a Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Mon, 12 Mar 2018 23:23:27 +0100 Subject: [PATCH 0626/1035] exchange: reintroduce info on wantlist update to no connected peer License: MIT Signed-off-by: Jakub Sztandera This commit was moved from ipfs/go-bitswap@c848b5a5d5fb2c6be7a4202053fdbb911721719f --- bitswap/wantmanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 650618c23..306aadbe7 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -326,7 +326,7 @@ func (pm *WantManager) Run() { for _, t := range ws.targets { p, ok := pm.peers[t] if !ok { - // No longer connected. 
+ log.Infof("tried sending wantlist change to non-partner peer: %s", t) continue } p.addMessage(ws.entries, ws.from) From 1f69804528781686482bd8cb2241ed8e36282df7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 19 Mar 2018 02:09:29 +0100 Subject: [PATCH 0627/1035] misc: Remove some dead code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera This commit was moved from ipfs/go-bitswap@689398fb2cd385e34484a1f6c7dc197ae3ec3408 --- bitswap/bitswap.go | 4 ++-- bitswap/testutils.go | 4 +--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index a1404a8de..5d2db1ebd 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -66,8 +66,8 @@ var rebroadcastDelay = delay.Fixed(time.Minute) // BitSwapNetwork. This function registers the returned instance as the network // delegate. // Runs until context is cancelled. -func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, - bstore blockstore.Blockstore, nice bool) exchange.Interface { +func New(parent context.Context, network bsnet.BitSwapNetwork, + bstore blockstore.Blockstore) exchange.Interface { // important to use provided parent context (since it may include important // loggable data). It's probably not a good idea to allow bitswap to be diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 1c0979af5..a27ccd99f 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -99,9 +99,7 @@ func MkSession(ctx context.Context, net tn.Network, p testutil.Identity) Instanc panic(err.Error()) // FIXME perhaps change signature and return error. 
} - const alwaysSendToPeer = true - - bs := New(ctx, p.ID(), adapter, bstore, alwaysSendToPeer).(*Bitswap) + bs := New(ctx, adapter, bstore).(*Bitswap) return Instance{ Peer: p.ID(), From 0b973a708653962b8da1d12faecfcbec01ceabb1 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Tue, 27 Feb 2018 21:03:55 +0100 Subject: [PATCH 0628/1035] Update to latest go-datastore License: MIT Signed-off-by: Hector Sanjuan This commit was moved from ipfs/go-bitswap@35ab14d9d568fcf0f2bd3653fd3a7cd4a89e8637 --- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 4 ++-- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 6 +++--- bitswap/get.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 6 +++--- 9 files changed, 15 insertions(+), 15 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index a1404a8de..154b6c4bc 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -22,8 +22,8 @@ import ( metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" - blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" + blockstore "gx/ipfs/QmaG4DZ4JaqEfvPWt5nPPgoTzhc1tr1T3f4Nu9Jpdm8ymY/go-ipfs-blockstore" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 7e99f72f9..120a0bd8f 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -13,11 +13,11 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - blockstore 
"gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore" + mockrouting "gx/ipfs/QmT51m6og9tmYo8FdaYin3zk1R7vA6ek5WYoHYEiMorfon/go-ipfs-routing/mock" tu "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" travis "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil/ci/travis" p2ptestutil "gx/ipfs/QmYVR3C8DWPHdHxvLtNFYfjsXgaRAdh6hPMNH3KiwCgu4o/go-libp2p-netutil" - mockrouting "gx/ipfs/QmZRcGYvxdauCd7hHnMYLYqcZRaDjv24c7eUNyJojAcdBb/go-ipfs-routing/mock" + blockstore "gx/ipfs/QmaG4DZ4JaqEfvPWt5nPPgoTzhc1tr1T3f4Nu9Jpdm8ymY/go-ipfs-blockstore" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" detectrace "gx/ipfs/Qmf7HqcW7LtCi1W8y2bdx2eJpze74jkbKqpByxgXikdbLF/go-detect-race" diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index dfeeaa8ce..cd9ae9361 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -10,8 +10,8 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" - bstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" + bstore "gx/ipfs/QmaG4DZ4JaqEfvPWt5nPPgoTzhc1tr1T3f4Nu9Jpdm8ymY/go-ipfs-blockstore" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index c003a6efb..dbebfb058 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -11,11 +11,11 @@ import ( message "github.com/ipfs/go-ipfs/exchange/bitswap/message" - ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore" - dssync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync" - blockstore 
"gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" + ds "gx/ipfs/QmXRKBQA4wXP7xWbFiZsR1GP4HV6wMDQ1aWFxZZ4uBcPX9/go-datastore" + dssync "gx/ipfs/QmXRKBQA4wXP7xWbFiZsR1GP4HV6wMDQ1aWFxZZ4uBcPX9/go-datastore/sync" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" + blockstore "gx/ipfs/QmaG4DZ4JaqEfvPWt5nPPgoTzhc1tr1T3f4Nu9Jpdm8ymY/go-ipfs-blockstore" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) diff --git a/bitswap/get.go b/bitswap/get.go index 0ebed665c..e18b3ad3b 100644 --- a/bitswap/get.go +++ b/bitswap/get.go @@ -6,7 +6,7 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore" + blockstore "gx/ipfs/QmaG4DZ4JaqEfvPWt5nPPgoTzhc1tr1T3f4Nu9Jpdm8ymY/go-ipfs-blockstore" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 27f7edc69..cae8c2c72 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -9,8 +9,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" + mockrouting "gx/ipfs/QmT51m6og9tmYo8FdaYin3zk1R7vA6ek5WYoHYEiMorfon/go-ipfs-routing/mock" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" - mockrouting "gx/ipfs/QmZRcGYvxdauCd7hHnMYLYqcZRaDjv24c7eUNyJojAcdBb/go-ipfs-routing/mock" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 9997c4403..7c40f8b27 100644 --- 
a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -6,9 +6,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockpeernet "gx/ipfs/QmNh1kGFFdsPu79KNSaL4NUKUPb4Eiz4KHdMtFY6664RDp/go-libp2p/p2p/net/mock" - ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore" + mockrouting "gx/ipfs/QmT51m6og9tmYo8FdaYin3zk1R7vA6ek5WYoHYEiMorfon/go-ipfs-routing/mock" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" - mockrouting "gx/ipfs/QmZRcGYvxdauCd7hHnMYLYqcZRaDjv24c7eUNyJojAcdBb/go-ipfs-routing/mock" + ds "gx/ipfs/QmXRKBQA4wXP7xWbFiZsR1GP4HV6wMDQ1aWFxZZ4uBcPX9/go-datastore" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index b8237a1b6..28fcd15ae 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -11,9 +11,9 @@ import ( delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" + mockrouting "gx/ipfs/QmT51m6og9tmYo8FdaYin3zk1R7vA6ek5WYoHYEiMorfon/go-ipfs-routing/mock" routing "gx/ipfs/QmTiWLZ6Fo5j4KcTVutZJ5KWRRJrbxzmxA4td8NfEdrPh7/go-libp2p-routing" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" - mockrouting "gx/ipfs/QmZRcGYvxdauCd7hHnMYLYqcZRaDjv24c7eUNyJojAcdBb/go-ipfs-routing/mock" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" ifconnmgr "gx/ipfs/Qmax8X1Kfahf5WfSB68EWDG3d3qyS3Sqs1v412fjPTfRwx/go-libp2p-interface-connmgr" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 1c0979af5..8822e11a1 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -7,13 +7,13 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" - ds 
"gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore" - ds_sync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" + ds "gx/ipfs/QmXRKBQA4wXP7xWbFiZsR1GP4HV6wMDQ1aWFxZZ4uBcPX9/go-datastore" + ds_sync "gx/ipfs/QmXRKBQA4wXP7xWbFiZsR1GP4HV6wMDQ1aWFxZZ4uBcPX9/go-datastore/sync" p2ptestutil "gx/ipfs/QmYVR3C8DWPHdHxvLtNFYfjsXgaRAdh6hPMNH3KiwCgu4o/go-libp2p-netutil" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" + blockstore "gx/ipfs/QmaG4DZ4JaqEfvPWt5nPPgoTzhc1tr1T3f4Nu9Jpdm8ymY/go-ipfs-blockstore" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! From 6abbfaaf3dcc5382e1ca0c0d5df854a89b9dabee Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Tue, 27 Feb 2018 21:49:59 +0100 Subject: [PATCH 0629/1035] Remove thirdparty/datastore2/delayed.go: part of new go-datastore License: MIT Signed-off-by: Hector Sanjuan This commit was moved from ipfs/go-bitswap@a95100845f789a015129a1e73b22f7f418c52f7c --- bitswap/testutils.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 8822e11a1..4df79c1b5 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -5,11 +5,11 @@ import ( "time" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" - datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" ds "gx/ipfs/QmXRKBQA4wXP7xWbFiZsR1GP4HV6wMDQ1aWFxZZ4uBcPX9/go-datastore" + delayed "gx/ipfs/QmXRKBQA4wXP7xWbFiZsR1GP4HV6wMDQ1aWFxZZ4uBcPX9/go-datastore/delayed" ds_sync 
"gx/ipfs/QmXRKBQA4wXP7xWbFiZsR1GP4HV6wMDQ1aWFxZZ4uBcPX9/go-datastore/sync" p2ptestutil "gx/ipfs/QmYVR3C8DWPHdHxvLtNFYfjsXgaRAdh6hPMNH3KiwCgu4o/go-libp2p-netutil" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" @@ -90,7 +90,7 @@ func MkSession(ctx context.Context, net tn.Network, p testutil.Identity) Instanc bsdelay := delay.Fixed(0) adapter := net.Adapter(p) - dstore := ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay)) + dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) bstore, err := blockstore.CachedBlockstore(ctx, blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)), From e0c8208b0841d8a00e7062f915143fc35de42711 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Fri, 2 Mar 2018 15:01:12 +0100 Subject: [PATCH 0630/1035] Revert go-libp2p-kad-dht and related changes to a working version This uses a working libp2p-kad-dht and libp2p-record libraries, reverts the changes that were introduced to support the newer versions License: MIT Signed-off-by: Hector Sanjuan This commit was moved from ipfs/go-bitswap@2cd0ce70dd0c099b3235f8e3b2335c469269e8e7 --- bitswap/bitswap_test.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testnet/virtual.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 120a0bd8f..1cc8b2d94 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -13,9 +13,9 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - mockrouting "gx/ipfs/QmT51m6og9tmYo8FdaYin3zk1R7vA6ek5WYoHYEiMorfon/go-ipfs-routing/mock" tu "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" travis "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil/ci/travis" + mockrouting "gx/ipfs/QmXtoXbu9ReyV6Q4kDQ5CF9wXQNDY1PdHc4HhfxRR5AHB3/go-ipfs-routing/mock" p2ptestutil 
"gx/ipfs/QmYVR3C8DWPHdHxvLtNFYfjsXgaRAdh6hPMNH3KiwCgu4o/go-libp2p-netutil" blockstore "gx/ipfs/QmaG4DZ4JaqEfvPWt5nPPgoTzhc1tr1T3f4Nu9Jpdm8ymY/go-ipfs-blockstore" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index cae8c2c72..4da3df3e5 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -9,8 +9,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - mockrouting "gx/ipfs/QmT51m6og9tmYo8FdaYin3zk1R7vA6ek5WYoHYEiMorfon/go-ipfs-routing/mock" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" + mockrouting "gx/ipfs/QmXtoXbu9ReyV6Q4kDQ5CF9wXQNDY1PdHc4HhfxRR5AHB3/go-ipfs-routing/mock" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 7c40f8b27..19f36a61f 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -6,9 +6,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" mockpeernet "gx/ipfs/QmNh1kGFFdsPu79KNSaL4NUKUPb4Eiz4KHdMtFY6664RDp/go-libp2p/p2p/net/mock" - mockrouting "gx/ipfs/QmT51m6og9tmYo8FdaYin3zk1R7vA6ek5WYoHYEiMorfon/go-ipfs-routing/mock" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" ds "gx/ipfs/QmXRKBQA4wXP7xWbFiZsR1GP4HV6wMDQ1aWFxZZ4uBcPX9/go-datastore" + mockrouting "gx/ipfs/QmXtoXbu9ReyV6Q4kDQ5CF9wXQNDY1PdHc4HhfxRR5AHB3/go-ipfs-routing/mock" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 28fcd15ae..86b43c7c8 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -11,9 +11,9 @@ import ( delay 
"gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" - mockrouting "gx/ipfs/QmT51m6og9tmYo8FdaYin3zk1R7vA6ek5WYoHYEiMorfon/go-ipfs-routing/mock" routing "gx/ipfs/QmTiWLZ6Fo5j4KcTVutZJ5KWRRJrbxzmxA4td8NfEdrPh7/go-libp2p-routing" testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" + mockrouting "gx/ipfs/QmXtoXbu9ReyV6Q4kDQ5CF9wXQNDY1PdHc4HhfxRR5AHB3/go-ipfs-routing/mock" peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" ifconnmgr "gx/ipfs/Qmax8X1Kfahf5WfSB68EWDG3d3qyS3Sqs1v412fjPTfRwx/go-libp2p-interface-connmgr" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" From 974f9eb938e07c1af68c4ba713dd5f453f4c8d1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Sun, 18 Mar 2018 19:54:46 +0100 Subject: [PATCH 0631/1035] fix error style MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera This commit was moved from ipfs/go-bitswap@2dde408ca8f7a508ba3fa241696fc900c413d64d --- bitswap/testnet/virtual.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 86b43c7c8..e887a5cf4 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -88,7 +88,7 @@ func (n *network) SendMessage( receiver, ok := n.clients[to] if !ok { - return errors.New("Cannot locate peer on network") + return errors.New("cannot locate peer on network") } // nb: terminate the context since the context wouldn't actually be passed @@ -107,7 +107,7 @@ func (n *network) SendMessage( func (n *network) deliver( r bsnet.Receiver, from peer.ID, message bsmsg.BitSwapMessage) error { if message == nil || from == "" { - return errors.New("Invalid input") + return errors.New("invalid input") } n.delay.Wait() From 76388f0c6d5894d863ac7d48a5634eb5b440a5af Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 19 Mar 2018 03:41:28 +0100 Subject: [PATCH 0632/1035] misc: Fix a few typos MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit License: MIT Signed-off-by: Łukasz Magiera This commit was moved from ipfs/go-bitswap@21e1da33a33a54a87c63ab4460e4ddff7cbb352e --- bitswap/README.md | 4 ++-- bitswap/bitswap.go | 2 +- bitswap/decision/ledger.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/README.md b/bitswap/README.md index cfdbd27e0..417d87ff3 100644 --- a/bitswap/README.md +++ b/bitswap/README.md @@ -4,7 +4,7 @@ Bitswap is the data trading module for ipfs, it manages requesting and sending blocks to and from other peers in the network. Bitswap has two main jobs, the first is to acquire blocks requested by the client from the network. The second -is to judiciously send blocks in its posession to other peers who want them. +is to judiciously send blocks in its possession to other peers who want them. Bitswap is a message based protocol, as opposed to response-reply. All messages contain wantlists, or blocks. Upon receiving a wantlist, a node should consider @@ -20,7 +20,7 @@ another peer has a task in the peer request queue created for it. The peer request queue is a priority queue that sorts available tasks by some metric, currently, that metric is very simple and aims to fairly address the tasks of each other peer. More advanced decision logic will be implemented in the -future. Task workers pull tasks to be done off of the queue, retreive the block +future. Task workers pull tasks to be done off of the queue, retrieve the block to be sent, and send it off. The number of task workers is limited by a constant factor. 
diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 154b6c4bc..4fcb7172c 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -295,7 +295,7 @@ func (bs *Bitswap) CancelWants(cids []*cid.Cid, ses uint64) { bs.wm.CancelWants(context.Background(), cids, nil, ses) } -// HasBlock announces the existance of a block to this bitswap service. The +// HasBlock announces the existence of a block to this bitswap service. The // service will potentially notify its peers. func (bs *Bitswap) HasBlock(blk blocks.Block) error { return bs.receiveBlockFrom(blk, "") diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index c4679cd1f..45cab6220 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -24,7 +24,7 @@ type ledger struct { // Partner is the remote Peer. Partner peer.ID - // Accounting tracks bytes sent and recieved. + // Accounting tracks bytes sent and received. Accounting debtRatio // lastExchange is the time of the last data exchange. From 73eb1290a5ff40a3052f3d478edfa58b2e2be013 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Tue, 3 Apr 2018 14:39:17 +0200 Subject: [PATCH 0633/1035] Extract: exchange/interface.go to go-ipfs-exchange-interface License: MIT Signed-off-by: Hector Sanjuan This commit was moved from ipfs/go-bitswap@fdb4f15ed3e92a45747ea0c6f8292de73bd3ada0 --- bitswap/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 154b6c4bc..6fcd95570 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -10,7 +10,6 @@ import ( "sync/atomic" "time" - exchange "github.com/ipfs/go-ipfs/exchange" decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" @@ -25,6 +24,7 @@ import ( peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" blockstore 
"gx/ipfs/QmaG4DZ4JaqEfvPWt5nPPgoTzhc1tr1T3f4Nu9Jpdm8ymY/go-ipfs-blockstore" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + exchange "gx/ipfs/QmdcAXgEHUueP4A7b5hjabKn2EooeHgMreMvFC249dGCgc/go-ipfs-exchange-interface" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) From 3f357e93449e412ab6347e4153701fc3b08b1354 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Tue, 3 Apr 2018 15:00:12 +0200 Subject: [PATCH 0634/1035] Extract: blocks/blocksutil to go-ipfs-blocksutil License: MIT Signed-off-by: Hector Sanjuan This commit was moved from ipfs/go-bitswap@e60e1708705a3bb8d2dca4561a1b63d521b746aa --- bitswap/bitswap_test.go | 2 +- bitswap/notifications/notifications_test.go | 2 +- bitswap/session_test.go | 3 +-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 1cc8b2d94..e3ddd4f8c 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -8,7 +8,6 @@ import ( "testing" "time" - blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" @@ -21,6 +20,7 @@ import ( cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" detectrace "gx/ipfs/Qmf7HqcW7LtCi1W8y2bdx2eJpze74jkbKqpByxgXikdbLF/go-detect-race" + blocksutil "gx/ipfs/Qmf951DP11mCoctpyF3ZppPZdo2oAxuNi2vnkVDgHJ8Fqk/go-ipfs-blocksutil" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index a70a0755a..5c15975db 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" + blocksutil "gx/ipfs/Qmf951DP11mCoctpyF3ZppPZdo2oAxuNi2vnkVDgHJ8Fqk/go-ipfs-blocksutil" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/session_test.go b/bitswap/session_test.go index 75e4da038..cfcf00238 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -6,11 +6,10 @@ import ( "testing" "time" - blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" - tu "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" + blocksutil "gx/ipfs/Qmf951DP11mCoctpyF3ZppPZdo2oAxuNi2vnkVDgHJ8Fqk/go-ipfs-blocksutil" ) func TestBasicSessions(t *testing.T) { From c1b39c4667546df31e150d2383ed2310abdb7891 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 3 May 2018 21:39:52 -0700 Subject: [PATCH 0635/1035] update deps License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@c80a3ae8487bab57c8c6297affb930a479cc7145 --- bitswap/bitswap.go | 6 +++--- bitswap/bitswap_test.go | 10 +++++----- bitswap/decision/bench_test.go | 4 ++-- bitswap/decision/engine.go | 6 +++--- bitswap/decision/engine_test.go | 10 +++++----- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/decision/peer_request_queue_test.go | 2 +- bitswap/get.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 14 
+++++++------- bitswap/session.go | 6 +++--- bitswap/session_test.go | 2 +- bitswap/testnet/interface.go | 4 ++-- bitswap/testnet/network_test.go | 6 +++--- bitswap/testnet/peernet.go | 10 +++++----- bitswap/testnet/virtual.go | 12 ++++++------ bitswap/testutils.go | 14 +++++++------- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 4 ++-- 21 files changed, 62 insertions(+), 62 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 0a6a6f83e..512e0ae17 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -17,12 +17,12 @@ import ( delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" flags "gx/ipfs/QmRMGdC6HKdLsPDABL9aXPDidrpmEHzJqFWSvshkbn9Hj8/go-ipfs-flags" - logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" - peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" - blockstore "gx/ipfs/QmaG4DZ4JaqEfvPWt5nPPgoTzhc1tr1T3f4Nu9Jpdm8ymY/go-ipfs-blockstore" + logging "gx/ipfs/QmTG23dvpBCBjqQwyDxV8CQT6jmS4PSftNr1VqHhE3MLy7/go-log" + blockstore "gx/ipfs/QmayRSLCiM2gWR7Kay8vqu3Yy5mf7yPqocF9ZRgDUPYMcc/go-ipfs-blockstore" + peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" exchange "gx/ipfs/QmdcAXgEHUueP4A7b5hjabKn2EooeHgMreMvFC249dGCgc/go-ipfs-exchange-interface" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index e3ddd4f8c..b8b9888d2 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -11,12 +11,12 @@ import ( decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" + mockrouting 
"gx/ipfs/QmPuPdzoG4b5uyYSQCjLEHB8NM593m3BW19UHX2jZ6Wzfm/go-ipfs-routing/mock" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - tu "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" - travis "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil/ci/travis" - mockrouting "gx/ipfs/QmXtoXbu9ReyV6Q4kDQ5CF9wXQNDY1PdHc4HhfxRR5AHB3/go-ipfs-routing/mock" - p2ptestutil "gx/ipfs/QmYVR3C8DWPHdHxvLtNFYfjsXgaRAdh6hPMNH3KiwCgu4o/go-libp2p-netutil" - blockstore "gx/ipfs/QmaG4DZ4JaqEfvPWt5nPPgoTzhc1tr1T3f4Nu9Jpdm8ymY/go-ipfs-blockstore" + tu "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" + travis "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil/ci/travis" + blockstore "gx/ipfs/QmayRSLCiM2gWR7Kay8vqu3Yy5mf7yPqocF9ZRgDUPYMcc/go-ipfs-blockstore" + p2ptestutil "gx/ipfs/Qmb6BsZf6Y3kxffXMNTubGPF1w1bkHtpvhfYbmnwP3NQyw/go-libp2p-netutil" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" detectrace "gx/ipfs/Qmf7HqcW7LtCi1W8y2bdx2eJpze74jkbKqpByxgXikdbLF/go-detect-race" diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 062eb20ff..81f8a6f98 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,8 +7,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" u "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util" - "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" - "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" + "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" + "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index cd9ae9361..35c5a58f0 100644 --- a/bitswap/decision/engine.go +++ 
b/bitswap/decision/engine.go @@ -9,9 +9,9 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" - peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" - bstore "gx/ipfs/QmaG4DZ4JaqEfvPWt5nPPgoTzhc1tr1T3f4Nu9Jpdm8ymY/go-ipfs-blockstore" + logging "gx/ipfs/QmTG23dvpBCBjqQwyDxV8CQT6jmS4PSftNr1VqHhE3MLy7/go-log" + bstore "gx/ipfs/QmayRSLCiM2gWR7Kay8vqu3Yy5mf7yPqocF9ZRgDUPYMcc/go-ipfs-blockstore" + peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index dbebfb058..de54c1018 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -11,11 +11,11 @@ import ( message "github.com/ipfs/go-ipfs/exchange/bitswap/message" - testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" - ds "gx/ipfs/QmXRKBQA4wXP7xWbFiZsR1GP4HV6wMDQ1aWFxZZ4uBcPX9/go-datastore" - dssync "gx/ipfs/QmXRKBQA4wXP7xWbFiZsR1GP4HV6wMDQ1aWFxZZ4uBcPX9/go-datastore/sync" - peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" - blockstore "gx/ipfs/QmaG4DZ4JaqEfvPWt5nPPgoTzhc1tr1T3f4Nu9Jpdm8ymY/go-ipfs-blockstore" + testutil "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" + blockstore "gx/ipfs/QmayRSLCiM2gWR7Kay8vqu3Yy5mf7yPqocF9ZRgDUPYMcc/go-ipfs-blockstore" + peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" + ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" + dssync "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/sync" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 45cab6220..c873d7679 100644 
--- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,7 +6,7 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" + peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 5c116fd69..63b574737 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -7,7 +7,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" pq "gx/ipfs/QmZUbTDJ39JpvtFCSubiWeUTQRvMA1tVE5RZCJrY4oeAsC/go-ipfs-pq" - peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" + peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index c21116ae6..4435837ab 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -10,7 +10,7 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" u "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util" - "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" + "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) diff --git a/bitswap/get.go b/bitswap/get.go index e18b3ad3b..978a043dc 100644 --- a/bitswap/get.go +++ b/bitswap/get.go @@ -6,7 +6,7 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - blockstore "gx/ipfs/QmaG4DZ4JaqEfvPWt5nPPgoTzhc1tr1T3f4Nu9Jpdm8ymY/go-ipfs-blockstore" + blockstore "gx/ipfs/QmayRSLCiM2gWR7Kay8vqu3Yy5mf7yPqocF9ZRgDUPYMcc/go-ipfs-blockstore" cid 
"gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 9a166c942..8477763b7 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -8,7 +8,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" - inet "gx/ipfs/QmXfkENeeBvh3zYA51MaSdGUdBjhQ99cP5WQe8zgr6wchG/go-libp2p-net" + inet "gx/ipfs/QmXoz9o2PT3tEzf7hicegwex5UgVP54n3k82K7jrWFyN86/go-libp2p-net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 1f63c6c22..ff98884e1 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -6,9 +6,9 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" - peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" - ifconnmgr "gx/ipfs/Qmax8X1Kfahf5WfSB68EWDG3d3qyS3Sqs1v412fjPTfRwx/go-libp2p-interface-connmgr" + peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + ifconnmgr "gx/ipfs/QmfQNieWBPwmnUjXWPZbjJPzhNwFFabTb5RQ79dyVWGujQ/go-libp2p-interface-connmgr" ) var ( diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 2a2a1ea47..5ff27c6e6 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,16 +8,16 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - host "gx/ipfs/QmNmJZL7FQySMtE2BQuLMuZg2EB2CLEunJJUSVSc9YnnbV/go-libp2p-host" - logging 
"gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" - routing "gx/ipfs/QmTiWLZ6Fo5j4KcTVutZJ5KWRRJrbxzmxA4td8NfEdrPh7/go-libp2p-routing" + logging "gx/ipfs/QmTG23dvpBCBjqQwyDxV8CQT6jmS4PSftNr1VqHhE3MLy7/go-log" + routing "gx/ipfs/QmUHRKTeaoASDvDj7cTAXsmjAY7KQ13ErtzkQHZQq6uFUz/go-libp2p-routing" ma "gx/ipfs/QmWWQ2Txc2c6tqjsBpzg5Ar652cHPGNsQQp2SejkNmkUMb/go-multiaddr" - pstore "gx/ipfs/QmXauCuJzmzapetmC6W4TuDJLL1yFFrVzSHoWv8YdbmnxH/go-libp2p-peerstore" - inet "gx/ipfs/QmXfkENeeBvh3zYA51MaSdGUdBjhQ99cP5WQe8zgr6wchG/go-libp2p-net" + inet "gx/ipfs/QmXoz9o2PT3tEzf7hicegwex5UgVP54n3k82K7jrWFyN86/go-libp2p-net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" - ifconnmgr "gx/ipfs/Qmax8X1Kfahf5WfSB68EWDG3d3qyS3Sqs1v412fjPTfRwx/go-libp2p-interface-connmgr" + peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + pstore "gx/ipfs/QmdeiKhUy1TVGBaKxt7y1QmBDLBdisSrLJ1x58Eoj4PXUh/go-libp2p-peerstore" + ifconnmgr "gx/ipfs/QmfQNieWBPwmnUjXWPZbjJPzhNwFFabTb5RQ79dyVWGujQ/go-libp2p-interface-connmgr" + host "gx/ipfs/QmfZTdmunzKzAGJrSvXXQbQ5kLLUiEMX5vdwux7iXkdk7D/go-libp2p-host" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/session.go b/bitswap/session.go index 937376723..09f3cab5d 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -7,12 +7,12 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" + loggables "gx/ipfs/QmPDZJxtWGfcwLPazJxD4h3v3aDs43V7UNAVs3Jz1Wo7o4/go-libp2p-loggables" + logging "gx/ipfs/QmTG23dvpBCBjqQwyDxV8CQT6jmS4PSftNr1VqHhE3MLy7/go-log" lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru" - peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" + peer 
"gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" - loggables "gx/ipfs/Qmf9JgVLz46pxPXwG2eWSJpkqVCcjD4rp7zCRi2KP6GTNB/go-libp2p-loggables" ) const activeWantsLimit = 16 diff --git a/bitswap/session_test.go b/bitswap/session_test.go index cfcf00238..986fedb8a 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - tu "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" + tu "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" blocksutil "gx/ipfs/Qmf951DP11mCoctpyF3ZppPZdo2oAxuNi2vnkVDgHJ8Fqk/go-ipfs-blocksutil" diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 334bf9809..8ab2fb621 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -2,8 +2,8 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" - peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" + "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" + peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 4da3df3e5..92a1ea42c 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -8,10 +8,10 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" + mockrouting "gx/ipfs/QmPuPdzoG4b5uyYSQCjLEHB8NM593m3BW19UHX2jZ6Wzfm/go-ipfs-routing/mock" delay 
"gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" - mockrouting "gx/ipfs/QmXtoXbu9ReyV6Q4kDQ5CF9wXQNDY1PdHc4HhfxRR5AHB3/go-ipfs-routing/mock" - peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" + testutil "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" + peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 19f36a61f..43d6cb713 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,11 +5,11 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - mockpeernet "gx/ipfs/QmNh1kGFFdsPu79KNSaL4NUKUPb4Eiz4KHdMtFY6664RDp/go-libp2p/p2p/net/mock" - testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" - ds "gx/ipfs/QmXRKBQA4wXP7xWbFiZsR1GP4HV6wMDQ1aWFxZZ4uBcPX9/go-datastore" - mockrouting "gx/ipfs/QmXtoXbu9ReyV6Q4kDQ5CF9wXQNDY1PdHc4HhfxRR5AHB3/go-ipfs-routing/mock" - peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" + mockrouting "gx/ipfs/QmPuPdzoG4b5uyYSQCjLEHB8NM593m3BW19UHX2jZ6Wzfm/go-ipfs-routing/mock" + testutil "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" + mockpeernet "gx/ipfs/QmWsV6kzPaYGBDVyuUfWBvyQygEc9Qrv9vzo8vZ7X4mdLN/go-libp2p/p2p/net/mock" + peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" + ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index e887a5cf4..8ce0be524 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,14 +9,14 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" + 
mockrouting "gx/ipfs/QmPuPdzoG4b5uyYSQCjLEHB8NM593m3BW19UHX2jZ6Wzfm/go-ipfs-routing/mock" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" - routing "gx/ipfs/QmTiWLZ6Fo5j4KcTVutZJ5KWRRJrbxzmxA4td8NfEdrPh7/go-libp2p-routing" - testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" - mockrouting "gx/ipfs/QmXtoXbu9ReyV6Q4kDQ5CF9wXQNDY1PdHc4HhfxRR5AHB3/go-ipfs-routing/mock" - peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" - ifconnmgr "gx/ipfs/Qmax8X1Kfahf5WfSB68EWDG3d3qyS3Sqs1v412fjPTfRwx/go-libp2p-interface-connmgr" + logging "gx/ipfs/QmTG23dvpBCBjqQwyDxV8CQT6jmS4PSftNr1VqHhE3MLy7/go-log" + routing "gx/ipfs/QmUHRKTeaoASDvDj7cTAXsmjAY7KQ13ErtzkQHZQq6uFUz/go-libp2p-routing" + testutil "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" + peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + ifconnmgr "gx/ipfs/QmfQNieWBPwmnUjXWPZbjJPzhNwFFabTb5RQ79dyVWGujQ/go-libp2p-interface-connmgr" ) var log = logging.Logger("bstestnet") diff --git a/bitswap/testutils.go b/bitswap/testutils.go index cbca2f822..f075c4812 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -7,13 +7,13 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil" - ds "gx/ipfs/QmXRKBQA4wXP7xWbFiZsR1GP4HV6wMDQ1aWFxZZ4uBcPX9/go-datastore" - delayed "gx/ipfs/QmXRKBQA4wXP7xWbFiZsR1GP4HV6wMDQ1aWFxZZ4uBcPX9/go-datastore/delayed" - ds_sync "gx/ipfs/QmXRKBQA4wXP7xWbFiZsR1GP4HV6wMDQ1aWFxZZ4uBcPX9/go-datastore/sync" - p2ptestutil "gx/ipfs/QmYVR3C8DWPHdHxvLtNFYfjsXgaRAdh6hPMNH3KiwCgu4o/go-libp2p-netutil" - peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" - 
blockstore "gx/ipfs/QmaG4DZ4JaqEfvPWt5nPPgoTzhc1tr1T3f4Nu9Jpdm8ymY/go-ipfs-blockstore" + testutil "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" + blockstore "gx/ipfs/QmayRSLCiM2gWR7Kay8vqu3Yy5mf7yPqocF9ZRgDUPYMcc/go-ipfs-blockstore" + p2ptestutil "gx/ipfs/Qmb6BsZf6Y3kxffXMNTubGPF1w1bkHtpvhfYbmnwP3NQyw/go-libp2p-netutil" + peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" + ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" + delayed "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/delayed" + ds_sync "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/sync" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 306aadbe7..fdc8b8a76 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -11,7 +11,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" - peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" + peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) diff --git a/bitswap/workers.go b/bitswap/workers.go index 38a5df9d1..35fa57f3f 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -8,10 +8,10 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" - peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer" + logging "gx/ipfs/QmTG23dvpBCBjqQwyDxV8CQT6jmS4PSftNr1VqHhE3MLy7/go-log" + peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" cid 
"gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ) From b3c4b7fa5c6b29c72b07c4ea5ed36459d9c15b17 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 4 Jun 2018 09:53:40 -0700 Subject: [PATCH 0636/1035] update multiplexers License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@cd5778a0ab7f292499b7c3b5a2d31e370aef3d1f --- bitswap/bitswap_test.go | 4 ++-- bitswap/network/ipfs_impl.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 2 +- bitswap/testutils.go | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index b8b9888d2..a6324aa76 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -11,12 +11,12 @@ import ( decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" - mockrouting "gx/ipfs/QmPuPdzoG4b5uyYSQCjLEHB8NM593m3BW19UHX2jZ6Wzfm/go-ipfs-routing/mock" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" tu "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" travis "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil/ci/travis" + p2ptestutil "gx/ipfs/Qma2UuHusnaFV24DgeZ5hyrM9uc4UdyVaZbtn2FQsPRhES/go-libp2p-netutil" blockstore "gx/ipfs/QmayRSLCiM2gWR7Kay8vqu3Yy5mf7yPqocF9ZRgDUPYMcc/go-ipfs-blockstore" - p2ptestutil "gx/ipfs/Qmb6BsZf6Y3kxffXMNTubGPF1w1bkHtpvhfYbmnwP3NQyw/go-libp2p-netutil" + mockrouting "gx/ipfs/QmcE3B6ittYBmctva8Q155LPa1YPcVqg8N7pPcgt9i7iAQ/go-ipfs-routing/mock" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" detectrace "gx/ipfs/Qmf7HqcW7LtCi1W8y2bdx2eJpze74jkbKqpByxgXikdbLF/go-detect-race" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 5ff27c6e6..e2a0612a7 100644 --- a/bitswap/network/ipfs_impl.go +++ 
b/bitswap/network/ipfs_impl.go @@ -13,11 +13,11 @@ import ( ma "gx/ipfs/QmWWQ2Txc2c6tqjsBpzg5Ar652cHPGNsQQp2SejkNmkUMb/go-multiaddr" inet "gx/ipfs/QmXoz9o2PT3tEzf7hicegwex5UgVP54n3k82K7jrWFyN86/go-libp2p-net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" + host "gx/ipfs/QmaSfSMvc1VPZ8JbMponFs4WHvF9FgEruF56opm5E1RgQA/go-libp2p-host" peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" pstore "gx/ipfs/QmdeiKhUy1TVGBaKxt7y1QmBDLBdisSrLJ1x58Eoj4PXUh/go-libp2p-peerstore" ifconnmgr "gx/ipfs/QmfQNieWBPwmnUjXWPZbjJPzhNwFFabTb5RQ79dyVWGujQ/go-libp2p-interface-connmgr" - host "gx/ipfs/QmfZTdmunzKzAGJrSvXXQbQ5kLLUiEMX5vdwux7iXkdk7D/go-libp2p-host" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 92a1ea42c..ed1c459a4 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -8,9 +8,9 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - mockrouting "gx/ipfs/QmPuPdzoG4b5uyYSQCjLEHB8NM593m3BW19UHX2jZ6Wzfm/go-ipfs-routing/mock" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" testutil "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" + mockrouting "gx/ipfs/QmcE3B6ittYBmctva8Q155LPa1YPcVqg8N7pPcgt9i7iAQ/go-ipfs-routing/mock" peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 43d6cb713..6d78cf079 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,9 +5,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - mockrouting "gx/ipfs/QmPuPdzoG4b5uyYSQCjLEHB8NM593m3BW19UHX2jZ6Wzfm/go-ipfs-routing/mock" testutil 
"gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" - mockpeernet "gx/ipfs/QmWsV6kzPaYGBDVyuUfWBvyQygEc9Qrv9vzo8vZ7X4mdLN/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmY6iAoG9DVgZwh5ZRcQEpa2uErAe1Hbei8qXPCjpDS9Ge/go-libp2p/p2p/net/mock" + mockrouting "gx/ipfs/QmcE3B6ittYBmctva8Q155LPa1YPcVqg8N7pPcgt9i7iAQ/go-ipfs-routing/mock" peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 8ce0be524..d12992fa2 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,11 +9,11 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - mockrouting "gx/ipfs/QmPuPdzoG4b5uyYSQCjLEHB8NM593m3BW19UHX2jZ6Wzfm/go-ipfs-routing/mock" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" logging "gx/ipfs/QmTG23dvpBCBjqQwyDxV8CQT6jmS4PSftNr1VqHhE3MLy7/go-log" routing "gx/ipfs/QmUHRKTeaoASDvDj7cTAXsmjAY7KQ13ErtzkQHZQq6uFUz/go-libp2p-routing" testutil "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" + mockrouting "gx/ipfs/QmcE3B6ittYBmctva8Q155LPa1YPcVqg8N7pPcgt9i7iAQ/go-ipfs-routing/mock" peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" ifconnmgr "gx/ipfs/QmfQNieWBPwmnUjXWPZbjJPzhNwFFabTb5RQ79dyVWGujQ/go-libp2p-interface-connmgr" diff --git a/bitswap/testutils.go b/bitswap/testutils.go index f075c4812..aa886249b 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -8,8 +8,8 @@ import ( delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" testutil "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" + p2ptestutil "gx/ipfs/Qma2UuHusnaFV24DgeZ5hyrM9uc4UdyVaZbtn2FQsPRhES/go-libp2p-netutil" blockstore 
"gx/ipfs/QmayRSLCiM2gWR7Kay8vqu3Yy5mf7yPqocF9ZRgDUPYMcc/go-ipfs-blockstore" - p2ptestutil "gx/ipfs/Qmb6BsZf6Y3kxffXMNTubGPF1w1bkHtpvhfYbmnwP3NQyw/go-libp2p-netutil" peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" delayed "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/delayed" From 67e2e8066b9982141fb0bbbefa88a6e5ee2bd1a3 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 7 Mar 2018 22:06:17 -0800 Subject: [PATCH 0637/1035] transport refactor update License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@d9a8d81e01f49d8d19f0d7908a29bc93a4bfa4a6 --- bitswap/network/ipfs_impl.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index e2a0612a7..9388a65f4 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -54,7 +54,7 @@ type streamMessageSender struct { } func (s *streamMessageSender) Close() error { - return s.s.Close() + return inet.FullClose(s.s) } func (s *streamMessageSender) Reset() error { @@ -119,13 +119,13 @@ func (bsnet *impl) SendMessage( return err } - err = msgToStream(ctx, s, outgoing) - if err != nil { + if err = msgToStream(ctx, s, outgoing); err != nil { s.Reset() - } else { - s.Close() + return err } - return err + // Yes, return this error. We have no reason to believe that the block + // was actually *sent* unless we see the EOF. 
+ return inet.FullClose(s) } func (bsnet *impl) SetDelegate(r Receiver) { From f8a9091d4aeb423d4fc5f2c5d635692deeb869ae Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 5 Jun 2018 23:55:08 -0700 Subject: [PATCH 0638/1035] update gx imports License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@ff5791d85bb207d61513d8a2faa8591569b61bea --- bitswap/bitswap_test.go | 4 ++-- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 10 +++++----- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 4 ++-- bitswap/testnet/virtual.go | 6 +++--- bitswap/testutils.go | 2 +- 8 files changed, 16 insertions(+), 16 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index a6324aa76..d908881df 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -11,12 +11,12 @@ import ( decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" + mockrouting "gx/ipfs/QmPFAxh9UwfqwseVcWkj1Lz1gCHyQ6QuCk5m5XUp6vifkL/go-ipfs-routing/mock" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" tu "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" travis "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil/ci/travis" - p2ptestutil "gx/ipfs/Qma2UuHusnaFV24DgeZ5hyrM9uc4UdyVaZbtn2FQsPRhES/go-libp2p-netutil" blockstore "gx/ipfs/QmayRSLCiM2gWR7Kay8vqu3Yy5mf7yPqocF9ZRgDUPYMcc/go-ipfs-blockstore" - mockrouting "gx/ipfs/QmcE3B6ittYBmctva8Q155LPa1YPcVqg8N7pPcgt9i7iAQ/go-ipfs-routing/mock" + p2ptestutil "gx/ipfs/Qmb3r9qUR7PnkyUKztmXp8sQhzXZHGmRg7fR5zsB1ebWMj/go-libp2p-netutil" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" detectrace "gx/ipfs/Qmf7HqcW7LtCi1W8y2bdx2eJpze74jkbKqpByxgXikdbLF/go-detect-race" diff --git a/bitswap/message/message.go b/bitswap/message/message.go 
index 8477763b7..d22762f26 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -8,7 +8,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" - inet "gx/ipfs/QmXoz9o2PT3tEzf7hicegwex5UgVP54n3k82K7jrWFyN86/go-libp2p-net" + inet "gx/ipfs/QmYj8wdn5sZEHX2XMDWGBvcXJNdzVbaVpHmXvhHBVZepen/go-libp2p-net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index ff98884e1..1632a3b21 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -5,10 +5,10 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + ifconnmgr "gx/ipfs/QmWCWsDQnnQ9Mo9V3GK8TSR91662FdFxjjqPX8YbHC8Ltz/go-libp2p-interface-connmgr" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" - ifconnmgr "gx/ipfs/QmfQNieWBPwmnUjXWPZbjJPzhNwFFabTb5RQ79dyVWGujQ/go-libp2p-interface-connmgr" ) var ( diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 9388a65f4..4957498b3 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -9,15 +9,15 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" logging "gx/ipfs/QmTG23dvpBCBjqQwyDxV8CQT6jmS4PSftNr1VqHhE3MLy7/go-log" - routing "gx/ipfs/QmUHRKTeaoASDvDj7cTAXsmjAY7KQ13ErtzkQHZQq6uFUz/go-libp2p-routing" + ifconnmgr "gx/ipfs/QmWCWsDQnnQ9Mo9V3GK8TSR91662FdFxjjqPX8YbHC8Ltz/go-libp2p-interface-connmgr" ma "gx/ipfs/QmWWQ2Txc2c6tqjsBpzg5Ar652cHPGNsQQp2SejkNmkUMb/go-multiaddr" - inet 
"gx/ipfs/QmXoz9o2PT3tEzf7hicegwex5UgVP54n3k82K7jrWFyN86/go-libp2p-net" + routing "gx/ipfs/QmXijJ3T9MjB2v8xpFDoEX6FqR9u8PkJkzu49TgwJ8Ndr5/go-libp2p-routing" + inet "gx/ipfs/QmYj8wdn5sZEHX2XMDWGBvcXJNdzVbaVpHmXvhHBVZepen/go-libp2p-net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - host "gx/ipfs/QmaSfSMvc1VPZ8JbMponFs4WHvF9FgEruF56opm5E1RgQA/go-libp2p-host" + pstore "gx/ipfs/QmZb7hAgQEhW9dBbzBudU39gCeD4zbe6xafD52LUuF4cUN/go-libp2p-peerstore" peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" - pstore "gx/ipfs/QmdeiKhUy1TVGBaKxt7y1QmBDLBdisSrLJ1x58Eoj4PXUh/go-libp2p-peerstore" - ifconnmgr "gx/ipfs/QmfQNieWBPwmnUjXWPZbjJPzhNwFFabTb5RQ79dyVWGujQ/go-libp2p-interface-connmgr" + host "gx/ipfs/QmdHyfNVTZ5VtUx4Xz23z8wtnioSrFQ28XSfpVkdhQBkGA/go-libp2p-host" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index ed1c459a4..bdf6dafb2 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -8,9 +8,9 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" + mockrouting "gx/ipfs/QmPFAxh9UwfqwseVcWkj1Lz1gCHyQ6QuCk5m5XUp6vifkL/go-ipfs-routing/mock" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" testutil "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" - mockrouting "gx/ipfs/QmcE3B6ittYBmctva8Q155LPa1YPcVqg8N7pPcgt9i7iAQ/go-ipfs-routing/mock" peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 6d78cf079..25b887cb7 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,9 +5,9 @@ import ( bsnet 
"github.com/ipfs/go-ipfs/exchange/bitswap/network" + mockrouting "gx/ipfs/QmPFAxh9UwfqwseVcWkj1Lz1gCHyQ6QuCk5m5XUp6vifkL/go-ipfs-routing/mock" + mockpeernet "gx/ipfs/QmRvoAami8AAf5Yy6jcPq5KqQT1ZCaoi9dF1vdKAghmq9X/go-libp2p/p2p/net/mock" testutil "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" - mockpeernet "gx/ipfs/QmY6iAoG9DVgZwh5ZRcQEpa2uErAe1Hbei8qXPCjpDS9Ge/go-libp2p/p2p/net/mock" - mockrouting "gx/ipfs/QmcE3B6ittYBmctva8Q155LPa1YPcVqg8N7pPcgt9i7iAQ/go-ipfs-routing/mock" peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index d12992fa2..334e06e2f 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,14 +9,14 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" + mockrouting "gx/ipfs/QmPFAxh9UwfqwseVcWkj1Lz1gCHyQ6QuCk5m5XUp6vifkL/go-ipfs-routing/mock" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" logging "gx/ipfs/QmTG23dvpBCBjqQwyDxV8CQT6jmS4PSftNr1VqHhE3MLy7/go-log" - routing "gx/ipfs/QmUHRKTeaoASDvDj7cTAXsmjAY7KQ13ErtzkQHZQq6uFUz/go-libp2p-routing" testutil "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" - mockrouting "gx/ipfs/QmcE3B6ittYBmctva8Q155LPa1YPcVqg8N7pPcgt9i7iAQ/go-ipfs-routing/mock" + ifconnmgr "gx/ipfs/QmWCWsDQnnQ9Mo9V3GK8TSR91662FdFxjjqPX8YbHC8Ltz/go-libp2p-interface-connmgr" + routing "gx/ipfs/QmXijJ3T9MjB2v8xpFDoEX6FqR9u8PkJkzu49TgwJ8Ndr5/go-libp2p-routing" peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" - ifconnmgr "gx/ipfs/QmfQNieWBPwmnUjXWPZbjJPzhNwFFabTb5RQ79dyVWGujQ/go-libp2p-interface-connmgr" ) var log = logging.Logger("bstestnet") diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 
aa886249b..2f8d2229d 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -8,8 +8,8 @@ import ( delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" testutil "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" - p2ptestutil "gx/ipfs/Qma2UuHusnaFV24DgeZ5hyrM9uc4UdyVaZbtn2FQsPRhES/go-libp2p-netutil" blockstore "gx/ipfs/QmayRSLCiM2gWR7Kay8vqu3Yy5mf7yPqocF9ZRgDUPYMcc/go-ipfs-blockstore" + p2ptestutil "gx/ipfs/Qmb3r9qUR7PnkyUKztmXp8sQhzXZHGmRg7fR5zsB1ebWMj/go-libp2p-netutil" peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" delayed "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/delayed" From a1371309730a12907f1f7aa7ba43d2c654c27dd4 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 8 Jun 2018 22:01:00 -0700 Subject: [PATCH 0639/1035] gx update go-log, sys, go-crypto * go-log * sys * go-crypto License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@4ebd2ec3c1cfa0277c42b64c56800e0d709dfd91 --- bitswap/bitswap.go | 12 ++++++------ bitswap/bitswap_test.go | 16 ++++++++-------- bitswap/decision/bench_test.go | 8 ++++---- bitswap/decision/engine.go | 8 ++++---- bitswap/decision/engine_test.go | 8 ++++---- bitswap/decision/ledger.go | 4 ++-- bitswap/decision/peer_request_queue.go | 4 ++-- bitswap/decision/peer_request_queue_test.go | 6 +++--- bitswap/get.go | 6 +++--- bitswap/message/message.go | 6 +++--- bitswap/message/message_test.go | 6 +++--- bitswap/network/interface.go | 6 +++--- bitswap/network/ipfs_impl.go | 18 +++++++++--------- bitswap/notifications/notifications.go | 4 ++-- bitswap/notifications/notifications_test.go | 6 +++--- bitswap/session.go | 10 +++++----- bitswap/session_test.go | 8 ++++---- bitswap/stat.go | 2 +- bitswap/testnet/interface.go | 4 ++-- bitswap/testnet/network_test.go | 8 ++++---- bitswap/testnet/peernet.go | 8 ++++---- 
bitswap/testnet/virtual.go | 14 +++++++------- bitswap/testutils.go | 8 ++++---- bitswap/wantlist/wantlist.go | 2 +- bitswap/wantlist/wantlist_test.go | 2 +- bitswap/wantmanager.go | 4 ++-- bitswap/workers.go | 6 +++--- 27 files changed, 97 insertions(+), 97 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 512e0ae17..480b65aed 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -20,12 +20,12 @@ import ( metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" - logging "gx/ipfs/QmTG23dvpBCBjqQwyDxV8CQT6jmS4PSftNr1VqHhE3MLy7/go-log" - blockstore "gx/ipfs/QmayRSLCiM2gWR7Kay8vqu3Yy5mf7yPqocF9ZRgDUPYMcc/go-ipfs-blockstore" - peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" - exchange "gx/ipfs/QmdcAXgEHUueP4A7b5hjabKn2EooeHgMreMvFC249dGCgc/go-ipfs-exchange-interface" - blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" + blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" + exchange "gx/ipfs/QmVSe7YJbPnEmkSUKD3HxSvp8HJoyCU55hQoCMRq7N1jaK/go-ipfs-exchange-interface" + peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + blockstore "gx/ipfs/QmbaPGg81pvQiC5vTXtC9Jo8rdrWUjRaugH71WYNsgi6Ev/go-ipfs-blockstore" + logging "gx/ipfs/Qmbi1CTJsbnBZjCEgc2otwu8cUFPsGpzWXG7edVCLZ7Gvk/go-log" ) var log = logging.Logger("bitswap") diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index d908881df..c0ef468b0 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -11,16 +11,16 @@ import ( decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" 
- mockrouting "gx/ipfs/QmPFAxh9UwfqwseVcWkj1Lz1gCHyQ6QuCk5m5XUp6vifkL/go-ipfs-routing/mock" + tu "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" + travis "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil/ci/travis" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - tu "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" - travis "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil/ci/travis" - blockstore "gx/ipfs/QmayRSLCiM2gWR7Kay8vqu3Yy5mf7yPqocF9ZRgDUPYMcc/go-ipfs-blockstore" - p2ptestutil "gx/ipfs/Qmb3r9qUR7PnkyUKztmXp8sQhzXZHGmRg7fR5zsB1ebWMj/go-libp2p-netutil" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" - blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" + blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" + blocksutil "gx/ipfs/QmYmE4kxv6uFGaWkeBAFYDuNcxzCn87pzwm6CkBkM9C8BM/go-ipfs-blocksutil" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + mockrouting "gx/ipfs/Qmb1N7zdjG2FexpzWNj8T289u9QnQLEiSsTRadDGQxX32D/go-ipfs-routing/mock" + blockstore "gx/ipfs/QmbaPGg81pvQiC5vTXtC9Jo8rdrWUjRaugH71WYNsgi6Ev/go-ipfs-blockstore" + p2ptestutil "gx/ipfs/QmeBUY1BsMjkacVAJ2u76XBGNiRCHq6dkqT2VWG59N3d7b/go-libp2p-netutil" detectrace "gx/ipfs/Qmf7HqcW7LtCi1W8y2bdx2eJpze74jkbKqpByxgXikdbLF/go-detect-race" - blocksutil "gx/ipfs/Qmf951DP11mCoctpyF3ZppPZdo2oAxuNi2vnkVDgHJ8Fqk/go-ipfs-blocksutil" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 81f8a6f98..ff1011aea 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -6,10 +6,10 @@ import ( "testing" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - u "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util" - "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" - "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util" + "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" + "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 35c5a58f0..9855d5b99 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -9,10 +9,10 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - logging "gx/ipfs/QmTG23dvpBCBjqQwyDxV8CQT6jmS4PSftNr1VqHhE3MLy7/go-log" - bstore "gx/ipfs/QmayRSLCiM2gWR7Kay8vqu3Yy5mf7yPqocF9ZRgDUPYMcc/go-ipfs-blockstore" - peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" - blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" + blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" + peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + bstore "gx/ipfs/QmbaPGg81pvQiC5vTXtC9Jo8rdrWUjRaugH71WYNsgi6Ev/go-ipfs-blockstore" + logging "gx/ipfs/Qmbi1CTJsbnBZjCEgc2otwu8cUFPsGpzWXG7edVCLZ7Gvk/go-log" ) // TODO consider taking responsibility for other types of requests. 
For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index de54c1018..6c5a0741a 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -11,12 +11,12 @@ import ( message "github.com/ipfs/go-ipfs/exchange/bitswap/message" - testutil "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" - blockstore "gx/ipfs/QmayRSLCiM2gWR7Kay8vqu3Yy5mf7yPqocF9ZRgDUPYMcc/go-ipfs-blockstore" - peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" + testutil "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" + blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" + peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + blockstore "gx/ipfs/QmbaPGg81pvQiC5vTXtC9Jo8rdrWUjRaugH71WYNsgi6Ev/go-ipfs-blockstore" ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" dssync "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/sync" - blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index c873d7679..749ed93a0 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,8 +6,8 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" ) func newLedger(p peer.ID) *ledger { diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 63b574737..99b09b3f0 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -6,9 +6,9 @@ import ( wantlist 
"github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" pq "gx/ipfs/QmZUbTDJ39JpvtFCSubiWeUTQRvMA1tVE5RZCJrY4oeAsC/go-ipfs-pq" - peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" ) type peerRequestQueue interface { diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 4435837ab..89a63cf4f 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -9,9 +9,9 @@ import ( "testing" "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - u "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util" - "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util" + "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/get.go b/bitswap/get.go index 978a043dc..e99c4caa8 100644 --- a/bitswap/get.go +++ b/bitswap/get.go @@ -6,9 +6,9 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - blockstore "gx/ipfs/QmayRSLCiM2gWR7Kay8vqu3Yy5mf7yPqocF9ZRgDUPYMcc/go-ipfs-blockstore" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" - blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" + blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + blockstore "gx/ipfs/QmbaPGg81pvQiC5vTXtC9Jo8rdrWUjRaugH71WYNsgi6Ev/go-ipfs-blockstore" ) type getBlocksFunc func(context.Context, []*cid.Cid) 
(<-chan blocks.Block, error) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index d22762f26..156e2faf0 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -6,12 +6,12 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" + blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" - inet "gx/ipfs/QmYj8wdn5sZEHX2XMDWGBvcXJNdzVbaVpHmXvhHBVZepen/go-libp2p-net" + inet "gx/ipfs/QmXdgNhVEgjLxjUoMs5ViQL7pboAt3Y7V7eGHRiE4qrmTE/go-libp2p-net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" ) // TODO move message.go into the bitswap package diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 1ab0a9c40..abd3e77db 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -6,10 +6,10 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" - u "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util" + u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util" + blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" - blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" ) func mkFakeCid(s string) *cid.Cid { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 1632a3b21..96eb66142 100644 --- 
a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -5,10 +5,10 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - ifconnmgr "gx/ipfs/QmWCWsDQnnQ9Mo9V3GK8TSR91662FdFxjjqPX8YbHC8Ltz/go-libp2p-interface-connmgr" + peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" - peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + ifconnmgr "gx/ipfs/Qmav3fJzdn43FDvHyGkPdbQ5JVqqiDPmNdnuGa3vatpmwj/go-libp2p-interface-connmgr" ) var ( diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 4957498b3..9df94e6e6 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,16 +8,16 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - logging "gx/ipfs/QmTG23dvpBCBjqQwyDxV8CQT6jmS4PSftNr1VqHhE3MLy7/go-log" - ifconnmgr "gx/ipfs/QmWCWsDQnnQ9Mo9V3GK8TSR91662FdFxjjqPX8YbHC8Ltz/go-libp2p-interface-connmgr" - ma "gx/ipfs/QmWWQ2Txc2c6tqjsBpzg5Ar652cHPGNsQQp2SejkNmkUMb/go-multiaddr" - routing "gx/ipfs/QmXijJ3T9MjB2v8xpFDoEX6FqR9u8PkJkzu49TgwJ8Ndr5/go-libp2p-routing" - inet "gx/ipfs/QmYj8wdn5sZEHX2XMDWGBvcXJNdzVbaVpHmXvhHBVZepen/go-libp2p-net" + host "gx/ipfs/QmQQGtcp6nVUrQjNsnU53YWV1q8fK1Kd9S7FEkYbRZzxry/go-libp2p-host" + routing "gx/ipfs/QmUV9hDAAyjeGbxbXkJ2sYqZ6dTd1DXJ2REhYEkRm178Tg/go-libp2p-routing" + ma "gx/ipfs/QmUxSEGbv2nmYNnfXi7839wwQqTN3kwQeUxe8dTjZWZs7J/go-multiaddr" + peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + inet "gx/ipfs/QmXdgNhVEgjLxjUoMs5ViQL7pboAt3Y7V7eGHRiE4qrmTE/go-libp2p-net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - pstore "gx/ipfs/QmZb7hAgQEhW9dBbzBudU39gCeD4zbe6xafD52LUuF4cUN/go-libp2p-peerstore" - peer 
"gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" - host "gx/ipfs/QmdHyfNVTZ5VtUx4Xz23z8wtnioSrFQ28XSfpVkdhQBkGA/go-libp2p-host" + pstore "gx/ipfs/QmZhsmorLpD9kmQ4ynbAu4vbKv2goMUnXazwGA4gnWHDjB/go-libp2p-peerstore" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + ifconnmgr "gx/ipfs/Qmav3fJzdn43FDvHyGkPdbQ5JVqqiDPmNdnuGa3vatpmwj/go-libp2p-interface-connmgr" + logging "gx/ipfs/Qmbi1CTJsbnBZjCEgc2otwu8cUFPsGpzWXG7edVCLZ7Gvk/go-log" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index be0f11c5a..31109c719 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -4,9 +4,9 @@ import ( "context" "sync" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" pubsub "gx/ipfs/QmdbxjQWogRCHRaxhhGnYdT1oQJzL9GdqSKzCdqWr85AP2/pubsub" - blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 5c15975db..102b3fb73 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" - blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" - blocksutil "gx/ipfs/Qmf951DP11mCoctpyF3ZppPZdo2oAxuNi2vnkVDgHJ8Fqk/go-ipfs-blocksutil" + blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" + blocksutil "gx/ipfs/QmYmE4kxv6uFGaWkeBAFYDuNcxzCn87pzwm6CkBkM9C8BM/go-ipfs-blocksutil" + cid 
"gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/session.go b/bitswap/session.go index 09f3cab5d..fd8969971 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -7,12 +7,12 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - loggables "gx/ipfs/QmPDZJxtWGfcwLPazJxD4h3v3aDs43V7UNAVs3Jz1Wo7o4/go-libp2p-loggables" - logging "gx/ipfs/QmTG23dvpBCBjqQwyDxV8CQT6jmS4PSftNr1VqHhE3MLy7/go-log" + blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru" - peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" - blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" + peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + logging "gx/ipfs/Qmbi1CTJsbnBZjCEgc2otwu8cUFPsGpzWXG7edVCLZ7Gvk/go-log" + loggables "gx/ipfs/QmcBbMF4UyZFRTvH9S2h3rbSRBvvEGLqgt4sdvVugG8rX1/go-libp2p-loggables" ) const activeWantsLimit = 16 diff --git a/bitswap/session_test.go b/bitswap/session_test.go index 986fedb8a..6cf96118b 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -6,10 +6,10 @@ import ( "testing" "time" - tu "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" - blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" - blocksutil "gx/ipfs/Qmf951DP11mCoctpyF3ZppPZdo2oAxuNi2vnkVDgHJ8Fqk/go-ipfs-blocksutil" + tu "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" + blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" + blocksutil "gx/ipfs/QmYmE4kxv6uFGaWkeBAFYDuNcxzCn87pzwm6CkBkM9C8BM/go-ipfs-blocksutil" + cid 
"gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" ) func TestBasicSessions(t *testing.T) { diff --git a/bitswap/stat.go b/bitswap/stat.go index 825888abc..85390475d 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -3,7 +3,7 @@ package bitswap import ( "sort" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" ) type Stat struct { diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 8ab2fb621..c0dff2a8a 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -2,8 +2,8 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" - peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" + "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" + peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index bdf6dafb2..62a92275a 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -8,11 +8,11 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - mockrouting "gx/ipfs/QmPFAxh9UwfqwseVcWkj1Lz1gCHyQ6QuCk5m5XUp6vifkL/go-ipfs-routing/mock" + testutil "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - testutil "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" - peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" - blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format" + blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" + peer 
"gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + mockrouting "gx/ipfs/Qmb1N7zdjG2FexpzWNj8T289u9QnQLEiSsTRadDGQxX32D/go-ipfs-routing/mock" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 25b887cb7..9b51a0de4 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,10 +5,10 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - mockrouting "gx/ipfs/QmPFAxh9UwfqwseVcWkj1Lz1gCHyQ6QuCk5m5XUp6vifkL/go-ipfs-routing/mock" - mockpeernet "gx/ipfs/QmRvoAami8AAf5Yy6jcPq5KqQT1ZCaoi9dF1vdKAghmq9X/go-libp2p/p2p/net/mock" - testutil "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" - peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" + testutil "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" + mockpeernet "gx/ipfs/QmUEAR2pS7fP1GPseS3i8MWFyENs7oDp4CZrgn8FCjbsBu/go-libp2p/p2p/net/mock" + peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + mockrouting "gx/ipfs/Qmb1N7zdjG2FexpzWNj8T289u9QnQLEiSsTRadDGQxX32D/go-ipfs-routing/mock" ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 334e06e2f..bec775847 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,14 +9,14 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - mockrouting "gx/ipfs/QmPFAxh9UwfqwseVcWkj1Lz1gCHyQ6QuCk5m5XUp6vifkL/go-ipfs-routing/mock" + testutil "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - logging "gx/ipfs/QmTG23dvpBCBjqQwyDxV8CQT6jmS4PSftNr1VqHhE3MLy7/go-log" - testutil "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" - ifconnmgr 
"gx/ipfs/QmWCWsDQnnQ9Mo9V3GK8TSR91662FdFxjjqPX8YbHC8Ltz/go-libp2p-interface-connmgr" - routing "gx/ipfs/QmXijJ3T9MjB2v8xpFDoEX6FqR9u8PkJkzu49TgwJ8Ndr5/go-libp2p-routing" - peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + routing "gx/ipfs/QmUV9hDAAyjeGbxbXkJ2sYqZ6dTd1DXJ2REhYEkRm178Tg/go-libp2p-routing" + peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + ifconnmgr "gx/ipfs/Qmav3fJzdn43FDvHyGkPdbQ5JVqqiDPmNdnuGa3vatpmwj/go-libp2p-interface-connmgr" + mockrouting "gx/ipfs/Qmb1N7zdjG2FexpzWNj8T289u9QnQLEiSsTRadDGQxX32D/go-ipfs-routing/mock" + logging "gx/ipfs/Qmbi1CTJsbnBZjCEgc2otwu8cUFPsGpzWXG7edVCLZ7Gvk/go-log" ) var log = logging.Logger("bstestnet") diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 2f8d2229d..ce141ab6d 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -6,11 +6,11 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" + testutil "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - testutil "gx/ipfs/QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq/go-testutil" - blockstore "gx/ipfs/QmayRSLCiM2gWR7Kay8vqu3Yy5mf7yPqocF9ZRgDUPYMcc/go-ipfs-blockstore" - p2ptestutil "gx/ipfs/Qmb3r9qUR7PnkyUKztmXp8sQhzXZHGmRg7fR5zsB1ebWMj/go-libp2p-netutil" - peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" + peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + blockstore "gx/ipfs/QmbaPGg81pvQiC5vTXtC9Jo8rdrWUjRaugH71WYNsgi6Ev/go-ipfs-blockstore" + p2ptestutil "gx/ipfs/QmeBUY1BsMjkacVAJ2u76XBGNiRCHq6dkqT2VWG59N3d7b/go-libp2p-netutil" ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" delayed "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/delayed" ds_sync 
"gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/sync" diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index c2225b88d..6f230ba5b 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" ) type ThreadSafe struct { diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/wantlist/wantlist_test.go index 37c5c91c6..dc7925941 100644 --- a/bitswap/wantlist/wantlist_test.go +++ b/bitswap/wantlist/wantlist_test.go @@ -3,7 +3,7 @@ package wantlist import ( "testing" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" ) var testcids []*cid.Cid diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index fdc8b8a76..6f7f2395f 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -11,8 +11,8 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" - peer "gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" ) type WantManager struct { diff --git a/bitswap/workers.go b/bitswap/workers.go index 35fa57f3f..f96fc3ba3 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -10,9 +10,9 @@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" - logging "gx/ipfs/QmTG23dvpBCBjqQwyDxV8CQT6jmS4PSftNr1VqHhE3MLy7/go-log" - peer 
"gx/ipfs/QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74/go-libp2p-peer" - cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid" + peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + logging "gx/ipfs/Qmbi1CTJsbnBZjCEgc2otwu8cUFPsGpzWXG7edVCLZ7Gvk/go-log" ) var TaskWorkerCount = 8 From da60c717c317305f4b1b02a1edf183d3bdaadd5a Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 13 Jun 2018 20:04:48 -0700 Subject: [PATCH 0640/1035] add record validation to offline routing fixes #5115 License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@8abfa71d376ba6aa72eb9bee24603f910e4e89fc --- bitswap/bitswap_test.go | 2 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testnet/virtual.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index c0ef468b0..f618002b7 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -13,11 +13,11 @@ import ( tu "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" travis "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil/ci/travis" + mockrouting "gx/ipfs/QmQUPmFYZBSWn4mtX1YwYkSaMoWVore7tCiSetr6k8JW21/go-ipfs-routing/mock" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" blocksutil "gx/ipfs/QmYmE4kxv6uFGaWkeBAFYDuNcxzCn87pzwm6CkBkM9C8BM/go-ipfs-blocksutil" cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" - mockrouting "gx/ipfs/Qmb1N7zdjG2FexpzWNj8T289u9QnQLEiSsTRadDGQxX32D/go-ipfs-routing/mock" blockstore "gx/ipfs/QmbaPGg81pvQiC5vTXtC9Jo8rdrWUjRaugH71WYNsgi6Ev/go-ipfs-blockstore" p2ptestutil "gx/ipfs/QmeBUY1BsMjkacVAJ2u76XBGNiRCHq6dkqT2VWG59N3d7b/go-libp2p-netutil" detectrace 
"gx/ipfs/Qmf7HqcW7LtCi1W8y2bdx2eJpze74jkbKqpByxgXikdbLF/go-detect-race" diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 62a92275a..0f1398b45 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -9,10 +9,10 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" testutil "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" + mockrouting "gx/ipfs/QmQUPmFYZBSWn4mtX1YwYkSaMoWVore7tCiSetr6k8JW21/go-ipfs-routing/mock" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" - mockrouting "gx/ipfs/Qmb1N7zdjG2FexpzWNj8T289u9QnQLEiSsTRadDGQxX32D/go-ipfs-routing/mock" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 9b51a0de4..f7e76621f 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -6,9 +6,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" testutil "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" + mockrouting "gx/ipfs/QmQUPmFYZBSWn4mtX1YwYkSaMoWVore7tCiSetr6k8JW21/go-ipfs-routing/mock" mockpeernet "gx/ipfs/QmUEAR2pS7fP1GPseS3i8MWFyENs7oDp4CZrgn8FCjbsBu/go-libp2p/p2p/net/mock" peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" - mockrouting "gx/ipfs/Qmb1N7zdjG2FexpzWNj8T289u9QnQLEiSsTRadDGQxX32D/go-ipfs-routing/mock" ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index bec775847..6ef654133 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -10,12 +10,12 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" testutil "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" + 
mockrouting "gx/ipfs/QmQUPmFYZBSWn4mtX1YwYkSaMoWVore7tCiSetr6k8JW21/go-ipfs-routing/mock" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" routing "gx/ipfs/QmUV9hDAAyjeGbxbXkJ2sYqZ6dTd1DXJ2REhYEkRm178Tg/go-libp2p-routing" peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" ifconnmgr "gx/ipfs/Qmav3fJzdn43FDvHyGkPdbQ5JVqqiDPmNdnuGa3vatpmwj/go-libp2p-interface-connmgr" - mockrouting "gx/ipfs/Qmb1N7zdjG2FexpzWNj8T289u9QnQLEiSsTRadDGQxX32D/go-ipfs-routing/mock" logging "gx/ipfs/Qmbi1CTJsbnBZjCEgc2otwu8cUFPsGpzWXG7edVCLZ7Gvk/go-log" ) From 61d3c7cef6499f35d733f58d4983ca206f5eb6d2 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 25 Jun 2018 20:41:25 -0700 Subject: [PATCH 0641/1035] gx update Updates: * go-kad-dht: Query performance improvements, DHT client fixes, validates records on *local* put. * go-libp2p-swarm/go-libp2p-transport: Timeout improvements. * go-multiaddr-net: Exposes useful Conn methods (CloseWrite, CloseRead, etc.) * go-log: fixes possible panic when enabling/disabling events. * go-multiaddr: fixes possible panic when stringifying malformed multiaddrs, adds support for consuming /p2p/ multiaddrs. 
fixes #5113 unblocks #4895 License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@b468efbc43cf59cb91b0414cc5d7f454d919909f --- bitswap/bitswap.go | 6 +++--- bitswap/bitswap_test.go | 10 +++++----- bitswap/decision/bench_test.go | 4 ++-- bitswap/decision/engine.go | 6 +++--- bitswap/decision/engine_test.go | 6 +++--- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/decision/peer_request_queue_test.go | 2 +- bitswap/get.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/interface.go | 4 ++-- bitswap/network/ipfs_impl.go | 16 ++++++++-------- bitswap/session.go | 6 +++--- bitswap/session_test.go | 2 +- bitswap/testnet/interface.go | 4 ++-- bitswap/testnet/network_test.go | 6 +++--- bitswap/testnet/peernet.go | 8 ++++---- bitswap/testnet/virtual.go | 12 ++++++------ bitswap/testutils.go | 8 ++++---- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 4 ++-- 21 files changed, 57 insertions(+), 57 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 480b65aed..58acf7196 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -22,10 +22,10 @@ import ( procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" exchange "gx/ipfs/QmVSe7YJbPnEmkSUKD3HxSvp8HJoyCU55hQoCMRq7N1jaK/go-ipfs-exchange-interface" - peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" - blockstore "gx/ipfs/QmbaPGg81pvQiC5vTXtC9Jo8rdrWUjRaugH71WYNsgi6Ev/go-ipfs-blockstore" - logging "gx/ipfs/Qmbi1CTJsbnBZjCEgc2otwu8cUFPsGpzWXG7edVCLZ7Gvk/go-log" + logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" + peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" + blockstore "gx/ipfs/QmdpuJBPBZ6sLPj9BQpn3Rpi38BT2cF1QMiUfyzNWeySW4/go-ipfs-blockstore" ) var log = 
logging.Logger("bitswap") diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index f618002b7..1b262db4e 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -11,15 +11,15 @@ import ( decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" - tu "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" - travis "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil/ci/travis" - mockrouting "gx/ipfs/QmQUPmFYZBSWn4mtX1YwYkSaMoWVore7tCiSetr6k8JW21/go-ipfs-routing/mock" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" + mockrouting "gx/ipfs/QmWLQyLU7yopJnwMvpHM5VSMG4xmbKgcq6P246mDy9xy5E/go-ipfs-routing/mock" blocksutil "gx/ipfs/QmYmE4kxv6uFGaWkeBAFYDuNcxzCn87pzwm6CkBkM9C8BM/go-ipfs-blocksutil" cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" - blockstore "gx/ipfs/QmbaPGg81pvQiC5vTXtC9Jo8rdrWUjRaugH71WYNsgi6Ev/go-ipfs-blockstore" - p2ptestutil "gx/ipfs/QmeBUY1BsMjkacVAJ2u76XBGNiRCHq6dkqT2VWG59N3d7b/go-libp2p-netutil" + tu "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" + travis "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil/ci/travis" + p2ptestutil "gx/ipfs/QmcxUtMB5sJrXR3znSvkrDd2ghvwGM8rLRqwJiPUdgQwat/go-libp2p-netutil" + blockstore "gx/ipfs/QmdpuJBPBZ6sLPj9BQpn3Rpi38BT2cF1QMiUfyzNWeySW4/go-ipfs-blockstore" detectrace "gx/ipfs/Qmf7HqcW7LtCi1W8y2bdx2eJpze74jkbKqpByxgXikdbLF/go-detect-race" ) diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index ff1011aea..dccfa9ad1 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,9 +7,9 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util" - "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" - 
"gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" + "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 9855d5b99..b0bcf434c 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -10,9 +10,9 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" - peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" - bstore "gx/ipfs/QmbaPGg81pvQiC5vTXtC9Jo8rdrWUjRaugH71WYNsgi6Ev/go-ipfs-blockstore" - logging "gx/ipfs/Qmbi1CTJsbnBZjCEgc2otwu8cUFPsGpzWXG7edVCLZ7Gvk/go-log" + logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" + peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" + bstore "gx/ipfs/QmdpuJBPBZ6sLPj9BQpn3Rpi38BT2cF1QMiUfyzNWeySW4/go-ipfs-blockstore" ) // TODO consider taking responsibility for other types of requests. 
For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 6c5a0741a..a183dd72b 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -11,10 +11,10 @@ import ( message "github.com/ipfs/go-ipfs/exchange/bitswap/message" - testutil "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" - peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" - blockstore "gx/ipfs/QmbaPGg81pvQiC5vTXtC9Jo8rdrWUjRaugH71WYNsgi6Ev/go-ipfs-blockstore" + testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" + peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" + blockstore "gx/ipfs/QmdpuJBPBZ6sLPj9BQpn3Rpi38BT2cF1QMiUfyzNWeySW4/go-ipfs-blockstore" ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" dssync "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/sync" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 749ed93a0..6c3504788 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,8 +6,8 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) func newLedger(p peer.ID) *ledger { diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 99b09b3f0..f2873361a 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -6,9 +6,9 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" pq "gx/ipfs/QmZUbTDJ39JpvtFCSubiWeUTQRvMA1tVE5RZCJrY4oeAsC/go-ipfs-pq" cid 
"gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) type peerRequestQueue interface { diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 89a63cf4f..d84a5695c 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -10,8 +10,8 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util" - "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/get.go b/bitswap/get.go index e99c4caa8..a2d9466cd 100644 --- a/bitswap/get.go +++ b/bitswap/get.go @@ -8,7 +8,7 @@ import ( blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" - blockstore "gx/ipfs/QmbaPGg81pvQiC5vTXtC9Jo8rdrWUjRaugH71WYNsgi6Ev/go-ipfs-blockstore" + blockstore "gx/ipfs/QmdpuJBPBZ6sLPj9BQpn3Rpi38BT2cF1QMiUfyzNWeySW4/go-ipfs-blockstore" ) type getBlocksFunc func(context.Context, []*cid.Cid) (<-chan blocks.Block, error) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 156e2faf0..dde2f9e01 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -8,7 +8,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" - inet "gx/ipfs/QmXdgNhVEgjLxjUoMs5ViQL7pboAt3Y7V7eGHRiE4qrmTE/go-libp2p-net" + inet "gx/ipfs/QmPjvxTpVH8qJyQDnxnsxF9kv9jezKD1kozz1hs3fCGsNh/go-libp2p-net" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto 
"gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 96eb66142..635e5d2bf 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -5,10 +5,10 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + ifconnmgr "gx/ipfs/QmXuucFcuvAWYAJfhHV2h4BYreHEAsLSsiquosiXeuduTN/go-libp2p-interface-connmgr" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" - ifconnmgr "gx/ipfs/Qmav3fJzdn43FDvHyGkPdbQ5JVqqiDPmNdnuGa3vatpmwj/go-libp2p-interface-connmgr" + peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) var ( diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 9df94e6e6..a5012e252 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,16 +8,16 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - host "gx/ipfs/QmQQGtcp6nVUrQjNsnU53YWV1q8fK1Kd9S7FEkYbRZzxry/go-libp2p-host" - routing "gx/ipfs/QmUV9hDAAyjeGbxbXkJ2sYqZ6dTd1DXJ2REhYEkRm178Tg/go-libp2p-routing" - ma "gx/ipfs/QmUxSEGbv2nmYNnfXi7839wwQqTN3kwQeUxe8dTjZWZs7J/go-multiaddr" - peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" - inet "gx/ipfs/QmXdgNhVEgjLxjUoMs5ViQL7pboAt3Y7V7eGHRiE4qrmTE/go-libp2p-net" + inet "gx/ipfs/QmPjvxTpVH8qJyQDnxnsxF9kv9jezKD1kozz1hs3fCGsNh/go-libp2p-net" + routing "gx/ipfs/QmPpdpS9fknTBM3qHDcpayU6nYPZQeVjia2fbNrD8YWDe6/go-libp2p-routing" + ifconnmgr "gx/ipfs/QmXuucFcuvAWYAJfhHV2h4BYreHEAsLSsiquosiXeuduTN/go-libp2p-interface-connmgr" + ma "gx/ipfs/QmYmsdtJ3HsodkePE3eU3TsCaP2YvPZJ4LoXnNkDE5Tpt7/go-multiaddr" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - pstore 
"gx/ipfs/QmZhsmorLpD9kmQ4ynbAu4vbKv2goMUnXazwGA4gnWHDjB/go-libp2p-peerstore" + pstore "gx/ipfs/QmZR2XWVVBCtbgBWnQhWk2xcQfaR3W8faQPriAiaaj7rsr/go-libp2p-peerstore" cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" - ifconnmgr "gx/ipfs/Qmav3fJzdn43FDvHyGkPdbQ5JVqqiDPmNdnuGa3vatpmwj/go-libp2p-interface-connmgr" - logging "gx/ipfs/Qmbi1CTJsbnBZjCEgc2otwu8cUFPsGpzWXG7edVCLZ7Gvk/go-log" + host "gx/ipfs/Qmb8T6YBBsjYsVGfrihQLfCJveczZnneSBqBKkYEBWDjge/go-libp2p-host" + logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" + peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/session.go b/bitswap/session.go index fd8969971..16f3b475c 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -7,12 +7,12 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" + loggables "gx/ipfs/QmRPkGkHLB72caXgdDYnoaWigXNWx95BcYDKV1n3KTEpaG/go-libp2p-loggables" blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru" - peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" - logging "gx/ipfs/Qmbi1CTJsbnBZjCEgc2otwu8cUFPsGpzWXG7edVCLZ7Gvk/go-log" - loggables "gx/ipfs/QmcBbMF4UyZFRTvH9S2h3rbSRBvvEGLqgt4sdvVugG8rX1/go-libp2p-loggables" + logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" + peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) const activeWantsLimit = 16 diff --git a/bitswap/session_test.go b/bitswap/session_test.go index 6cf96118b..6edc6e065 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -6,10 +6,10 @@ import ( "testing" "time" - tu "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" blocks 
"gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" blocksutil "gx/ipfs/QmYmE4kxv6uFGaWkeBAFYDuNcxzCn87pzwm6CkBkM9C8BM/go-ipfs-blocksutil" cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + tu "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" ) func TestBasicSessions(t *testing.T) { diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index c0dff2a8a..c4ac9b368 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -2,8 +2,8 @@ package bitswap import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" - peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" + peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 0f1398b45..1fa8a8930 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -8,11 +8,11 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - testutil "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" - mockrouting "gx/ipfs/QmQUPmFYZBSWn4mtX1YwYkSaMoWVore7tCiSetr6k8JW21/go-ipfs-routing/mock" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" - peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + mockrouting "gx/ipfs/QmWLQyLU7yopJnwMvpHM5VSMG4xmbKgcq6P246mDy9xy5E/go-ipfs-routing/mock" + testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" + peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { 
diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index f7e76621f..dc5349391 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,10 +5,10 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - testutil "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" - mockrouting "gx/ipfs/QmQUPmFYZBSWn4mtX1YwYkSaMoWVore7tCiSetr6k8JW21/go-ipfs-routing/mock" - mockpeernet "gx/ipfs/QmUEAR2pS7fP1GPseS3i8MWFyENs7oDp4CZrgn8FCjbsBu/go-libp2p/p2p/net/mock" - peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + mockrouting "gx/ipfs/QmWLQyLU7yopJnwMvpHM5VSMG4xmbKgcq6P246mDy9xy5E/go-ipfs-routing/mock" + mockpeernet "gx/ipfs/QmZ86eLPtXkQ1Dfa992Q8NpXArUoWWh3y728JDcWvzRrvC/go-libp2p/p2p/net/mock" + testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" + peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 6ef654133..cfb307f10 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,14 +9,14 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - testutil "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" - mockrouting "gx/ipfs/QmQUPmFYZBSWn4mtX1YwYkSaMoWVore7tCiSetr6k8JW21/go-ipfs-routing/mock" + routing "gx/ipfs/QmPpdpS9fknTBM3qHDcpayU6nYPZQeVjia2fbNrD8YWDe6/go-libp2p-routing" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - routing "gx/ipfs/QmUV9hDAAyjeGbxbXkJ2sYqZ6dTd1DXJ2REhYEkRm178Tg/go-libp2p-routing" - peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" + mockrouting "gx/ipfs/QmWLQyLU7yopJnwMvpHM5VSMG4xmbKgcq6P246mDy9xy5E/go-ipfs-routing/mock" + ifconnmgr 
"gx/ipfs/QmXuucFcuvAWYAJfhHV2h4BYreHEAsLSsiquosiXeuduTN/go-libp2p-interface-connmgr" cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" - ifconnmgr "gx/ipfs/Qmav3fJzdn43FDvHyGkPdbQ5JVqqiDPmNdnuGa3vatpmwj/go-libp2p-interface-connmgr" - logging "gx/ipfs/Qmbi1CTJsbnBZjCEgc2otwu8cUFPsGpzWXG7edVCLZ7Gvk/go-log" + logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" + testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" + peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) var log = logging.Logger("bstestnet") diff --git a/bitswap/testutils.go b/bitswap/testutils.go index ce141ab6d..9f6ed03c7 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -6,11 +6,11 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" - testutil "gx/ipfs/QmPdxCaVp4jZ9RbxqZADvKH6kiCR5jHvdR5f2ycjAY6T2a/go-testutil" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" - blockstore "gx/ipfs/QmbaPGg81pvQiC5vTXtC9Jo8rdrWUjRaugH71WYNsgi6Ev/go-ipfs-blockstore" - p2ptestutil "gx/ipfs/QmeBUY1BsMjkacVAJ2u76XBGNiRCHq6dkqT2VWG59N3d7b/go-libp2p-netutil" + testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" + p2ptestutil "gx/ipfs/QmcxUtMB5sJrXR3znSvkrDd2ghvwGM8rLRqwJiPUdgQwat/go-libp2p-netutil" + peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" + blockstore "gx/ipfs/QmdpuJBPBZ6sLPj9BQpn3Rpi38BT2cF1QMiUfyzNWeySW4/go-ipfs-blockstore" ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" delayed "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/delayed" ds_sync "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/sync" diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 6f7f2395f..7b30bf23a 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -11,8 +11,8 @@ import ( 
wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" - peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) type WantManager struct { diff --git a/bitswap/workers.go b/bitswap/workers.go index f96fc3ba3..3dd5f9cb2 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -10,9 +10,9 @@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" - peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer" cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" - logging "gx/ipfs/Qmbi1CTJsbnBZjCEgc2otwu8cUFPsGpzWXG7edVCLZ7Gvk/go-log" + logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" + peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) var TaskWorkerCount = 8 From ad15c59ce64776d308a1ecbf2ec7101ac8cd70c1 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 16 Jul 2018 15:16:49 -0700 Subject: [PATCH 0642/1035] update go-cid alternative to #5243 that updates go-cid and all packages that depend on it License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@e6367b9deaed07e2bb19b02e9c6315981e51029f --- bitswap/bitswap.go | 8 ++++---- bitswap/bitswap_test.go | 10 +++++----- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 4 ++-- bitswap/decision/engine_test.go | 4 ++-- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 2 +- bitswap/decision/peer_request_queue_test.go | 2 +- bitswap/get.go | 6 +++--- bitswap/message/message.go | 4 ++-- bitswap/message/message_test.go | 4 ++-- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 4 ++-- 
bitswap/notifications/notifications.go | 4 ++-- bitswap/notifications/notifications_test.go | 6 +++--- bitswap/session.go | 4 ++-- bitswap/session_test.go | 6 +++--- bitswap/stat.go | 2 +- bitswap/testnet/network_test.go | 4 ++-- bitswap/testnet/peernet.go | 2 +- bitswap/testnet/virtual.go | 6 +++--- bitswap/testutils.go | 2 +- bitswap/wantlist/wantlist.go | 2 +- bitswap/wantlist/wantlist_test.go | 2 +- bitswap/wantmanager.go | 2 +- bitswap/workers.go | 2 +- 26 files changed, 49 insertions(+), 49 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 58acf7196..da6d7317e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -17,15 +17,15 @@ import ( delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" flags "gx/ipfs/QmRMGdC6HKdLsPDABL9aXPDidrpmEHzJqFWSvshkbn9Hj8/go-ipfs-flags" + blockstore "gx/ipfs/QmRatnbGjPcoyzVjfixMZnuT1xQbjM7FgnL6FX4CKJeDE2/go-ipfs-blockstore" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" - blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" - exchange "gx/ipfs/QmVSe7YJbPnEmkSUKD3HxSvp8HJoyCU55hQoCMRq7N1jaK/go-ipfs-exchange-interface" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" + exchange "gx/ipfs/Qmc2faLf7URkHpsbfYM4EMbr8iSAcGAe8VPgVi64HVnwji/go-ipfs-exchange-interface" logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" - blockstore "gx/ipfs/QmdpuJBPBZ6sLPj9BQpn3Rpi38BT2cF1QMiUfyzNWeySW4/go-ipfs-blockstore" ) var log = logging.Logger("bitswap") diff --git a/bitswap/bitswap_test.go 
b/bitswap/bitswap_test.go index 1b262db4e..bdaaf8d20 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -12,14 +12,14 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" - mockrouting "gx/ipfs/QmWLQyLU7yopJnwMvpHM5VSMG4xmbKgcq6P246mDy9xy5E/go-ipfs-routing/mock" - blocksutil "gx/ipfs/QmYmE4kxv6uFGaWkeBAFYDuNcxzCn87pzwm6CkBkM9C8BM/go-ipfs-blocksutil" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + blockstore "gx/ipfs/QmRatnbGjPcoyzVjfixMZnuT1xQbjM7FgnL6FX4CKJeDE2/go-ipfs-blockstore" + blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" + blocksutil "gx/ipfs/QmYqPGpZ9Yemr55xus9DiEztkns6Jti5XJ7hC94JbvkdqZ/go-ipfs-blocksutil" + mockrouting "gx/ipfs/QmbFRJeEmEU16y3BmKKaD4a9fm5oHsEAMHe2vSB1UnfLMi/go-ipfs-routing/mock" tu "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" travis "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil/ci/travis" p2ptestutil "gx/ipfs/QmcxUtMB5sJrXR3znSvkrDd2ghvwGM8rLRqwJiPUdgQwat/go-libp2p-netutil" - blockstore "gx/ipfs/QmdpuJBPBZ6sLPj9BQpn3Rpi38BT2cF1QMiUfyzNWeySW4/go-ipfs-blockstore" detectrace "gx/ipfs/Qmf7HqcW7LtCi1W8y2bdx2eJpze74jkbKqpByxgXikdbLF/go-detect-race" ) diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index dccfa9ad1..26e10c40e 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -7,7 +7,7 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" 
"gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index b0bcf434c..135edf14f 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -9,10 +9,10 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" + bstore "gx/ipfs/QmRatnbGjPcoyzVjfixMZnuT1xQbjM7FgnL6FX4CKJeDE2/go-ipfs-blockstore" + blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" - bstore "gx/ipfs/QmdpuJBPBZ6sLPj9BQpn3Rpi38BT2cF1QMiUfyzNWeySW4/go-ipfs-blockstore" ) // TODO consider taking responsibility for other types of requests. For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index a183dd72b..afd144a08 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -11,10 +11,10 @@ import ( message "github.com/ipfs/go-ipfs/exchange/bitswap/message" - blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" + blockstore "gx/ipfs/QmRatnbGjPcoyzVjfixMZnuT1xQbjM7FgnL6FX4CKJeDE2/go-ipfs-blockstore" + blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" - blockstore "gx/ipfs/QmdpuJBPBZ6sLPj9BQpn3Rpi38BT2cF1QMiUfyzNWeySW4/go-ipfs-blockstore" ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" dssync "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/sync" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 6c3504788..a30f662e1 100644 --- 
a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -6,7 +6,7 @@ import ( wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index f2873361a..cfa582a9c 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -6,8 +6,8 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" pq "gx/ipfs/QmZUbTDJ39JpvtFCSubiWeUTQRvMA1tVE5RZCJrY4oeAsC/go-ipfs-pq" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index d84a5695c..02733dcd1 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -10,7 +10,7 @@ import ( "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" ) diff --git a/bitswap/get.go b/bitswap/get.go index a2d9466cd..32d11090f 100644 --- a/bitswap/get.go +++ b/bitswap/get.go @@ -6,9 +6,9 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" - blockstore 
"gx/ipfs/QmdpuJBPBZ6sLPj9BQpn3Rpi38BT2cF1QMiUfyzNWeySW4/go-ipfs-blockstore" + blockstore "gx/ipfs/QmRatnbGjPcoyzVjfixMZnuT1xQbjM7FgnL6FX4CKJeDE2/go-ipfs-blockstore" + blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" ) type getBlocksFunc func(context.Context, []*cid.Cid) (<-chan blocks.Block, error) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index dde2f9e01..50c32cdb2 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -6,12 +6,12 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" + blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" inet "gx/ipfs/QmPjvxTpVH8qJyQDnxnsxF9kv9jezKD1kozz1hs3fCGsNh/go-libp2p-net" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" ) // TODO move message.go into the bitswap package diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index abd3e77db..bea8455c8 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -7,9 +7,9 @@ import ( pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util" - blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" + blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" - cid 
"gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" ) func mkFakeCid(s string) *cid.Cid { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 635e5d2bf..191bf9253 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -6,8 +6,8 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" ifconnmgr "gx/ipfs/QmXuucFcuvAWYAJfhHV2h4BYreHEAsLSsiquosiXeuduTN/go-libp2p-interface-connmgr" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index a5012e252..efeb693c2 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -9,12 +9,12 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" inet "gx/ipfs/QmPjvxTpVH8qJyQDnxnsxF9kv9jezKD1kozz1hs3fCGsNh/go-libp2p-net" - routing "gx/ipfs/QmPpdpS9fknTBM3qHDcpayU6nYPZQeVjia2fbNrD8YWDe6/go-libp2p-routing" ifconnmgr "gx/ipfs/QmXuucFcuvAWYAJfhHV2h4BYreHEAsLSsiquosiXeuduTN/go-libp2p-interface-connmgr" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" ma "gx/ipfs/QmYmsdtJ3HsodkePE3eU3TsCaP2YvPZJ4LoXnNkDE5Tpt7/go-multiaddr" + routing "gx/ipfs/QmZ383TySJVeZWzGnWui6pRcKyYZk9VkKTuW7tmKRWk5au/go-libp2p-routing" ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" pstore "gx/ipfs/QmZR2XWVVBCtbgBWnQhWk2xcQfaR3W8faQPriAiaaj7rsr/go-libp2p-peerstore" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" host "gx/ipfs/Qmb8T6YBBsjYsVGfrihQLfCJveczZnneSBqBKkYEBWDjge/go-libp2p-host" logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" diff --git 
a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 31109c719..08ec4065e 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -4,8 +4,8 @@ import ( "context" "sync" - blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" pubsub "gx/ipfs/QmdbxjQWogRCHRaxhhGnYdT1oQJzL9GdqSKzCdqWr85AP2/pubsub" ) diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 102b3fb73..232124377 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" - blocksutil "gx/ipfs/QmYmE4kxv6uFGaWkeBAFYDuNcxzCn87pzwm6CkBkM9C8BM/go-ipfs-blocksutil" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" + blocksutil "gx/ipfs/QmYqPGpZ9Yemr55xus9DiEztkns6Jti5XJ7hC94JbvkdqZ/go-ipfs-blocksutil" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/session.go b/bitswap/session.go index 16f3b475c..97bb8f552 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -8,9 +8,9 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" loggables "gx/ipfs/QmRPkGkHLB72caXgdDYnoaWigXNWx95BcYDKV1n3KTEpaG/go-libp2p-loggables" - blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + blocks 
"gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) diff --git a/bitswap/session_test.go b/bitswap/session_test.go index 6edc6e065..c6b37c3d9 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" - blocksutil "gx/ipfs/QmYmE4kxv6uFGaWkeBAFYDuNcxzCn87pzwm6CkBkM9C8BM/go-ipfs-blocksutil" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" + blocksutil "gx/ipfs/QmYqPGpZ9Yemr55xus9DiEztkns6Jti5XJ7hC94JbvkdqZ/go-ipfs-blocksutil" tu "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" ) diff --git a/bitswap/stat.go b/bitswap/stat.go index 85390475d..b6332a6f4 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -3,7 +3,7 @@ package bitswap import ( "sort" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" ) type Stat struct { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 1fa8a8930..245b5db30 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -9,8 +9,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" - mockrouting "gx/ipfs/QmWLQyLU7yopJnwMvpHM5VSMG4xmbKgcq6P246mDy9xy5E/go-ipfs-routing/mock" + blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" + 
mockrouting "gx/ipfs/QmbFRJeEmEU16y3BmKKaD4a9fm5oHsEAMHe2vSB1UnfLMi/go-ipfs-routing/mock" testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index dc5349391..04aaad204 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,8 +5,8 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - mockrouting "gx/ipfs/QmWLQyLU7yopJnwMvpHM5VSMG4xmbKgcq6P246mDy9xy5E/go-ipfs-routing/mock" mockpeernet "gx/ipfs/QmZ86eLPtXkQ1Dfa992Q8NpXArUoWWh3y728JDcWvzRrvC/go-libp2p/p2p/net/mock" + mockrouting "gx/ipfs/QmbFRJeEmEU16y3BmKKaD4a9fm5oHsEAMHe2vSB1UnfLMi/go-ipfs-routing/mock" testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index cfb307f10..bc064d18e 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -9,11 +9,11 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - routing "gx/ipfs/QmPpdpS9fknTBM3qHDcpayU6nYPZQeVjia2fbNrD8YWDe6/go-libp2p-routing" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - mockrouting "gx/ipfs/QmWLQyLU7yopJnwMvpHM5VSMG4xmbKgcq6P246mDy9xy5E/go-ipfs-routing/mock" ifconnmgr "gx/ipfs/QmXuucFcuvAWYAJfhHV2h4BYreHEAsLSsiquosiXeuduTN/go-libp2p-interface-connmgr" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" + routing "gx/ipfs/QmZ383TySJVeZWzGnWui6pRcKyYZk9VkKTuW7tmKRWk5au/go-libp2p-routing" + mockrouting "gx/ipfs/QmbFRJeEmEU16y3BmKKaD4a9fm5oHsEAMHe2vSB1UnfLMi/go-ipfs-routing/mock" logging 
"gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 9f6ed03c7..53f82df99 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -7,10 +7,10 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" + blockstore "gx/ipfs/QmRatnbGjPcoyzVjfixMZnuT1xQbjM7FgnL6FX4CKJeDE2/go-ipfs-blockstore" testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" p2ptestutil "gx/ipfs/QmcxUtMB5sJrXR3znSvkrDd2ghvwGM8rLRqwJiPUdgQwat/go-libp2p-netutil" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" - blockstore "gx/ipfs/QmdpuJBPBZ6sLPj9BQpn3Rpi38BT2cF1QMiUfyzNWeySW4/go-ipfs-blockstore" ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" delayed "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/delayed" ds_sync "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/sync" diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 6f230ba5b..c25d8efa2 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" ) type ThreadSafe struct { diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/wantlist/wantlist_test.go index dc7925941..440d3c935 100644 --- a/bitswap/wantlist/wantlist_test.go +++ b/bitswap/wantlist/wantlist_test.go @@ -3,7 +3,7 @@ package wantlist import ( "testing" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" ) var testcids []*cid.Cid diff --git 
a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 7b30bf23a..00ff5a7d6 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -11,7 +11,7 @@ import ( wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) diff --git a/bitswap/workers.go b/bitswap/workers.go index 3dd5f9cb2..98731cd64 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -10,7 +10,7 @@ import ( process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" - cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) From 41d05524c7ecf5fff9352e97b632dd3088762338 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 18 Jul 2018 09:56:25 -0700 Subject: [PATCH 0643/1035] when sending blocks in bitswap, close streams asynchronously Otherwise, we tie up the bitswap worker until the other side responds with an EOF. 
fixes #5247 related to https://github.com/libp2p/go-libp2p-net/issues/28 License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@8ed926d219174f3d057ac1ff158aab6085560f1a --- bitswap/network/ipfs_impl.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index efeb693c2..1b6e38986 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -123,9 +123,10 @@ func (bsnet *impl) SendMessage( s.Reset() return err } - // Yes, return this error. We have no reason to believe that the block - // was actually *sent* unless we see the EOF. - return inet.FullClose(s) + // TODO(https://github.com/libp2p/go-libp2p-net/issues/28): Avoid this goroutine. + go inet.AwaitEOF(s) + return s.Close() + } func (bsnet *impl) SetDelegate(r Receiver) { From 686019983f10824a745ae896512993621984afa8 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 20 Jul 2018 21:07:58 -0700 Subject: [PATCH 0644/1035] gx update deps Updates: * go-net * go-text * dns * prometheus * protobuf (golang, not gogo) License: MIT Signed-off-by: Steven Allen This commit was moved from ipfs/go-bitswap@fb183fcf0f411c0788c3f6bbbb81a59ab3bbe8a6 --- bitswap/bitswap.go | 4 ++-- bitswap/bitswap_test.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/get.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testutils.go | 2 +- bitswap/wantmanager.go | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index da6d7317e..33b793710 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -17,15 +17,15 @@ import ( delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" flags "gx/ipfs/QmRMGdC6HKdLsPDABL9aXPDidrpmEHzJqFWSvshkbn9Hj8/go-ipfs-flags" - blockstore "gx/ipfs/QmRatnbGjPcoyzVjfixMZnuT1xQbjM7FgnL6FX4CKJeDE2/go-ipfs-blockstore" - metrics 
"gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" + blockstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore" exchange "gx/ipfs/Qmc2faLf7URkHpsbfYM4EMbr8iSAcGAe8VPgVi64HVnwji/go-ipfs-exchange-interface" logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" + metrics "gx/ipfs/QmekzFM3hPZjTjUFGTABdQkEnQ3PTiMstY198PwSFr5w1Q/go-metrics-interface" ) var log = logging.Logger("bitswap") diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index bdaaf8d20..b360a4f25 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -12,10 +12,10 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - blockstore "gx/ipfs/QmRatnbGjPcoyzVjfixMZnuT1xQbjM7FgnL6FX4CKJeDE2/go-ipfs-blockstore" blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" blocksutil "gx/ipfs/QmYqPGpZ9Yemr55xus9DiEztkns6Jti5XJ7hC94JbvkdqZ/go-ipfs-blocksutil" + blockstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore" mockrouting "gx/ipfs/QmbFRJeEmEU16y3BmKKaD4a9fm5oHsEAMHe2vSB1UnfLMi/go-ipfs-routing/mock" tu "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" travis "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil/ci/travis" diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 135edf14f..5d0aafa83 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -9,8 
+9,8 @@ import ( bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - bstore "gx/ipfs/QmRatnbGjPcoyzVjfixMZnuT1xQbjM7FgnL6FX4CKJeDE2/go-ipfs-blockstore" blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" + bstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore" logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index afd144a08..c97461639 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -11,8 +11,8 @@ import ( message "github.com/ipfs/go-ipfs/exchange/bitswap/message" - blockstore "gx/ipfs/QmRatnbGjPcoyzVjfixMZnuT1xQbjM7FgnL6FX4CKJeDE2/go-ipfs-blockstore" blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" + blockstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore" testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" diff --git a/bitswap/get.go b/bitswap/get.go index 32d11090f..4ba686f35 100644 --- a/bitswap/get.go +++ b/bitswap/get.go @@ -6,9 +6,9 @@ import ( notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - blockstore "gx/ipfs/QmRatnbGjPcoyzVjfixMZnuT1xQbjM7FgnL6FX4CKJeDE2/go-ipfs-blockstore" blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" + blockstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore" ) type getBlocksFunc func(context.Context, []*cid.Cid) (<-chan blocks.Block, error) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 
04aaad204..0d6cdbe44 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,7 +5,7 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - mockpeernet "gx/ipfs/QmZ86eLPtXkQ1Dfa992Q8NpXArUoWWh3y728JDcWvzRrvC/go-libp2p/p2p/net/mock" + mockpeernet "gx/ipfs/QmY51bqSM5XgxQZqsBrQcRkKTnCb8EKpJpR9K6Qax7Njco/go-libp2p/p2p/net/mock" mockrouting "gx/ipfs/QmbFRJeEmEU16y3BmKKaD4a9fm5oHsEAMHe2vSB1UnfLMi/go-ipfs-routing/mock" testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" diff --git a/bitswap/testutils.go b/bitswap/testutils.go index 53f82df99..b71f451cb 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -7,7 +7,7 @@ import ( tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - blockstore "gx/ipfs/QmRatnbGjPcoyzVjfixMZnuT1xQbjM7FgnL6FX4CKJeDE2/go-ipfs-blockstore" + blockstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore" testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" p2ptestutil "gx/ipfs/QmcxUtMB5sJrXR3znSvkrDd2ghvwGM8rLRqwJiPUdgQwat/go-libp2p-netutil" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 00ff5a7d6..4bbb7ff93 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -10,9 +10,9 @@ import ( bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface" cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" + metrics "gx/ipfs/QmekzFM3hPZjTjUFGTABdQkEnQ3PTiMstY198PwSFr5w1Q/go-metrics-interface" ) type WantManager struct { From 
38789aa99dc58473ae6634aed3351cd107bc309e Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 27 Jul 2018 14:34:40 -0700 Subject: [PATCH 0645/1035] Extract from go-ipfs This commit was moved from ipfs/go-bitswap@89fdf4e1393610e99e99fcdc18e1744262e886dc --- bitswap/README.md | 68 ++++++++++----------- bitswap/bitswap.go | 32 +++++----- bitswap/bitswap_test.go | 26 ++++---- bitswap/decision/bench_test.go | 10 +-- bitswap/decision/engine.go | 12 ++-- bitswap/decision/engine_test.go | 16 ++--- bitswap/decision/ledger.go | 6 +- bitswap/decision/peer_request_queue.go | 8 +-- bitswap/decision/peer_request_queue_test.go | 8 +-- bitswap/get.go | 8 +-- bitswap/message/message.go | 16 ++--- bitswap/message/message_test.go | 10 +-- bitswap/message/pb/message.pb.go | 2 +- bitswap/network/interface.go | 10 +-- bitswap/network/ipfs_impl.go | 24 ++++---- bitswap/notifications/notifications.go | 6 +- bitswap/notifications/notifications_test.go | 6 +- bitswap/session.go | 16 ++--- bitswap/session_test.go | 8 +-- bitswap/stat.go | 2 +- bitswap/testnet/interface.go | 6 +- bitswap/testnet/network_test.go | 16 ++--- bitswap/testnet/peernet.go | 12 ++-- bitswap/testnet/virtual.go | 22 +++---- bitswap/testutils.go | 20 +++--- bitswap/wantlist/wantlist.go | 2 +- bitswap/wantlist/wantlist_test.go | 2 +- bitswap/wantmanager.go | 16 ++--- bitswap/workers.go | 12 ++-- 29 files changed, 199 insertions(+), 203 deletions(-) diff --git a/bitswap/README.md b/bitswap/README.md index 417d87ff3..8ec2580a7 100644 --- a/bitswap/README.md +++ b/bitswap/README.md @@ -1,37 +1,33 @@ -# Bitswap - -## Protocol -Bitswap is the data trading module for ipfs, it manages requesting and sending -blocks to and from other peers in the network. Bitswap has two main jobs, the -first is to acquire blocks requested by the client from the network. The second -is to judiciously send blocks in its possession to other peers who want them. - -Bitswap is a message based protocol, as opposed to response-reply. 
All messages -contain wantlists, or blocks. Upon receiving a wantlist, a node should consider -sending out wanted blocks if they have them. Upon receiving blocks, the node -should send out a notification called a 'Cancel' signifying that they no longer -want the block. At a protocol level, bitswap is very simple. - -## go-ipfs Implementation -Internally, when a message with a wantlist is received, it is sent to the -decision engine to be considered, and blocks that we have that are wanted are -placed into the peer request queue. Any block we possess that is wanted by -another peer has a task in the peer request queue created for it. The peer -request queue is a priority queue that sorts available tasks by some metric, -currently, that metric is very simple and aims to fairly address the tasks -of each other peer. More advanced decision logic will be implemented in the -future. Task workers pull tasks to be done off of the queue, retrieve the block -to be sent, and send it off. The number of task workers is limited by a constant -factor. - -Client requests for new blocks are handled by the want manager, for every new -block (or set of blocks) wanted, the 'WantBlocks' method is invoked. The want -manager then ensures that connected peers are notified of the new block that we -want by sending the new entries to a message queue for each peer. The message -queue will loop while there is work available and do the following: 1) Ensure it -has a connection to its peer, 2) grab the message to be sent, and 3) send it. -If new messages are added while the loop is in steps 1 or 3, the messages are -combined into one to avoid having to keep an actual queue and send multiple -messages. The same process occurs when the client receives a block and sends a -cancel message for it. 
+go-bitswap +================== +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![Coverage Status](https://codecov.io/gh/ipfs/go-bitswap/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs/go-bitswap/branch/master) +[![Travis CI](https://travis-ci.org/ipfs/go-bitswap.svg?branch=master)](https://travis-ci.org/ipfs/go-bitswap) + +> An implementation of the bitswap protocol in go! + + +## Table of Contents + +- [Install](#install) +- [Usage](#usage) +- [API](#api) +- [Contribute](#contribute) +- [License](#license) + +## Install + +TODO + +## Contribute + +PRs are welcome! + +Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. 
+ +## License + +MIT © Juan Batiz-Benet diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 33b793710..f6a42fc7a 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -10,22 +10,22 @@ import ( "sync/atomic" "time" - decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" - bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - - delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - flags "gx/ipfs/QmRMGdC6HKdLsPDABL9aXPDidrpmEHzJqFWSvshkbn9Hj8/go-ipfs-flags" - process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" - procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" - blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - blockstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore" - exchange "gx/ipfs/Qmc2faLf7URkHpsbfYM4EMbr8iSAcGAe8VPgVi64HVnwji/go-ipfs-exchange-interface" - logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" - peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" - metrics "gx/ipfs/QmekzFM3hPZjTjUFGTABdQkEnQ3PTiMstY198PwSFr5w1Q/go-metrics-interface" + decision "github.com/ipfs/go-bitswap/decision" + bsmsg "github.com/ipfs/go-bitswap/message" + bsnet "github.com/ipfs/go-bitswap/network" + notifications "github.com/ipfs/go-bitswap/notifications" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + blockstore "github.com/ipfs/go-ipfs-blockstore" + delay "github.com/ipfs/go-ipfs-delay" + exchange "github.com/ipfs/go-ipfs-exchange-interface" + flags "github.com/ipfs/go-ipfs-flags" + logging "github.com/ipfs/go-log" + metrics "github.com/ipfs/go-metrics-interface" + process "github.com/jbenet/goprocess" + procctx 
"github.com/jbenet/goprocess/context" + peer "github.com/libp2p/go-libp2p-peer" ) var log = logging.Logger("bitswap") diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index b360a4f25..348859966 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -8,19 +8,19 @@ import ( "testing" "time" - decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision" - tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" - - delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - blocksutil "gx/ipfs/QmYqPGpZ9Yemr55xus9DiEztkns6Jti5XJ7hC94JbvkdqZ/go-ipfs-blocksutil" - blockstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore" - mockrouting "gx/ipfs/QmbFRJeEmEU16y3BmKKaD4a9fm5oHsEAMHe2vSB1UnfLMi/go-ipfs-routing/mock" - tu "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" - travis "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil/ci/travis" - p2ptestutil "gx/ipfs/QmcxUtMB5sJrXR3znSvkrDd2ghvwGM8rLRqwJiPUdgQwat/go-libp2p-netutil" - detectrace "gx/ipfs/Qmf7HqcW7LtCi1W8y2bdx2eJpze74jkbKqpByxgXikdbLF/go-detect-race" + decision "github.com/ipfs/go-bitswap/decision" + tn "github.com/ipfs/go-bitswap/testnet" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + detectrace "github.com/ipfs/go-detect-race" + blockstore "github.com/ipfs/go-ipfs-blockstore" + blocksutil "github.com/ipfs/go-ipfs-blocksutil" + delay "github.com/ipfs/go-ipfs-delay" + mockrouting "github.com/ipfs/go-ipfs-routing/mock" + p2ptestutil "github.com/libp2p/go-libp2p-netutil" + tu "github.com/libp2p/go-testutil" + travis "github.com/libp2p/go-testutil/ci/travis" ) // FIXME the tests are really sensitive to the network delay. 
fix them to work diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 26e10c40e..dc3aea066 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -5,11 +5,11 @@ import ( "math" "testing" - "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" - "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" + "github.com/ipfs/go-bitswap/wantlist" + cid "github.com/ipfs/go-cid" + u "github.com/ipfs/go-ipfs-util" + "github.com/libp2p/go-libp2p-peer" + "github.com/libp2p/go-testutil" ) // FWIW: At the time of this commit, including a timestamp in task increases diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 5d0aafa83..736e5d46d 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -6,13 +6,13 @@ import ( "sync" "time" - bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + bsmsg "github.com/ipfs/go-bitswap/message" + wl "github.com/ipfs/go-bitswap/wantlist" - blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" - bstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore" - logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" - peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" + blocks "github.com/ipfs/go-block-format" + bstore "github.com/ipfs/go-ipfs-blockstore" + logging "github.com/ipfs/go-log" + peer "github.com/libp2p/go-libp2p-peer" ) // TODO consider taking responsibility for other types of requests. 
For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index c97461639..ed7d1055d 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -9,14 +9,14 @@ import ( "sync" "testing" - message "github.com/ipfs/go-ipfs/exchange/bitswap/message" - - blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" - blockstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore" - testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" - peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" - ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" - dssync "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/sync" + message "github.com/ipfs/go-bitswap/message" + + blocks "github.com/ipfs/go-block-format" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + blockstore "github.com/ipfs/go-ipfs-blockstore" + peer "github.com/libp2p/go-libp2p-peer" + testutil "github.com/libp2p/go-testutil" ) type peerAndEngine struct { diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index a30f662e1..f38460ec1 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -4,10 +4,10 @@ import ( "sync" "time" - wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + wl "github.com/ipfs/go-bitswap/wantlist" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-peer" ) func newLedger(p peer.ID) *ledger { diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index cfa582a9c..b9e34763c 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -4,11 +4,11 @@ import ( "sync" "time" - wantlist 
"github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" + wantlist "github.com/ipfs/go-bitswap/wantlist" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - pq "gx/ipfs/QmZUbTDJ39JpvtFCSubiWeUTQRvMA1tVE5RZCJrY4oeAsC/go-ipfs-pq" - peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" + cid "github.com/ipfs/go-cid" + pq "github.com/ipfs/go-ipfs-pq" + peer "github.com/libp2p/go-libp2p-peer" ) type peerRequestQueue interface { diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 02733dcd1..32e93a272 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -8,10 +8,10 @@ import ( "strings" "testing" - "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" + "github.com/ipfs/go-bitswap/wantlist" + cid "github.com/ipfs/go-cid" + u "github.com/ipfs/go-ipfs-util" + "github.com/libp2p/go-testutil" ) func TestPushPop(t *testing.T) { diff --git a/bitswap/get.go b/bitswap/get.go index 4ba686f35..be5cf3cb6 100644 --- a/bitswap/get.go +++ b/bitswap/get.go @@ -4,11 +4,11 @@ import ( "context" "errors" - notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" + notifications "github.com/ipfs/go-bitswap/notifications" - blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - blockstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore" + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + blockstore "github.com/ipfs/go-ipfs-blockstore" ) type getBlocksFunc func(context.Context, []*cid.Cid) (<-chan blocks.Block, error) diff --git a/bitswap/message/message.go 
b/bitswap/message/message.go index 50c32cdb2..ea163661b 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -4,14 +4,14 @@ import ( "fmt" "io" - pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" - wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" - - inet "gx/ipfs/QmPjvxTpVH8qJyQDnxnsxF9kv9jezKD1kozz1hs3fCGsNh/go-libp2p-net" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" + pb "github.com/ipfs/go-bitswap/message/pb" + wantlist "github.com/ipfs/go-bitswap/wantlist" + blocks "github.com/ipfs/go-block-format" + + ggio "github.com/gogo/protobuf/io" + proto "github.com/gogo/protobuf/proto" + cid "github.com/ipfs/go-cid" + inet "github.com/libp2p/go-libp2p-net" ) // TODO move message.go into the bitswap package diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index bea8455c8..348f5f400 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -4,12 +4,12 @@ import ( "bytes" "testing" - pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb" + pb "github.com/ipfs/go-bitswap/message/pb" - u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util" - blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" + proto "github.com/gogo/protobuf/proto" + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + u "github.com/ipfs/go-ipfs-util" ) func mkFakeCid(s string) *cid.Cid { diff --git a/bitswap/message/pb/message.pb.go b/bitswap/message/pb/message.pb.go index 18e4a60e3..e88fd710b 100644 --- 
a/bitswap/message/pb/message.pb.go +++ b/bitswap/message/pb/message.pb.go @@ -13,7 +13,7 @@ It has these top-level messages: */ package bitswap_message_pb -import proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" +import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 191bf9253..03a379806 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -3,12 +3,12 @@ package network import ( "context" - bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + bsmsg "github.com/ipfs/go-bitswap/message" - ifconnmgr "gx/ipfs/QmXuucFcuvAWYAJfhHV2h4BYreHEAsLSsiquosiXeuduTN/go-libp2p-interface-connmgr" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" - peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" + cid "github.com/ipfs/go-cid" + ifconnmgr "github.com/libp2p/go-libp2p-interface-connmgr" + peer "github.com/libp2p/go-libp2p-peer" + protocol "github.com/libp2p/go-libp2p-protocol" ) var ( diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 1b6e38986..aa142d879 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -6,18 +6,18 @@ import ( "io" "time" - bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - - inet "gx/ipfs/QmPjvxTpVH8qJyQDnxnsxF9kv9jezKD1kozz1hs3fCGsNh/go-libp2p-net" - ifconnmgr "gx/ipfs/QmXuucFcuvAWYAJfhHV2h4BYreHEAsLSsiquosiXeuduTN/go-libp2p-interface-connmgr" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - ma "gx/ipfs/QmYmsdtJ3HsodkePE3eU3TsCaP2YvPZJ4LoXnNkDE5Tpt7/go-multiaddr" - routing "gx/ipfs/QmZ383TySJVeZWzGnWui6pRcKyYZk9VkKTuW7tmKRWk5au/go-libp2p-routing" - ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io" - pstore 
"gx/ipfs/QmZR2XWVVBCtbgBWnQhWk2xcQfaR3W8faQPriAiaaj7rsr/go-libp2p-peerstore" - host "gx/ipfs/Qmb8T6YBBsjYsVGfrihQLfCJveczZnneSBqBKkYEBWDjge/go-libp2p-host" - logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" - peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" + bsmsg "github.com/ipfs/go-bitswap/message" + + ggio "github.com/gogo/protobuf/io" + cid "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log" + host "github.com/libp2p/go-libp2p-host" + ifconnmgr "github.com/libp2p/go-libp2p-interface-connmgr" + inet "github.com/libp2p/go-libp2p-net" + peer "github.com/libp2p/go-libp2p-peer" + pstore "github.com/libp2p/go-libp2p-peerstore" + routing "github.com/libp2p/go-libp2p-routing" + ma "github.com/multiformats/go-multiaddr" ) var log = logging.Logger("bitswap_network") diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 08ec4065e..d20270109 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -4,9 +4,9 @@ import ( "context" "sync" - blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - pubsub "gx/ipfs/QmdbxjQWogRCHRaxhhGnYdT1oQJzL9GdqSKzCdqWr85AP2/pubsub" + pubsub "github.com/gxed/pubsub" + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" ) const bufferSize = 16 diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 232124377..e377f319e 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - blocksutil "gx/ipfs/QmYqPGpZ9Yemr55xus9DiEztkns6Jti5XJ7hC94JbvkdqZ/go-ipfs-blocksutil" + blocks 
"github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + blocksutil "github.com/ipfs/go-ipfs-blocksutil" ) func TestDuplicates(t *testing.T) { diff --git a/bitswap/session.go b/bitswap/session.go index 97bb8f552..d652dac1e 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -5,14 +5,14 @@ import ( "fmt" "time" - notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications" - - loggables "gx/ipfs/QmRPkGkHLB72caXgdDYnoaWigXNWx95BcYDKV1n3KTEpaG/go-libp2p-loggables" - lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru" - blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" - peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" + notifications "github.com/ipfs/go-bitswap/notifications" + + lru "github.com/hashicorp/golang-lru" + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log" + loggables "github.com/libp2p/go-libp2p-loggables" + peer "github.com/libp2p/go-libp2p-peer" ) const activeWantsLimit = 16 diff --git a/bitswap/session_test.go b/bitswap/session_test.go index c6b37c3d9..97b7a31a8 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -6,10 +6,10 @@ import ( "testing" "time" - blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - blocksutil "gx/ipfs/QmYqPGpZ9Yemr55xus9DiEztkns6Jti5XJ7hC94JbvkdqZ/go-ipfs-blocksutil" - tu "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + blocksutil "github.com/ipfs/go-ipfs-blocksutil" + tu "github.com/libp2p/go-testutil" ) func TestBasicSessions(t *testing.T) { diff --git a/bitswap/stat.go b/bitswap/stat.go index b6332a6f4..99dbbd32b 
100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -3,7 +3,7 @@ package bitswap import ( "sort" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" + cid "github.com/ipfs/go-cid" ) type Stat struct { diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index c4ac9b368..ed7d4b1ec 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -1,9 +1,9 @@ package bitswap import ( - bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" - peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" + bsnet "github.com/ipfs/go-bitswap/network" + peer "github.com/libp2p/go-libp2p-peer" + "github.com/libp2p/go-testutil" ) type Network interface { diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 245b5db30..988c33ef1 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -5,14 +5,14 @@ import ( "sync" "testing" - bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - - delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format" - mockrouting "gx/ipfs/QmbFRJeEmEU16y3BmKKaD4a9fm5oHsEAMHe2vSB1UnfLMi/go-ipfs-routing/mock" - testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" - peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" + bsmsg "github.com/ipfs/go-bitswap/message" + bsnet "github.com/ipfs/go-bitswap/network" + + blocks "github.com/ipfs/go-block-format" + delay "github.com/ipfs/go-ipfs-delay" + mockrouting "github.com/ipfs/go-ipfs-routing/mock" + peer "github.com/libp2p/go-libp2p-peer" + testutil "github.com/libp2p/go-testutil" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go 
b/bitswap/testnet/peernet.go index 0d6cdbe44..dbad1f65e 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -3,13 +3,13 @@ package bitswap import ( "context" - bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" + bsnet "github.com/ipfs/go-bitswap/network" - mockpeernet "gx/ipfs/QmY51bqSM5XgxQZqsBrQcRkKTnCb8EKpJpR9K6Qax7Njco/go-libp2p/p2p/net/mock" - mockrouting "gx/ipfs/QmbFRJeEmEU16y3BmKKaD4a9fm5oHsEAMHe2vSB1UnfLMi/go-ipfs-routing/mock" - testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" - peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" - ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" + ds "github.com/ipfs/go-datastore" + mockrouting "github.com/ipfs/go-ipfs-routing/mock" + peer "github.com/libp2p/go-libp2p-peer" + mockpeernet "github.com/libp2p/go-libp2p/p2p/net/mock" + testutil "github.com/libp2p/go-testutil" ) type peernet struct { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index bc064d18e..2a1e9377c 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -6,17 +6,17 @@ import ( "sync" "time" - bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - - delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - ifconnmgr "gx/ipfs/QmXuucFcuvAWYAJfhHV2h4BYreHEAsLSsiquosiXeuduTN/go-libp2p-interface-connmgr" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - routing "gx/ipfs/QmZ383TySJVeZWzGnWui6pRcKyYZk9VkKTuW7tmKRWk5au/go-libp2p-routing" - mockrouting "gx/ipfs/QmbFRJeEmEU16y3BmKKaD4a9fm5oHsEAMHe2vSB1UnfLMi/go-ipfs-routing/mock" - logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" - testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" - peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" + bsmsg "github.com/ipfs/go-bitswap/message" + bsnet 
"github.com/ipfs/go-bitswap/network" + + cid "github.com/ipfs/go-cid" + delay "github.com/ipfs/go-ipfs-delay" + mockrouting "github.com/ipfs/go-ipfs-routing/mock" + logging "github.com/ipfs/go-log" + ifconnmgr "github.com/libp2p/go-libp2p-interface-connmgr" + peer "github.com/libp2p/go-libp2p-peer" + routing "github.com/libp2p/go-libp2p-routing" + testutil "github.com/libp2p/go-testutil" ) var log = logging.Logger("bstestnet") diff --git a/bitswap/testutils.go b/bitswap/testutils.go index b71f451cb..aa4ffa9f7 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -4,16 +4,16 @@ import ( "context" "time" - tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet" - - delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay" - blockstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore" - testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil" - p2ptestutil "gx/ipfs/QmcxUtMB5sJrXR3znSvkrDd2ghvwGM8rLRqwJiPUdgQwat/go-libp2p-netutil" - peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" - ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore" - delayed "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/delayed" - ds_sync "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/sync" + tn "github.com/ipfs/go-bitswap/testnet" + + ds "github.com/ipfs/go-datastore" + delayed "github.com/ipfs/go-datastore/delayed" + ds_sync "github.com/ipfs/go-datastore/sync" + blockstore "github.com/ipfs/go-ipfs-blockstore" + delay "github.com/ipfs/go-ipfs-delay" + p2ptestutil "github.com/libp2p/go-libp2p-netutil" + peer "github.com/libp2p/go-libp2p-peer" + testutil "github.com/libp2p/go-testutil" ) // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index c25d8efa2..beb4ac752 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -6,7 +6,7 @@ import ( "sort" "sync" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" + cid "github.com/ipfs/go-cid" ) type ThreadSafe struct { diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/wantlist/wantlist_test.go index 440d3c935..0d4c696ad 100644 --- a/bitswap/wantlist/wantlist_test.go +++ b/bitswap/wantlist/wantlist_test.go @@ -3,7 +3,7 @@ package wantlist import ( "testing" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" + cid "github.com/ipfs/go-cid" ) var testcids []*cid.Cid diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 4bbb7ff93..380d85381 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -5,14 +5,14 @@ import ( "sync" "time" - engine "github.com/ipfs/go-ipfs/exchange/bitswap/decision" - bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" - bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" - wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist" - - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" - metrics "gx/ipfs/QmekzFM3hPZjTjUFGTABdQkEnQ3PTiMstY198PwSFr5w1Q/go-metrics-interface" + engine "github.com/ipfs/go-bitswap/decision" + bsmsg "github.com/ipfs/go-bitswap/message" + bsnet "github.com/ipfs/go-bitswap/network" + wantlist "github.com/ipfs/go-bitswap/wantlist" + + cid "github.com/ipfs/go-cid" + metrics "github.com/ipfs/go-metrics-interface" + peer "github.com/libp2p/go-libp2p-peer" ) type WantManager struct { diff --git a/bitswap/workers.go b/bitswap/workers.go index 98731cd64..8f5e6edda 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -6,13 +6,13 @@ import ( "sync" "time" - bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message" + bsmsg 
"github.com/ipfs/go-bitswap/message" - process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess" - procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context" - cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid" - logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" - peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer" + cid "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log" + process "github.com/jbenet/goprocess" + procctx "github.com/jbenet/goprocess/context" + peer "github.com/libp2p/go-libp2p-peer" ) var TaskWorkerCount = 8 From 07f3ba7f5870bc60607796ae604b041e08f65a42 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 27 Jul 2018 15:00:34 -0700 Subject: [PATCH 0646/1035] refixer readme This commit was moved from ipfs/go-bitswap@a9946993b9385e8e40d77a22d3ce7a83a30abe28 --- bitswap/README.md | 39 +++++++++++++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/bitswap/README.md b/bitswap/README.md index 8ec2580a7..62bbd9b39 100644 --- a/bitswap/README.md +++ b/bitswap/README.md @@ -13,14 +13,45 @@ go-bitswap ## Table of Contents - [Install](#install) -- [Usage](#usage) -- [API](#api) +- [Protocol](#protocol) +- [Implementation](#implementation) - [Contribute](#contribute) - [License](#license) -## Install +## Protocol +Bitswap is the data trading module for ipfs, it manages requesting and sending +blocks to and from other peers in the network. Bitswap has two main jobs, the +first is to acquire blocks requested by the client from the network. The second +is to judiciously send blocks in its possession to other peers who want them. -TODO +Bitswap is a message based protocol, as opposed to response-reply. All messages +contain wantlists, or blocks. Upon receiving a wantlist, a node should consider +sending out wanted blocks if they have them. 
Upon receiving blocks, the node +should send out a notification called a 'Cancel' signifying that they no longer +want the block. At a protocol level, bitswap is very simple. + +## Implementation +Internally, when a message with a wantlist is received, it is sent to the +decision engine to be considered, and blocks that we have that are wanted are +placed into the peer request queue. Any block we possess that is wanted by +another peer has a task in the peer request queue created for it. The peer +request queue is a priority queue that sorts available tasks by some metric, +currently, that metric is very simple and aims to fairly address the tasks +of each other peer. More advanced decision logic will be implemented in the +future. Task workers pull tasks to be done off of the queue, retrieve the block +to be sent, and send it off. The number of task workers is limited by a constant +factor. + +Client requests for new blocks are handled by the want manager, for every new +block (or set of blocks) wanted, the 'WantBlocks' method is invoked. The want +manager then ensures that connected peers are notified of the new block that we +want by sending the new entries to a message queue for each peer. The message +queue will loop while there is work available and do the following: 1) Ensure it +has a connection to its peer, 2) grab the message to be sent, and 3) send it. +If new messages are added while the loop is in steps 1 or 3, the messages are +combined into one to avoid having to keep an actual queue and send multiple +messages. The same process occurs when the client receives a block and sends a +cancel message for it. 
## Contribute From 4eff662ae6b31e22e81374badb85b8cbea8209ca Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 7 Aug 2018 18:43:16 -0700 Subject: [PATCH 0647/1035] update gogo protobuf and switch to proto3 This commit was moved from ipfs/go-bitswap@18c43be9e7dd3346d68a6e59bea89a9117372b0f --- bitswap/message/message.go | 17 +- bitswap/message/message_test.go | 5 +- bitswap/message/pb/Makefile | 13 +- bitswap/message/pb/message.pb.go | 1070 +++++++++++++++++++++++++++++- bitswap/message/pb/message.proto | 20 +- 5 files changed, 1064 insertions(+), 61 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index ea163661b..9aba444b3 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -9,7 +9,6 @@ import ( blocks "github.com/ipfs/go-block-format" ggio "github.com/gogo/protobuf/io" - proto "github.com/gogo/protobuf/proto" cid "github.com/ipfs/go-cid" inet "github.com/libp2p/go-libp2p-net" ) @@ -185,12 +184,12 @@ func (m *impl) ToProtoV0() *pb.Message { pbm.Wantlist.Entries = make([]*pb.Message_Wantlist_Entry, 0, len(m.wantlist)) for _, e := range m.wantlist { pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, &pb.Message_Wantlist_Entry{ - Block: proto.String(e.Cid.KeyString()), - Priority: proto.Int32(int32(e.Priority)), - Cancel: proto.Bool(e.Cancel), + Block: e.Cid.Bytes(), + Priority: int32(e.Priority), + Cancel: e.Cancel, }) } - pbm.Wantlist.Full = proto.Bool(m.full) + pbm.Wantlist.Full = m.full blocks := m.Blocks() pbm.Blocks = make([][]byte, 0, len(blocks)) @@ -206,12 +205,12 @@ func (m *impl) ToProtoV1() *pb.Message { pbm.Wantlist.Entries = make([]*pb.Message_Wantlist_Entry, 0, len(m.wantlist)) for _, e := range m.wantlist { pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, &pb.Message_Wantlist_Entry{ - Block: proto.String(e.Cid.KeyString()), - Priority: proto.Int32(int32(e.Priority)), - Cancel: proto.Bool(e.Cancel), + Block: e.Cid.Bytes(), + Priority: int32(e.Priority), + Cancel: e.Cancel, }) } - 
pbm.Wantlist.Full = proto.Bool(m.full) + pbm.Wantlist.Full = m.full blocks := m.Blocks() pbm.Payload = make([]*pb.Message_Block, 0, len(blocks)) diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 348f5f400..539d212e5 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -6,7 +6,6 @@ import ( pb "github.com/ipfs/go-bitswap/message/pb" - proto "github.com/gogo/protobuf/proto" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" u "github.com/ipfs/go-ipfs-util" @@ -31,7 +30,7 @@ func TestNewMessageFromProto(t *testing.T) { protoMessage := new(pb.Message) protoMessage.Wantlist = new(pb.Message_Wantlist) protoMessage.Wantlist.Entries = []*pb.Message_Wantlist_Entry{ - {Block: proto.String(str.KeyString())}, + {Block: str.Bytes()}, } if !wantlistContains(protoMessage.Wantlist, str) { t.Fail() @@ -166,7 +165,7 @@ func TestToAndFromNetMessage(t *testing.T) { func wantlistContains(wantlist *pb.Message_Wantlist, c *cid.Cid) bool { for _, e := range wantlist.GetEntries() { - if e.GetBlock() == c.KeyString() { + if bytes.Equal(e.GetBlock(), c.Bytes()) { return true } } diff --git a/bitswap/message/pb/Makefile b/bitswap/message/pb/Makefile index 5bbebea07..eb14b5768 100644 --- a/bitswap/message/pb/Makefile +++ b/bitswap/message/pb/Makefile @@ -1,8 +1,11 @@ -# TODO(brian): add proto tasks -all: message.pb.go +PB = $(wildcard *.proto) +GO = $(PB:.proto=.pb.go) -message.pb.go: message.proto - protoc --gogo_out=. --proto_path=../../../../../:/usr/local/opt/protobuf/include:. $< +all: $(GO) + +%.pb.go: %.proto + protoc --proto_path=$(GOPATH)/src:. --gogofast_out=. $< clean: - rm message.pb.go + rm -f *.pb.go + rm -f *.go diff --git a/bitswap/message/pb/message.pb.go b/bitswap/message/pb/message.pb.go index e88fd710b..2c668d1a4 100644 --- a/bitswap/message/pb/message.pb.go +++ b/bitswap/message/pb/message.pb.go @@ -1,37 +1,66 @@ -// Code generated by protoc-gen-gogo. 
+// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: message.proto -// DO NOT EDIT! -/* -Package bitswap_message_pb is a generated protocol buffer package. - -It is generated from these files: - message.proto - -It has these top-level messages: - Message -*/ package bitswap_message_pb import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import io "io" + // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + type Message struct { - Wantlist *Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist" json:"wantlist,omitempty"` - Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks" json:"blocks,omitempty"` - Payload []*Message_Block `protobuf:"bytes,3,rep,name=payload" json:"payload,omitempty"` - XXX_unrecognized []byte `json:"-"` + Wantlist *Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist" json:"wantlist,omitempty"` + Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks" json:"blocks,omitempty"` + Payload []*Message_Block `protobuf:"bytes,3,rep,name=payload" json:"payload,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_message_1e228ff77b8fb7b4, []int{0} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + 
return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(dst, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo func (m *Message) GetWantlist() *Message_Wantlist { if m != nil { @@ -55,14 +84,45 @@ func (m *Message) GetPayload() []*Message_Block { } type Message_Wantlist struct { - Entries []*Message_Wantlist_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` - Full *bool `protobuf:"varint,2,opt,name=full" json:"full,omitempty"` - XXX_unrecognized []byte `json:"-"` + Entries []*Message_Wantlist_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` + Full bool `protobuf:"varint,2,opt,name=full,proto3" json:"full,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Message_Wantlist) Reset() { *m = Message_Wantlist{} } func (m *Message_Wantlist) String() string { return proto.CompactTextString(m) } func (*Message_Wantlist) ProtoMessage() {} +func (*Message_Wantlist) Descriptor() ([]byte, []int) { + return fileDescriptor_message_1e228ff77b8fb7b4, []int{0, 0} +} +func (m *Message_Wantlist) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message_Wantlist) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message_Wantlist.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Message_Wantlist) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_Wantlist.Merge(dst, src) +} +func (m *Message_Wantlist) 
XXX_Size() int { + return m.Size() +} +func (m *Message_Wantlist) XXX_DiscardUnknown() { + xxx_messageInfo_Message_Wantlist.DiscardUnknown(m) +} + +var xxx_messageInfo_Message_Wantlist proto.InternalMessageInfo func (m *Message_Wantlist) GetEntries() []*Message_Wantlist_Entry { if m != nil { @@ -72,53 +132,115 @@ func (m *Message_Wantlist) GetEntries() []*Message_Wantlist_Entry { } func (m *Message_Wantlist) GetFull() bool { - if m != nil && m.Full != nil { - return *m.Full + if m != nil { + return m.Full } return false } type Message_Wantlist_Entry struct { - Block *string `protobuf:"bytes,1,opt,name=block" json:"block,omitempty"` - Priority *int32 `protobuf:"varint,2,opt,name=priority" json:"priority,omitempty"` - Cancel *bool `protobuf:"varint,3,opt,name=cancel" json:"cancel,omitempty"` - XXX_unrecognized []byte `json:"-"` + Block []byte `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + Priority int32 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` + Cancel bool `protobuf:"varint,3,opt,name=cancel,proto3" json:"cancel,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Message_Wantlist_Entry) Reset() { *m = Message_Wantlist_Entry{} } func (m *Message_Wantlist_Entry) String() string { return proto.CompactTextString(m) } func (*Message_Wantlist_Entry) ProtoMessage() {} +func (*Message_Wantlist_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_message_1e228ff77b8fb7b4, []int{0, 0, 0} +} +func (m *Message_Wantlist_Entry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message_Wantlist_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message_Wantlist_Entry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Message_Wantlist_Entry) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_Wantlist_Entry.Merge(dst, src) +} +func (m *Message_Wantlist_Entry) XXX_Size() int { + return m.Size() +} +func (m *Message_Wantlist_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_Message_Wantlist_Entry.DiscardUnknown(m) +} -func (m *Message_Wantlist_Entry) GetBlock() string { - if m != nil && m.Block != nil { - return *m.Block +var xxx_messageInfo_Message_Wantlist_Entry proto.InternalMessageInfo + +func (m *Message_Wantlist_Entry) GetBlock() []byte { + if m != nil { + return m.Block } - return "" + return nil } func (m *Message_Wantlist_Entry) GetPriority() int32 { - if m != nil && m.Priority != nil { - return *m.Priority + if m != nil { + return m.Priority } return 0 } func (m *Message_Wantlist_Entry) GetCancel() bool { - if m != nil && m.Cancel != nil { - return *m.Cancel + if m != nil { + return m.Cancel } return false } type Message_Block struct { - Prefix []byte `protobuf:"bytes,1,opt,name=prefix" json:"prefix,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"` - XXX_unrecognized []byte `json:"-"` + Prefix []byte `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Message_Block) Reset() { *m = Message_Block{} } func (m *Message_Block) String() string { return proto.CompactTextString(m) } func (*Message_Block) ProtoMessage() {} +func (*Message_Block) Descriptor() ([]byte, []int) { + return fileDescriptor_message_1e228ff77b8fb7b4, []int{0, 1} +} +func (m *Message_Block) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message_Block.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if 
err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Message_Block) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_Block.Merge(dst, src) +} +func (m *Message_Block) XXX_Size() int { + return m.Size() +} +func (m *Message_Block) XXX_DiscardUnknown() { + xxx_messageInfo_Message_Block.DiscardUnknown(m) +} + +var xxx_messageInfo_Message_Block proto.InternalMessageInfo func (m *Message_Block) GetPrefix() []byte { if m != nil { @@ -140,3 +262,881 @@ func init() { proto.RegisterType((*Message_Wantlist_Entry)(nil), "bitswap.message.pb.Message.Wantlist.Entry") proto.RegisterType((*Message_Block)(nil), "bitswap.message.pb.Message.Block") } +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Wantlist != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMessage(dAtA, i, uint64(m.Wantlist.Size())) + n1, err := m.Wantlist.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.Blocks) > 0 { + for _, b := range m.Blocks { + dAtA[i] = 0x12 + i++ + i = encodeVarintMessage(dAtA, i, uint64(len(b))) + i += copy(dAtA[i:], b) + } + } + if len(m.Payload) > 0 { + for _, msg := range m.Payload { + dAtA[i] = 0x1a + i++ + i = encodeVarintMessage(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Message_Wantlist) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message_Wantlist) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Entries) 
> 0 { + for _, msg := range m.Entries { + dAtA[i] = 0xa + i++ + i = encodeVarintMessage(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Full { + dAtA[i] = 0x10 + i++ + if m.Full { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Message_Wantlist_Entry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message_Wantlist_Entry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Block) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintMessage(dAtA, i, uint64(len(m.Block))) + i += copy(dAtA[i:], m.Block) + } + if m.Priority != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintMessage(dAtA, i, uint64(m.Priority)) + } + if m.Cancel { + dAtA[i] = 0x18 + i++ + if m.Cancel { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Message_Block) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message_Block) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Prefix) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintMessage(dAtA, i, uint64(len(m.Prefix))) + i += copy(dAtA[i:], m.Prefix) + } + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintMessage(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintMessage(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 
0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Message) Size() (n int) { + var l int + _ = l + if m.Wantlist != nil { + l = m.Wantlist.Size() + n += 1 + l + sovMessage(uint64(l)) + } + if len(m.Blocks) > 0 { + for _, b := range m.Blocks { + l = len(b) + n += 1 + l + sovMessage(uint64(l)) + } + } + if len(m.Payload) > 0 { + for _, e := range m.Payload { + l = e.Size() + n += 1 + l + sovMessage(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Message_Wantlist) Size() (n int) { + var l int + _ = l + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovMessage(uint64(l)) + } + } + if m.Full { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Message_Wantlist_Entry) Size() (n int) { + var l int + _ = l + l = len(m.Block) + if l > 0 { + n += 1 + l + sovMessage(uint64(l)) + } + if m.Priority != 0 { + n += 1 + sovMessage(uint64(m.Priority)) + } + if m.Cancel { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Message_Block) Size() (n int) { + var l int + _ = l + l = len(m.Prefix) + if l > 0 { + n += 1 + l + sovMessage(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovMessage(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMessage(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozMessage(x uint64) (n int) { + return sovMessage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Wantlist", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Wantlist == nil { + m.Wantlist = &Message_Wantlist{} + } + if err := m.Wantlist.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blocks = append(m.Blocks, make([]byte, postIndex-iNdEx)) + copy(m.Blocks[len(m.Blocks)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = append(m.Payload, &Message_Block{}) + if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message_Wantlist) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Wantlist: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Wantlist: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, 
&Message_Wantlist_Entry{}) + if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Full", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Full = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Entry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { 
+ return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Block = append(m.Block[:0], dAtA[iNdEx:postIndex]...) + if m.Block == nil { + m.Block = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + m.Priority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Priority |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cancel", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Cancel = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message_Block) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Block: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Block: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prefix = append(m.Prefix[:0], dAtA[iNdEx:postIndex]...) + if m.Prefix == nil { + m.Prefix = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMessage(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthMessage + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipMessage(dAtA[start:]) + if err != nil { + return 0, err 
+ } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthMessage = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMessage = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("message.proto", fileDescriptor_message_1e228ff77b8fb7b4) } + +var fileDescriptor_message_1e228ff77b8fb7b4 = []byte{ + // 287 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xb1, 0x4e, 0xf3, 0x30, + 0x14, 0x85, 0xe5, 0xe6, 0x4f, 0x1b, 0xdd, 0xe6, 0x5f, 0x2c, 0x84, 0xac, 0x0c, 0x55, 0x40, 0x0c, + 0x11, 0x83, 0x87, 0x76, 0x64, 0x41, 0x15, 0x8c, 0x0c, 0x78, 0x61, 0x76, 0x52, 0x17, 0x59, 0x98, + 0x24, 0xb2, 0x8d, 0x4a, 0x9e, 0x82, 0xc7, 0xe1, 0x15, 0x18, 0x79, 0x04, 0x94, 0x27, 0x41, 0xb9, + 0x75, 0xb2, 0x20, 0x21, 0xb6, 0x7b, 0xac, 0xf3, 0x1d, 0x9f, 0x6b, 0xc3, 0xff, 0x67, 0xe5, 0x9c, + 0x7c, 0x54, 0xbc, 0xb5, 0x8d, 0x6f, 0x28, 0x2d, 0xb5, 0x77, 0x07, 0xd9, 0xf2, 0xe9, 0xb8, 0x3c, + 0x7f, 0x8b, 0x60, 0x71, 0x77, 0x94, 0xf4, 0x1a, 0x92, 0x83, 0xac, 0xbd, 0xd1, 0xce, 0x33, 0x92, + 0x93, 0x62, 0xb9, 0xbe, 0xe0, 0x3f, 0x11, 0x1e, 0xec, 0xfc, 0x21, 0x78, 0xc5, 0x44, 0xd1, 0x53, + 0x98, 0x97, 0xa6, 0xa9, 0x9e, 0x1c, 0x9b, 0xe5, 0x51, 0x91, 0x8a, 0xa0, 0xe8, 0x15, 0x2c, 0x5a, + 0xd9, 0x99, 0x46, 0xee, 0x58, 0x94, 0x47, 0xc5, 0x72, 0x7d, 0xf6, 0x5b, 0xf0, 0x76, 0x80, 0xc4, + 0x48, 0x64, 0xef, 0x04, 0x92, 0xf1, 0x2e, 0x7a, 0x03, 0x0b, 0x55, 0x7b, 0xab, 0x95, 0x63, 0x04, + 0x93, 0x2e, 0xff, 0x52, 0x91, 0xdf, 0xd6, 0xde, 0x76, 0x62, 0x44, 0x29, 0x85, 0x7f, 0xfb, 0x17, + 0x63, 0xd8, 0x2c, 0x27, 0x45, 0x22, 0x70, 0xce, 0xee, 0x21, 0x46, 0x17, 0x3d, 0x81, 0x18, 0x6b, + 0xe3, 0x1b, 0xa4, 0xe2, 0x28, 0x68, 0x06, 0x49, 0x6b, 0x75, 0x63, 0xb5, 0xef, 0x10, 0x8b, 0xc5, + 0xa4, 0x87, 0xb5, 0x2b, 
0x59, 0x57, 0xca, 0xb0, 0x08, 0x03, 0x83, 0xca, 0x36, 0x10, 0xe3, 0x2e, + 0x83, 0xa1, 0xb5, 0x6a, 0xaf, 0x5f, 0x43, 0x66, 0x50, 0x43, 0x8f, 0x9d, 0xf4, 0x12, 0x03, 0x53, + 0x81, 0xf3, 0x36, 0xfd, 0xe8, 0x57, 0xe4, 0xb3, 0x5f, 0x91, 0xaf, 0x7e, 0x45, 0xca, 0x39, 0x7e, + 0xdd, 0xe6, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xd2, 0x95, 0x9b, 0xc1, 0xcb, 0x01, 0x00, 0x00, +} diff --git a/bitswap/message/pb/message.proto b/bitswap/message/pb/message.proto index 59d03a6e1..23d5ef852 100644 --- a/bitswap/message/pb/message.proto +++ b/bitswap/message/pb/message.proto @@ -1,3 +1,5 @@ +syntax = "proto3"; + package bitswap.message.pb; message Message { @@ -5,21 +7,21 @@ message Message { message Wantlist { message Entry { - optional string block = 1; // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0) - optional int32 priority = 2; // the priority (normalized). default to 1 - optional bool cancel = 3; // whether this revokes an entry - } + bytes block = 1; // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0) + int32 priority = 2; // the priority (normalized). default to 1 + bool cancel = 3; // whether this revokes an entry + } - repeated Entry entries = 1; // a list of wantlist entries - optional bool full = 2; // whether this is the full wantlist. default to false + repeated Entry entries = 1; // a list of wantlist entries + bool full = 2; // whether this is the full wantlist. 
default to false } message Block { - optional bytes prefix = 1; // CID prefix (cid version, multicodec and multihash prefix (type + length) - optional bytes data = 2; + bytes prefix = 1; // CID prefix (cid version, multicodec and multihash prefix (type + length) + bytes data = 2; } - optional Wantlist wantlist = 1; + Wantlist wantlist = 1; repeated bytes blocks = 2; // used to send Blocks in bitswap 1.0.0 repeated Block payload = 3; // used to send Blocks in bitswap 1.1.0 } From 816315ca43b7716a48d3d5502b218b38fa249e57 Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Wed, 5 Sep 2018 03:12:12 -0400 Subject: [PATCH 0648/1035] gx update and fix code to use new Cid type This commit was moved from ipfs/go-bitswap@5345e9eb0a814ca61cca0861cce524f17a56e3bb --- bitswap/bitswap.go | 30 ++++++------- bitswap/bitswap_test.go | 12 ++--- bitswap/decision/ledger.go | 6 +-- bitswap/decision/peer_request_queue.go | 10 ++--- bitswap/get.go | 16 +++---- bitswap/message/message.go | 10 ++--- bitswap/message/message_test.go | 6 +-- bitswap/network/interface.go | 4 +- bitswap/network/ipfs_impl.go | 4 +- bitswap/notifications/notifications.go | 6 +-- bitswap/notifications/notifications_test.go | 4 +- bitswap/session.go | 50 ++++++++++----------- bitswap/session_test.go | 16 +++---- bitswap/stat.go | 2 +- bitswap/testnet/virtual.go | 4 +- bitswap/wantlist/wantlist.go | 16 +++---- bitswap/wantlist/wantlist_test.go | 8 ++-- bitswap/wantmanager.go | 6 +-- bitswap/workers.go | 8 ++-- 19 files changed, 109 insertions(+), 109 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index f6a42fc7a..b8dd498c0 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -96,8 +96,8 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, network: network, findKeys: make(chan *blockRequest, sizeBatchRequestChan), process: px, - newBlocks: make(chan *cid.Cid, HasBlockBufferSize), - provideKeys: make(chan *cid.Cid, provideKeysBufferSize), + newBlocks: make(chan cid.Cid, 
HasBlockBufferSize), + provideKeys: make(chan cid.Cid, provideKeysBufferSize), wm: NewWantManager(ctx, network), counters: new(counters), @@ -146,9 +146,9 @@ type Bitswap struct { // newBlocks is a channel for newly added blocks to be provided to the // network. blocks pushed down this channel get buffered and fed to the // provideKeys channel later on to avoid too much network activity - newBlocks chan *cid.Cid + newBlocks chan cid.Cid // provideKeys directly feeds provide workers - provideKeys chan *cid.Cid + provideKeys chan cid.Cid process process.Process @@ -179,18 +179,18 @@ type counters struct { } type blockRequest struct { - Cid *cid.Cid + Cid cid.Cid Ctx context.Context } // GetBlock attempts to retrieve a particular block from peers within the // deadline enforced by the context. -func (bs *Bitswap) GetBlock(parent context.Context, k *cid.Cid) (blocks.Block, error) { +func (bs *Bitswap) GetBlock(parent context.Context, k cid.Cid) (blocks.Block, error) { return getBlock(parent, k, bs.GetBlocks) } -func (bs *Bitswap) WantlistForPeer(p peer.ID) []*cid.Cid { - var out []*cid.Cid +func (bs *Bitswap) WantlistForPeer(p peer.ID) []cid.Cid { + var out []cid.Cid for _, e := range bs.engine.WantlistForPeer(p) { out = append(out, e.Cid) } @@ -208,7 +208,7 @@ func (bs *Bitswap) LedgerForPeer(p peer.ID) *decision.Receipt { // NB: Your request remains open until the context expires. To conserve // resources, provide a context with a reasonably short deadline (ie. 
not one // that lasts throughout the lifetime of the server) -func (bs *Bitswap) GetBlocks(ctx context.Context, keys []*cid.Cid) (<-chan blocks.Block, error) { +func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { if len(keys) == 0 { out := make(chan blocks.Block) close(out) @@ -259,7 +259,7 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []*cid.Cid) (<-chan block return } - bs.CancelWants([]*cid.Cid{blk.Cid()}, mses) + bs.CancelWants([]cid.Cid{blk.Cid()}, mses) remaining.Remove(blk.Cid()) select { case out <- blk: @@ -288,7 +288,7 @@ func (bs *Bitswap) getNextSessionID() uint64 { } // CancelWant removes a given key from the wantlist -func (bs *Bitswap) CancelWants(cids []*cid.Cid, ses uint64) { +func (bs *Bitswap) CancelWants(cids []cid.Cid, ses uint64) { if len(cids) == 0 { return } @@ -326,7 +326,7 @@ func (bs *Bitswap) receiveBlockFrom(blk blocks.Block, from peer.ID) error { bs.notifications.Publish(blk) k := blk.Cid() - ks := []*cid.Cid{k} + ks := []cid.Cid{k} for _, s := range bs.SessionsForBlock(k) { s.receiveBlockFrom(from, blk) bs.CancelWants(ks, s.id) @@ -344,7 +344,7 @@ func (bs *Bitswap) receiveBlockFrom(blk blocks.Block, from peer.ID) error { } // SessionsForBlock returns a slice of all sessions that may be interested in the given cid -func (bs *Bitswap) SessionsForBlock(c *cid.Cid) []*Session { +func (bs *Bitswap) SessionsForBlock(c cid.Cid) []*Session { bs.sessLk.Lock() defer bs.sessLk.Unlock() @@ -440,9 +440,9 @@ func (bs *Bitswap) Close() error { return bs.process.Close() } -func (bs *Bitswap) GetWantlist() []*cid.Cid { +func (bs *Bitswap) GetWantlist() []cid.Cid { entries := bs.wm.wl.Entries() - out := make([]*cid.Cid, 0, len(entries)) + out := make([]cid.Cid, 0, len(entries)) for _, e := range entries { out = append(out, e.Cid) } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 348859966..715958eb1 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -179,7 
+179,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { } } - var blkeys []*cid.Cid + var blkeys []cid.Cid first := instances[0] for _, b := range blocks { blkeys = append(blkeys, b.Cid()) @@ -253,7 +253,7 @@ func TestSendToWantingPeer(t *testing.T) { // peerA requests and waits for block alpha ctx, cancel := context.WithTimeout(context.Background(), waitTime) defer cancel() - alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []*cid.Cid{alpha.Cid()}) + alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []cid.Cid{alpha.Cid()}) if err != nil { t.Fatal(err) } @@ -285,7 +285,7 @@ func TestEmptyKey(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - _, err := bs.GetBlock(ctx, nil) + _, err := bs.GetBlock(ctx, cid.Cid{}) if err != blockstore.ErrNotFound { t.Error("empty str key should return ErrNotFound") } @@ -393,7 +393,7 @@ func TestDoubleGet(t *testing.T) { // through before the peers even get connected. This is okay, bitswap // *should* be able to handle this. 
ctx1, cancel1 := context.WithCancel(context.Background()) - blkch1, err := instances[1].Exchange.GetBlocks(ctx1, []*cid.Cid{blocks[0].Cid()}) + blkch1, err := instances[1].Exchange.GetBlocks(ctx1, []cid.Cid{blocks[0].Cid()}) if err != nil { t.Fatal(err) } @@ -401,7 +401,7 @@ func TestDoubleGet(t *testing.T) { ctx2, cancel2 := context.WithCancel(context.Background()) defer cancel2() - blkch2, err := instances[1].Exchange.GetBlocks(ctx2, []*cid.Cid{blocks[0].Cid()}) + blkch2, err := instances[1].Exchange.GetBlocks(ctx2, []cid.Cid{blocks[0].Cid()}) if err != nil { t.Fatal(err) } @@ -456,7 +456,7 @@ func TestWantlistCleanup(t *testing.T) { bswap := instances.Exchange blocks := bg.Blocks(20) - var keys []*cid.Cid + var keys []cid.Cid for _, b := range blocks { keys = append(keys, b.Cid()) } diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index f38460ec1..2c4497631 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -76,16 +76,16 @@ func (l *ledger) ReceivedBytes(n int) { l.Accounting.BytesRecv += uint64(n) } -func (l *ledger) Wants(k *cid.Cid, priority int) { +func (l *ledger) Wants(k cid.Cid, priority int) { log.Debugf("peer %s wants %s", l.Partner, k) l.wantList.Add(k, priority) } -func (l *ledger) CancelWant(k *cid.Cid) { +func (l *ledger) CancelWant(k cid.Cid) { l.wantList.Remove(k) } -func (l *ledger) WantListContains(k *cid.Cid) (*wl.Entry, bool) { +func (l *ledger) WantListContains(k cid.Cid) (*wl.Entry, bool) { return l.wantList.Contains(k) } diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index b9e34763c..78113f75d 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -15,7 +15,7 @@ type peerRequestQueue interface { // Pop returns the next peerRequestTask. Returns nil if the peerRequestQueue is empty. 
Pop() *peerRequestTask Push(entry *wantlist.Entry, to peer.ID) - Remove(k *cid.Cid, p peer.ID) + Remove(k cid.Cid, p peer.ID) // NB: cannot expose simply expose taskQueue.Len because trashed elements // may exist. These trashed elements should not contribute to the count. @@ -114,7 +114,7 @@ func (tl *prq) Pop() *peerRequestTask { } // Remove removes a task from the queue -func (tl *prq) Remove(k *cid.Cid, p peer.ID) { +func (tl *prq) Remove(k cid.Cid, p peer.ID) { tl.lock.Lock() t, ok := tl.taskMap[taskKey(p, k)] if ok { @@ -195,7 +195,7 @@ func (t *peerRequestTask) SetIndex(i int) { } // taskKey returns a key that uniquely identifies a task. -func taskKey(p peer.ID, k *cid.Cid) string { +func taskKey(p peer.ID, k cid.Cid) string { return string(p) + k.KeyString() } @@ -281,7 +281,7 @@ func partnerCompare(a, b pq.Elem) bool { } // StartTask signals that a task was started for this partner -func (p *activePartner) StartTask(k *cid.Cid) { +func (p *activePartner) StartTask(k cid.Cid) { p.activelk.Lock() p.activeBlocks.Add(k) p.active++ @@ -289,7 +289,7 @@ func (p *activePartner) StartTask(k *cid.Cid) { } // TaskDone signals that a task was completed for this partner -func (p *activePartner) TaskDone(k *cid.Cid) { +func (p *activePartner) TaskDone(k cid.Cid) { p.activelk.Lock() p.activeBlocks.Remove(k) p.active-- diff --git a/bitswap/get.go b/bitswap/get.go index be5cf3cb6..8578277e8 100644 --- a/bitswap/get.go +++ b/bitswap/get.go @@ -11,11 +11,11 @@ import ( blockstore "github.com/ipfs/go-ipfs-blockstore" ) -type getBlocksFunc func(context.Context, []*cid.Cid) (<-chan blocks.Block, error) +type getBlocksFunc func(context.Context, []cid.Cid) (<-chan blocks.Block, error) -func getBlock(p context.Context, k *cid.Cid, gb getBlocksFunc) (blocks.Block, error) { - if k == nil { - log.Error("nil cid in GetBlock") +func getBlock(p context.Context, k cid.Cid, gb getBlocksFunc) (blocks.Block, error) { + if !k.Defined() { + log.Error("undefined cid in GetBlock") return nil, 
blockstore.ErrNotFound } @@ -28,7 +28,7 @@ func getBlock(p context.Context, k *cid.Cid, gb getBlocksFunc) (blocks.Block, er ctx, cancel := context.WithCancel(p) defer cancel() - promise, err := gb(ctx, []*cid.Cid{k}) + promise, err := gb(ctx, []cid.Cid{k}) if err != nil { return nil, err } @@ -49,9 +49,9 @@ func getBlock(p context.Context, k *cid.Cid, gb getBlocksFunc) (blocks.Block, er } } -type wantFunc func(context.Context, []*cid.Cid) +type wantFunc func(context.Context, []cid.Cid) -func getBlocksImpl(ctx context.Context, keys []*cid.Cid, notif notifications.PubSub, want wantFunc, cwants func([]*cid.Cid)) (<-chan blocks.Block, error) { +func getBlocksImpl(ctx context.Context, keys []cid.Cid, notif notifications.PubSub, want wantFunc, cwants func([]cid.Cid)) (<-chan blocks.Block, error) { if len(keys) == 0 { out := make(chan blocks.Block) close(out) @@ -72,7 +72,7 @@ func getBlocksImpl(ctx context.Context, keys []*cid.Cid, notif notifications.Pub return out, nil } -func handleIncoming(ctx context.Context, remaining *cid.Set, in <-chan blocks.Block, out chan blocks.Block, cfun func([]*cid.Cid)) { +func handleIncoming(ctx context.Context, remaining *cid.Set, in <-chan blocks.Block, out chan blocks.Block, cfun func([]cid.Cid)) { ctx, cancel := context.WithCancel(ctx) defer func() { cancel() diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 9aba444b3..92f0259cd 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -25,9 +25,9 @@ type BitSwapMessage interface { Blocks() []blocks.Block // AddEntry adds an entry to the Wantlist. 
- AddEntry(key *cid.Cid, priority int) + AddEntry(key cid.Cid, priority int) - Cancel(key *cid.Cid) + Cancel(key cid.Cid) Empty() bool @@ -134,16 +134,16 @@ func (m *impl) Blocks() []blocks.Block { return bs } -func (m *impl) Cancel(k *cid.Cid) { +func (m *impl) Cancel(k cid.Cid) { delete(m.wantlist, k.KeyString()) m.addEntry(k, 0, true) } -func (m *impl) AddEntry(k *cid.Cid, priority int) { +func (m *impl) AddEntry(k cid.Cid, priority int) { m.addEntry(k, priority, false) } -func (m *impl) addEntry(c *cid.Cid, priority int, cancel bool) { +func (m *impl) addEntry(c cid.Cid, priority int, cancel bool) { k := c.KeyString() e, exists := m.wantlist[k] if exists { diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 539d212e5..a3e1cd8f9 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -11,7 +11,7 @@ import ( u "github.com/ipfs/go-ipfs-util" ) -func mkFakeCid(s string) *cid.Cid { +func mkFakeCid(s string) cid.Cid { return cid.NewCidV0(u.Hash([]byte(s))) } @@ -67,7 +67,7 @@ func TestAppendBlock(t *testing.T) { } func TestWantlist(t *testing.T) { - keystrs := []*cid.Cid{mkFakeCid("foo"), mkFakeCid("bar"), mkFakeCid("baz"), mkFakeCid("bat")} + keystrs := []cid.Cid{mkFakeCid("foo"), mkFakeCid("bar"), mkFakeCid("baz"), mkFakeCid("bat")} m := New(true) for _, s := range keystrs { m.AddEntry(s, 1) @@ -163,7 +163,7 @@ func TestToAndFromNetMessage(t *testing.T) { } } -func wantlistContains(wantlist *pb.Message_Wantlist, c *cid.Cid) bool { +func wantlistContains(wantlist *pb.Message_Wantlist, c cid.Cid) bool { for _, e := range wantlist.GetEntries() { if bytes.Equal(e.GetBlock(), c.Bytes()) { return true diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 03a379806..fd5622c1f 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -63,8 +63,8 @@ type Receiver interface { type Routing interface { // FindProvidersAsync returns a channel of providers for the given 
key - FindProvidersAsync(context.Context, *cid.Cid, int) <-chan peer.ID + FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID // Provide provides the key to the network - Provide(context.Context, *cid.Cid) error + Provide(context.Context, cid.Cid) error } diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index aa142d879..cd0670aef 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -138,7 +138,7 @@ func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { } // FindProvidersAsync returns a channel of providers for the given key -func (bsnet *impl) FindProvidersAsync(ctx context.Context, k *cid.Cid, max int) <-chan peer.ID { +func (bsnet *impl) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { // Since routing queries are expensive, give bitswap the peers to which we // have open connections. Note that this may cause issues if bitswap starts @@ -174,7 +174,7 @@ func (bsnet *impl) FindProvidersAsync(ctx context.Context, k *cid.Cid, max int) } // Provide provides the key to the network -func (bsnet *impl) Provide(ctx context.Context, k *cid.Cid) error { +func (bsnet *impl) Provide(ctx context.Context, k cid.Cid) error { return bsnet.routing.Provide(ctx, k, true) } diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index d20270109..81ba39499 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -13,7 +13,7 @@ const bufferSize = 16 type PubSub interface { Publish(block blocks.Block) - Subscribe(ctx context.Context, keys ...*cid.Cid) <-chan blocks.Block + Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Block Shutdown() } @@ -61,7 +61,7 @@ func (ps *impl) Shutdown() { // Subscribe returns a channel of blocks for the given |keys|. |blockChannel| // is closed if the |ctx| times out or is cancelled, or after sending len(keys) // blocks. 
-func (ps *impl) Subscribe(ctx context.Context, keys ...*cid.Cid) <-chan blocks.Block { +func (ps *impl) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Block { blocksCh := make(chan blocks.Block, len(keys)) valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking @@ -121,7 +121,7 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...*cid.Cid) <-chan blocks.B return blocksCh } -func toStrings(keys []*cid.Cid) []string { +func toStrings(keys []cid.Cid) []string { strs := make([]string, 0, len(keys)) for _, key := range keys { strs = append(strs, key.KeyString()) diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index e377f319e..38ab6f9af 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -151,8 +151,8 @@ func TestDoesNotDeadLockIfContextCancelledBeforePublish(t *testing.T) { t.Log("generate a large number of blocks. 
exceed default buffer") bs := g.Blocks(1000) - ks := func() []*cid.Cid { - var keys []*cid.Cid + ks := func() []cid.Cid { + var keys []cid.Cid for _, b := range bs { keys = append(keys, b.Cid()) } diff --git a/bitswap/session.go b/bitswap/session.go index d652dac1e..a3b6005b7 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -28,8 +28,8 @@ type Session struct { bs *Bitswap incoming chan blkRecv - newReqs chan []*cid.Cid - cancelKeys chan []*cid.Cid + newReqs chan []cid.Cid + cancelKeys chan []cid.Cid interestReqs chan interestReq interest *lru.Cache @@ -55,8 +55,8 @@ func (bs *Bitswap) NewSession(ctx context.Context) *Session { s := &Session{ activePeers: make(map[peer.ID]struct{}), liveWants: make(map[string]time.Time), - newReqs: make(chan []*cid.Cid), - cancelKeys: make(chan []*cid.Cid), + newReqs: make(chan []cid.Cid), + cancelKeys: make(chan []cid.Cid), tofetch: newCidQueue(), interestReqs: make(chan interestReq), ctx: ctx, @@ -85,7 +85,7 @@ func (bs *Bitswap) NewSession(ctx context.Context) *Session { func (bs *Bitswap) removeSession(s *Session) { s.notif.Shutdown() - live := make([]*cid.Cid, 0, len(s.liveWants)) + live := make([]cid.Cid, 0, len(s.liveWants)) for c := range s.liveWants { cs, _ := cid.Cast([]byte(c)) live = append(live, cs) @@ -116,7 +116,7 @@ func (s *Session) receiveBlockFrom(from peer.ID, blk blocks.Block) { } type interestReq struct { - c *cid.Cid + c cid.Cid resp chan bool } @@ -127,7 +127,7 @@ type interestReq struct { // note that in the average case (where this session *is* interested in the // block we received) this function will not be called, as the cid will likely // still be in the interest cache. 
-func (s *Session) isLiveWant(c *cid.Cid) bool { +func (s *Session) isLiveWant(c cid.Cid) bool { resp := make(chan bool, 1) select { case s.interestReqs <- interestReq{ @@ -146,7 +146,7 @@ func (s *Session) isLiveWant(c *cid.Cid) bool { } } -func (s *Session) interestedIn(c *cid.Cid) bool { +func (s *Session) interestedIn(c cid.Cid) bool { return s.interest.Contains(c.KeyString()) || s.isLiveWant(c) } @@ -208,7 +208,7 @@ func (s *Session) run(ctx context.Context) { s.cancel(keys) case <-s.tick.C: - live := make([]*cid.Cid, 0, len(s.liveWants)) + live := make([]cid.Cid, 0, len(s.liveWants)) now := time.Now() for c := range s.liveWants { cs, _ := cid.Cast([]byte(c)) @@ -220,7 +220,7 @@ func (s *Session) run(ctx context.Context) { s.bs.wm.WantBlocks(ctx, live, nil, s.id) if len(live) > 0 { - go func(k *cid.Cid) { + go func(k cid.Cid) { // TODO: have a task queue setup for this to: // - rate limit // - manage timeouts @@ -249,7 +249,7 @@ func (s *Session) run(ctx context.Context) { } } -func (s *Session) cidIsWanted(c *cid.Cid) bool { +func (s *Session) cidIsWanted(c cid.Cid) bool { _, ok := s.liveWants[c.KeyString()] if !ok { ok = s.tofetch.Has(c) @@ -272,13 +272,13 @@ func (s *Session) receiveBlock(ctx context.Context, blk blocks.Block) { s.fetchcnt++ s.notif.Publish(blk) - if next := s.tofetch.Pop(); next != nil { - s.wantBlocks(ctx, []*cid.Cid{next}) + if next := s.tofetch.Pop(); next.Defined() { + s.wantBlocks(ctx, []cid.Cid{next}) } } } -func (s *Session) wantBlocks(ctx context.Context, ks []*cid.Cid) { +func (s *Session) wantBlocks(ctx context.Context, ks []cid.Cid) { now := time.Now() for _, c := range ks { s.liveWants[c.KeyString()] = now @@ -286,20 +286,20 @@ func (s *Session) wantBlocks(ctx context.Context, ks []*cid.Cid) { s.bs.wm.WantBlocks(ctx, ks, s.activePeersArr, s.id) } -func (s *Session) cancel(keys []*cid.Cid) { +func (s *Session) cancel(keys []cid.Cid) { for _, c := range keys { s.tofetch.Remove(c) } } -func (s *Session) cancelWants(keys 
[]*cid.Cid) { +func (s *Session) cancelWants(keys []cid.Cid) { select { case s.cancelKeys <- keys: case <-s.ctx.Done(): } } -func (s *Session) fetch(ctx context.Context, keys []*cid.Cid) { +func (s *Session) fetch(ctx context.Context, keys []cid.Cid) { select { case s.newReqs <- keys: case <-ctx.Done(): @@ -310,18 +310,18 @@ func (s *Session) fetch(ctx context.Context, keys []*cid.Cid) { // GetBlocks fetches a set of blocks within the context of this session and // returns a channel that found blocks will be returned on. No order is // guaranteed on the returned blocks. -func (s *Session) GetBlocks(ctx context.Context, keys []*cid.Cid) (<-chan blocks.Block, error) { +func (s *Session) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { ctx = logging.ContextWithLoggable(ctx, s.uuid) return getBlocksImpl(ctx, keys, s.notif, s.fetch, s.cancelWants) } // GetBlock fetches a single block -func (s *Session) GetBlock(parent context.Context, k *cid.Cid) (blocks.Block, error) { +func (s *Session) GetBlock(parent context.Context, k cid.Cid) (blocks.Block, error) { return getBlock(parent, k, s.GetBlocks) } type cidQueue struct { - elems []*cid.Cid + elems []cid.Cid eset *cid.Set } @@ -329,10 +329,10 @@ func newCidQueue() *cidQueue { return &cidQueue{eset: cid.NewSet()} } -func (cq *cidQueue) Pop() *cid.Cid { +func (cq *cidQueue) Pop() cid.Cid { for { if len(cq.elems) == 0 { - return nil + return cid.Cid{} } out := cq.elems[0] @@ -345,17 +345,17 @@ func (cq *cidQueue) Pop() *cid.Cid { } } -func (cq *cidQueue) Push(c *cid.Cid) { +func (cq *cidQueue) Push(c cid.Cid) { if cq.eset.Visit(c) { cq.elems = append(cq.elems, c) } } -func (cq *cidQueue) Remove(c *cid.Cid) { +func (cq *cidQueue) Remove(c cid.Cid) { cq.eset.Remove(c) } -func (cq *cidQueue) Has(c *cid.Cid) bool { +func (cq *cidQueue) Has(c cid.Cid) bool { return cq.eset.Has(c) } diff --git a/bitswap/session_test.go b/bitswap/session_test.go index 97b7a31a8..8769d891f 100644 --- 
a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -76,7 +76,7 @@ func TestSessionBetweenPeers(t *testing.T) { t.Fatal(err) } - var cids []*cid.Cid + var cids []cid.Cid for _, blk := range blks { cids = append(cids, blk.Cid()) } @@ -127,7 +127,7 @@ func TestSessionSplitFetch(t *testing.T) { } } - var cids []*cid.Cid + var cids []cid.Cid for _, blk := range blks { cids = append(cids, blk.Cid()) } @@ -167,12 +167,12 @@ func TestInterestCacheOverflow(t *testing.T) { b := inst[1] ses := a.Exchange.NewSession(ctx) - zeroch, err := ses.GetBlocks(ctx, []*cid.Cid{blks[0].Cid()}) + zeroch, err := ses.GetBlocks(ctx, []cid.Cid{blks[0].Cid()}) if err != nil { t.Fatal(err) } - var restcids []*cid.Cid + var restcids []cid.Cid for _, blk := range blks[1:] { restcids = append(restcids, blk.Cid()) } @@ -219,7 +219,7 @@ func TestPutAfterSessionCacheEvict(t *testing.T) { ses := a.Exchange.NewSession(ctx) - var allcids []*cid.Cid + var allcids []cid.Cid for _, blk := range blks[1:] { allcids = append(allcids, blk.Cid()) } @@ -261,14 +261,14 @@ func TestMultipleSessions(t *testing.T) { ctx1, cancel1 := context.WithCancel(ctx) ses := a.Exchange.NewSession(ctx1) - blkch, err := ses.GetBlocks(ctx, []*cid.Cid{blk.Cid()}) + blkch, err := ses.GetBlocks(ctx, []cid.Cid{blk.Cid()}) if err != nil { t.Fatal(err) } cancel1() ses2 := a.Exchange.NewSession(ctx) - blkch2, err := ses2.GetBlocks(ctx, []*cid.Cid{blk.Cid()}) + blkch2, err := ses2.GetBlocks(ctx, []cid.Cid{blk.Cid()}) if err != nil { t.Fatal(err) } @@ -296,7 +296,7 @@ func TestWantlistClearsOnCancel(t *testing.T) { bgen := blocksutil.NewBlockGenerator() blks := bgen.Blocks(10) - var cids []*cid.Cid + var cids []cid.Cid for _, blk := range blks { cids = append(cids, blk.Cid()) } diff --git a/bitswap/stat.go b/bitswap/stat.go index 99dbbd32b..d01d17172 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -8,7 +8,7 @@ import ( type Stat struct { ProvideBufLen int - Wantlist []*cid.Cid + Wantlist []cid.Cid Peers []string 
BlocksReceived uint64 DataReceived uint64 diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 2a1e9377c..004dd66c0 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -131,7 +131,7 @@ func (nc *networkClient) SendMessage( } // FindProvidersAsync returns a channel of providers for the given key -func (nc *networkClient) FindProvidersAsync(ctx context.Context, k *cid.Cid, max int) <-chan peer.ID { +func (nc *networkClient) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { // NB: this function duplicates the PeerInfo -> ID transformation in the // bitswap network adapter. Not to worry. This network client will be @@ -185,7 +185,7 @@ func (n *networkClient) NewMessageSender(ctx context.Context, p peer.ID) (bsnet. } // Provide provides the key to the network -func (nc *networkClient) Provide(ctx context.Context, k *cid.Cid) error { +func (nc *networkClient) Provide(ctx context.Context, k cid.Cid) error { return nc.routing.Provide(ctx, k, true) } diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index beb4ac752..22819240c 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -20,14 +20,14 @@ type Wantlist struct { } type Entry struct { - Cid *cid.Cid + Cid cid.Cid Priority int SesTrk map[uint64]struct{} } // NewRefEntry creates a new reference tracked wantlist entry -func NewRefEntry(c *cid.Cid, p int) *Entry { +func NewRefEntry(c cid.Cid, p int) *Entry { return &Entry{ Cid: c, Priority: p, @@ -61,7 +61,7 @@ func New() *Wantlist { // TODO: think through priority changes here // Add returns true if the cid did not exist in the wantlist before this call // (even if it was under a different session) -func (w *ThreadSafe) Add(c *cid.Cid, priority int, ses uint64) bool { +func (w *ThreadSafe) Add(c cid.Cid, priority int, ses uint64) bool { w.lk.Lock() defer w.lk.Unlock() k := c.KeyString() @@ -97,7 +97,7 @@ func (w *ThreadSafe) AddEntry(e *Entry, ses 
uint64) bool { // 'true' is returned if this call to Remove removed the final session ID // tracking the cid. (meaning true will be returned iff this call caused the // value of 'Contains(c)' to change from true to false) -func (w *ThreadSafe) Remove(c *cid.Cid, ses uint64) bool { +func (w *ThreadSafe) Remove(c cid.Cid, ses uint64) bool { w.lk.Lock() defer w.lk.Unlock() k := c.KeyString() @@ -116,7 +116,7 @@ func (w *ThreadSafe) Remove(c *cid.Cid, ses uint64) bool { // Contains returns true if the given cid is in the wantlist tracked by one or // more sessions -func (w *ThreadSafe) Contains(k *cid.Cid) (*Entry, bool) { +func (w *ThreadSafe) Contains(k cid.Cid) (*Entry, bool) { w.lk.RLock() defer w.lk.RUnlock() e, ok := w.set[k.KeyString()] @@ -149,7 +149,7 @@ func (w *Wantlist) Len() int { return len(w.set) } -func (w *Wantlist) Add(c *cid.Cid, priority int) bool { +func (w *Wantlist) Add(c cid.Cid, priority int) bool { k := c.KeyString() if _, ok := w.set[k]; ok { return false @@ -172,7 +172,7 @@ func (w *Wantlist) AddEntry(e *Entry) bool { return true } -func (w *Wantlist) Remove(c *cid.Cid) bool { +func (w *Wantlist) Remove(c cid.Cid) bool { k := c.KeyString() _, ok := w.set[k] if !ok { @@ -183,7 +183,7 @@ func (w *Wantlist) Remove(c *cid.Cid) bool { return true } -func (w *Wantlist) Contains(k *cid.Cid) (*Entry, bool) { +func (w *Wantlist) Contains(k cid.Cid) (*Entry, bool) { e, ok := w.set[k.KeyString()] return e, ok } diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/wantlist/wantlist_test.go index 0d4c696ad..4ce31949f 100644 --- a/bitswap/wantlist/wantlist_test.go +++ b/bitswap/wantlist/wantlist_test.go @@ -6,7 +6,7 @@ import ( cid "github.com/ipfs/go-cid" ) -var testcids []*cid.Cid +var testcids []cid.Cid func init() { strs := []string{ @@ -25,10 +25,10 @@ func init() { } type wli interface { - Contains(*cid.Cid) (*Entry, bool) + Contains(cid.Cid) (*Entry, bool) } -func assertHasCid(t *testing.T, w wli, c *cid.Cid) { +func assertHasCid(t *testing.T, 
w wli, c cid.Cid) { e, ok := w.Contains(c) if !ok { t.Fatal("expected to have ", c) @@ -38,7 +38,7 @@ func assertHasCid(t *testing.T, w wli, c *cid.Cid) { } } -func assertNotHasCid(t *testing.T, w wli, c *cid.Cid) { +func assertNotHasCid(t *testing.T, w wli, c cid.Cid) { _, ok := w.Contains(c) if ok { t.Fatal("expected not to have ", c) diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 380d85381..87efb8605 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -77,13 +77,13 @@ type msgQueue struct { } // WantBlocks adds the given cids to the wantlist, tracked by the given session -func (pm *WantManager) WantBlocks(ctx context.Context, ks []*cid.Cid, peers []peer.ID, ses uint64) { +func (pm *WantManager) WantBlocks(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) { log.Infof("want blocks: %s", ks) pm.addEntries(ctx, ks, peers, false, ses) } // CancelWants removes the given cids from the wantlist, tracked by the given session -func (pm *WantManager) CancelWants(ctx context.Context, ks []*cid.Cid, peers []peer.ID, ses uint64) { +func (pm *WantManager) CancelWants(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) { pm.addEntries(context.Background(), ks, peers, true, ses) } @@ -93,7 +93,7 @@ type wantSet struct { from uint64 } -func (pm *WantManager) addEntries(ctx context.Context, ks []*cid.Cid, targets []peer.ID, cancel bool, ses uint64) { +func (pm *WantManager) addEntries(ctx context.Context, ks []cid.Cid, targets []peer.ID, cancel bool, ses uint64) { entries := make([]*bsmsg.Entry, 0, len(ks)) for i, k := range ks { entries = append(entries, &bsmsg.Entry{ diff --git a/bitswap/workers.go b/bitswap/workers.go index 8f5e6edda..41ede8e99 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -91,7 +91,7 @@ func (bs *Bitswap) provideWorker(px process.Process) { limit := make(chan struct{}, provideWorkerMax) - limitedGoProvide := func(k *cid.Cid, wid int) { + limitedGoProvide := func(k cid.Cid, wid int) { 
defer func() { // replace token when done <-limit @@ -135,9 +135,9 @@ func (bs *Bitswap) provideWorker(px process.Process) { func (bs *Bitswap) provideCollector(ctx context.Context) { defer close(bs.provideKeys) - var toProvide []*cid.Cid - var nextKey *cid.Cid - var keysOut chan *cid.Cid + var toProvide []cid.Cid + var nextKey cid.Cid + var keysOut chan cid.Cid for { select { From 5185f69def4b0bd6d9c860b6d5c7c5cfd0add1bf Mon Sep 17 00:00:00 2001 From: taylor Date: Wed, 3 Oct 2018 21:30:12 -0400 Subject: [PATCH 0649/1035] bitswap: Bitswap now sends multiple blocks per message Updated PeerRequestTask to hold multiple wantlist.Entry(s). This allows Bitswap to send multiple blocks in bulk per a Peer's request. Also, added a metric for how many blocks to put in a given message. Currently: 512 * 1024 bytes. License: MIT Signed-off-by: Jeromy This commit was moved from ipfs/go-bitswap@eb0d1ffc0a582a25f0f84816b9ce30007e9041ab --- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 52 +++++++--- bitswap/decision/engine_test.go | 54 ++++++++--- bitswap/decision/peer_request_queue.go | 102 ++++++++++++-------- bitswap/decision/peer_request_queue_test.go | 16 +-- bitswap/wantlist/wantlist.go | 2 + bitswap/wantmanager.go | 14 ++- bitswap/workers.go | 27 +++--- 8 files changed, 180 insertions(+), 89 deletions(-) diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index dc3aea066..46d40ce0d 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -25,6 +25,6 @@ func BenchmarkTaskQueuePush(b *testing.B) { for i := 0; i < b.N; i++ { c := cid.NewCidV0(u.Hash([]byte(fmt.Sprint(i)))) - q.Push(&wantlist.Entry{Cid: c, Priority: math.MaxInt32}, peers[i%len(peers)]) + q.Push(peers[i%len(peers)], &wantlist.Entry{Cid: c, Priority: math.MaxInt32}) } } diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 736e5d46d..e605996db 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ 
-52,6 +52,8 @@ var log = logging.Logger("engine") const ( // outboxChanBuffer must be 0 to prevent stale messages from being sent outboxChanBuffer = 0 + // maxMessageSize is the maximum size of the batched payload + maxMessageSize = 512 * 1024 ) // Envelope contains a message for a Peer @@ -59,8 +61,8 @@ type Envelope struct { // Peer is the intended recipient Peer peer.ID - // Block is the payload - Block blocks.Block + // Message is the payload + Message bsmsg.BitSwapMessage // A callback to notify the decision queue that the task is complete Sent func() @@ -166,21 +168,28 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { } // with a task in hand, we're ready to prepare the envelope... + msg := bsmsg.New(true) + for _, entry := range nextTask.Entries { + block, err := e.bs.Get(entry.Cid) + if err != nil { + log.Errorf("tried to execute a task and errored fetching block: %s", err) + continue + } + msg.AddBlock(block) + } - block, err := e.bs.Get(nextTask.Entry.Cid) - if err != nil { - log.Errorf("tried to execute a task and errored fetching block: %s", err) + if msg.Empty() { // If we don't have the block, don't hold that against the peer // make sure to update that the task has been 'completed' - nextTask.Done() + nextTask.Done(nextTask.Entries) continue } return &Envelope{ - Peer: nextTask.Target, - Block: block, + Peer: nextTask.Target, + Message: msg, Sent: func() { - nextTask.Done() + nextTask.Done(nextTask.Entries) select { case e.workSignal <- struct{}{}: // work completing may mean that our queue will provide new @@ -231,6 +240,8 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { l.wantList = wl.New() } + var msgSize int + var activeEntries []*wl.Entry for _, entry := range m.Wantlist() { if entry.Cancel { log.Debugf("%s cancel %s", p, entry.Cid) @@ -239,13 +250,28 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { } else { log.Debugf("wants %s - %d", entry.Cid, 
entry.Priority) l.Wants(entry.Cid, entry.Priority) - if exists, err := e.bs.Has(entry.Cid); err == nil && exists { - e.peerRequestQueue.Push(entry.Entry, p) + blockSize, err := e.bs.GetSize(entry.Cid) + if err != nil { + if err == bstore.ErrNotFound { + continue + } + log.Error(err) + } else { + // we have the block newWorkExists = true + if msgSize + blockSize > maxMessageSize { + e.peerRequestQueue.Push(p, activeEntries...) + activeEntries = []*wl.Entry{} + msgSize = 0 + } + activeEntries = append(activeEntries, entry.Entry) + msgSize += blockSize } } } - + if len(activeEntries) > 0 { + e.peerRequestQueue.Push(p, activeEntries...) + } for _, block := range m.Blocks() { log.Debugf("got block %s %d bytes", block, len(block.RawData())) l.ReceivedBytes(len(block.RawData())) @@ -259,7 +285,7 @@ func (e *Engine) addBlock(block blocks.Block) { for _, l := range e.ledgerMap { l.lk.Lock() if entry, ok := l.WantListContains(block.Cid()); ok { - e.peerRequestQueue.Push(entry, l.Partner) + e.peerRequestQueue.Push(l.Partner, entry) work = true } l.lk.Unlock() diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index ed7d1055d..73130ca14 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "math" "strings" "sync" "testing" @@ -139,6 +138,19 @@ func TestPartnerWantsThenCancels(t *testing.T) { }, { alphabet, stringsComplement(alphabet, vowels), + alphabet[1:25], stringsComplement(alphabet[1:25], vowels), alphabet[2:25], stringsComplement(alphabet[2:25], vowels), + alphabet[3:25], stringsComplement(alphabet[3:25], vowels), alphabet[4:25], stringsComplement(alphabet[4:25], vowels), + alphabet[5:25], stringsComplement(alphabet[5:25], vowels), alphabet[6:25], stringsComplement(alphabet[6:25], vowels), + alphabet[7:25], stringsComplement(alphabet[7:25], vowels), alphabet[8:25], stringsComplement(alphabet[8:25], vowels), + alphabet[9:25], stringsComplement(alphabet[9:25], 
vowels), alphabet[10:25], stringsComplement(alphabet[10:25], vowels), + alphabet[11:25], stringsComplement(alphabet[11:25], vowels), alphabet[12:25], stringsComplement(alphabet[12:25], vowels), + alphabet[13:25], stringsComplement(alphabet[13:25], vowels), alphabet[14:25], stringsComplement(alphabet[14:25], vowels), + alphabet[15:25], stringsComplement(alphabet[15:25], vowels), alphabet[16:25], stringsComplement(alphabet[16:25], vowels), + alphabet[17:25], stringsComplement(alphabet[17:25], vowels), alphabet[18:25], stringsComplement(alphabet[18:25], vowels), + alphabet[19:25], stringsComplement(alphabet[19:25], vowels), alphabet[20:25], stringsComplement(alphabet[20:25], vowels), + alphabet[21:25], stringsComplement(alphabet[21:25], vowels), alphabet[22:25], stringsComplement(alphabet[22:25], vowels), + alphabet[23:25], stringsComplement(alphabet[23:25], vowels), alphabet[24:25], stringsComplement(alphabet[24:25], vowels), + alphabet[25:25], stringsComplement(alphabet[25:25], vowels), }, } @@ -151,20 +163,22 @@ func TestPartnerWantsThenCancels(t *testing.T) { } for i := 0; i < numRounds; i++ { + expected := make([][]string, 0, len(testcases)) + e := NewEngine(context.Background(), bs) for _, testcase := range testcases { set := testcase[0] cancels := testcase[1] keeps := stringsComplement(set, cancels) + expected = append(expected, keeps) - e := NewEngine(context.Background(), bs) partner := testutil.RandPeerIDFatal(t) partnerWants(e, set, partner) partnerCancels(e, cancels, partner) - if err := checkHandledInOrder(t, e, keeps); err != nil { - t.Logf("run #%d of %d", i, numRounds) - t.Fatal(err) - } + } + if err := checkHandledInOrder(t, e, expected); err != nil { + t.Logf("run #%d of %d", i, numRounds) + t.Fatal(err) } } } @@ -173,7 +187,7 @@ func partnerWants(e *Engine, keys []string, partner peer.ID) { add := message.New(false) for i, letter := range keys { block := blocks.NewBlock([]byte(letter)) - add.AddEntry(block.Cid(), math.MaxInt32-i) + 
add.AddEntry(block.Cid(), len(keys)-i) } e.MessageReceived(partner, add) } @@ -187,14 +201,28 @@ func partnerCancels(e *Engine, keys []string, partner peer.ID) { e.MessageReceived(partner, cancels) } -func checkHandledInOrder(t *testing.T, e *Engine, keys []string) error { - for _, k := range keys { +func checkHandledInOrder(t *testing.T, e *Engine, expected [][]string) error { + for _, keys := range expected { next := <-e.Outbox() envelope := <-next - received := envelope.Block - expected := blocks.NewBlock([]byte(k)) - if !received.Cid().Equals(expected.Cid()) { - return errors.New(fmt.Sprintln("received", string(received.RawData()), "expected", string(expected.RawData()))) + received := envelope.Message.Blocks() + // Verify payload message length + if len(received) != len(keys) { + return errors.New(fmt.Sprintln("# blocks received", len(received), "# blocks expected", len(keys))) + } + // Verify payload message contents + for _, k := range keys { + found := false + expected := blocks.NewBlock([]byte(k)) + for _, block := range received { + if block.Cid().Equals(expected.Cid()) { + found = true + break + } + } + if !found { + return errors.New(fmt.Sprintln("received", received, "expected", string(expected.RawData()))) + } } } return nil diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 78113f75d..47736a71d 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -14,7 +14,7 @@ import ( type peerRequestQueue interface { // Pop returns the next peerRequestTask. Returns nil if the peerRequestQueue is empty. 
Pop() *peerRequestTask - Push(entry *wantlist.Entry, to peer.ID) + Push(to peer.ID, entries ...*wantlist.Entry) Remove(k cid.Cid, p peer.ID) // NB: cannot expose simply expose taskQueue.Len because trashed elements @@ -46,7 +46,7 @@ type prq struct { } // Push currently adds a new peerRequestTask to the end of the list -func (tl *prq) Push(entry *wantlist.Entry, to peer.ID) { +func (tl *prq) Push(to peer.ID, entries ...*wantlist.Entry) { tl.lock.Lock() defer tl.lock.Unlock() partner, ok := tl.partners[to] @@ -58,31 +58,49 @@ func (tl *prq) Push(entry *wantlist.Entry, to peer.ID) { partner.activelk.Lock() defer partner.activelk.Unlock() - if partner.activeBlocks.Has(entry.Cid) { - return + + var priority int + newEntries := make([]*wantlist.Entry, 0, len(entries)) + for _, entry := range entries { + if partner.activeBlocks.Has(entry.Cid) { + continue + } + if task, ok := tl.taskMap[taskEntryKey(to, entry.Cid)]; ok { + if entry.Priority > task.Priority { + task.Priority = entry.Priority + partner.taskQueue.Update(task.index) + } + continue + } + if entry.Priority > priority { + priority = entry.Priority + } + newEntries = append(newEntries, entry) } - if task, ok := tl.taskMap[taskKey(to, entry.Cid)]; ok { - task.Entry.Priority = entry.Priority - partner.taskQueue.Update(task.index) + if len(newEntries) == 0 { return } task := &peerRequestTask{ - Entry: entry, + Entries: newEntries, Target: to, created: time.Now(), - Done: func() { + Done: func(e []*wantlist.Entry) { tl.lock.Lock() - partner.TaskDone(entry.Cid) + for _, entry := range e { + partner.TaskDone(entry.Cid) + } tl.pQueue.Update(partner.Index()) tl.lock.Unlock() }, } - + task.Priority = priority partner.taskQueue.Push(task) - tl.taskMap[task.Key()] = task - partner.requests++ + for _, entry := range newEntries { + tl.taskMap[taskEntryKey(to, entry.Cid)] = task + } + partner.requests += len(newEntries) tl.pQueue.Update(partner.Index()) } @@ -98,14 +116,23 @@ func (tl *prq) Pop() *peerRequestTask { var out 
*peerRequestTask for partner.taskQueue.Len() > 0 && partner.freezeVal == 0 { out = partner.taskQueue.Pop().(*peerRequestTask) - delete(tl.taskMap, out.Key()) - if out.trash { - out = nil - continue // discarding tasks that have been removed - } - partner.StartTask(out.Entry.Cid) - partner.requests-- + newEntries := make([]*wantlist.Entry, 0, len(out.Entries)) + for _, entry := range out.Entries { + delete(tl.taskMap, taskEntryKey(out.Target, entry.Cid)) + if entry.Trash { + continue + } + partner.requests-- + partner.StartTask(entry.Cid) + newEntries = append(newEntries, entry) + } + if len(newEntries) > 0 { + out.Entries = newEntries + } else { + out = nil // discarding tasks that have been removed + continue + } break // and return |out| } @@ -116,12 +143,17 @@ func (tl *prq) Pop() *peerRequestTask { // Remove removes a task from the queue func (tl *prq) Remove(k cid.Cid, p peer.ID) { tl.lock.Lock() - t, ok := tl.taskMap[taskKey(p, k)] + t, ok := tl.taskMap[taskEntryKey(p, k)] if ok { - // remove the task "lazily" - // simply mark it as trash, so it'll be dropped when popped off the - // queue. - t.trash = true + for _, entry := range t.Entries { + if entry.Cid.Equals(k) { + // remove the task "lazily" + // simply mark it as trash, so it'll be dropped when popped off the + // queue. + entry.Trash = true + break + } + } // having canceled a block, we now account for that in the given partner partner := tl.partners[p] @@ -166,24 +198,18 @@ func (tl *prq) thawRound() { } type peerRequestTask struct { - Entry *wantlist.Entry - Target peer.ID + Entries []*wantlist.Entry + Priority int + Target peer.ID // A callback to signal that this task has been completed - Done func() + Done func([]*wantlist.Entry) - // trash in a book-keeping field - trash bool // created marks the time that the task was added to the queue created time.Time index int // book-keeping field used by the pq container } -// Key uniquely identifies a task. 
-func (t *peerRequestTask) Key() string { - return taskKey(t.Target, t.Entry.Cid) -} - // Index implements pq.Elem func (t *peerRequestTask) Index() int { return t.index @@ -194,8 +220,8 @@ func (t *peerRequestTask) SetIndex(i int) { t.index = i } -// taskKey returns a key that uniquely identifies a task. -func taskKey(p peer.ID, k cid.Cid) string { +// taskEntryKey returns a key that uniquely identifies a task. +func taskEntryKey(p peer.ID, k cid.Cid) string { return string(p) + k.KeyString() } @@ -208,7 +234,7 @@ var FIFO = func(a, b *peerRequestTask) bool { // different peers, the oldest task is prioritized. var V1 = func(a, b *peerRequestTask) bool { if a.Target == b.Target { - return a.Entry.Priority > b.Entry.Priority + return a.Priority > b.Priority } return FIFO(a, b) } diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 32e93a272..d6ad8989a 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -45,7 +45,7 @@ func TestPushPop(t *testing.T) { t.Log(partner.String()) c := cid.NewCidV0(u.Hash([]byte(letter))) - prq.Push(&wantlist.Entry{Cid: c, Priority: math.MaxInt32 - index}, partner) + prq.Push(partner, &wantlist.Entry{Cid: c, Priority: math.MaxInt32 - index}) } for _, consonant := range consonants { c := cid.NewCidV0(u.Hash([]byte(consonant))) @@ -61,7 +61,9 @@ func TestPushPop(t *testing.T) { break } - out = append(out, received.Entry.Cid.String()) + for _, entry := range received.Entries { + out = append(out, entry.Cid.String()) + } } // Entries popped should already be in correct order @@ -85,10 +87,10 @@ func TestPeerRepeats(t *testing.T) { for i := 0; i < 5; i++ { elcid := cid.NewCidV0(u.Hash([]byte(fmt.Sprint(i)))) - prq.Push(&wantlist.Entry{Cid: elcid}, a) - prq.Push(&wantlist.Entry{Cid: elcid}, b) - prq.Push(&wantlist.Entry{Cid: elcid}, c) - prq.Push(&wantlist.Entry{Cid: elcid}, d) + prq.Push(a, &wantlist.Entry{Cid: elcid}) + prq.Push(b, 
&wantlist.Entry{Cid: elcid}) + prq.Push(c, &wantlist.Entry{Cid: elcid}) + prq.Push(d, &wantlist.Entry{Cid: elcid}) } // now, pop off four entries, there should be one from each @@ -117,7 +119,7 @@ func TestPeerRepeats(t *testing.T) { for blockI := 0; blockI < 4; blockI++ { for i := 0; i < 4; i++ { // its okay to mark the same task done multiple times here (JUST FOR TESTING) - tasks[i].Done() + tasks[i].Done(tasks[i].Entries) ntask := prq.Pop() if ntask.Target != tasks[i].Target { diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 22819240c..ad6b0f03b 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -24,6 +24,8 @@ type Entry struct { Priority int SesTrk map[uint64]struct{} + // Trash in a book-keeping field + Trash bool } // NewRefEntry creates a new reference tracked wantlist entry diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go index 87efb8605..8d033ff9b 100644 --- a/bitswap/wantmanager.go +++ b/bitswap/wantmanager.go @@ -114,16 +114,20 @@ func (pm *WantManager) ConnectedPeers() []peer.ID { return <-resp } -func (pm *WantManager) SendBlock(ctx context.Context, env *engine.Envelope) { +func (pm *WantManager) SendBlocks(ctx context.Context, env *engine.Envelope) { // Blocks need to be sent synchronously to maintain proper backpressure // throughout the network stack defer env.Sent() - pm.sentHistogram.Observe(float64(len(env.Block.RawData()))) - + msgSize := 0 msg := bsmsg.New(false) - msg.AddBlock(env.Block) - log.Infof("Sending block %s to %s", env.Block, env.Peer) + for _, block := range env.Message.Blocks() { + msgSize += len(block.RawData()) + msg.AddBlock(block) + log.Infof("Sending block %s to %s", block, env.Peer) + } + + pm.sentHistogram.Observe(float64(msgSize)) err := pm.network.SendMessage(ctx, env.Peer, msg) if err != nil { log.Infof("sendblock error: %s", err) diff --git a/bitswap/workers.go b/bitswap/workers.go index 41ede8e99..3fbe1bb15 100644 --- a/bitswap/workers.go +++ 
b/bitswap/workers.go @@ -59,24 +59,27 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { if !ok { continue } - log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableF(func() map[string]interface{} { - return logging.LoggableMap{ - "ID": id, - "Target": envelope.Peer.Pretty(), - "Block": envelope.Block.Cid().String(), - } - })) - // update the BS ledger to reflect sent message // TODO: Should only track *useful* messages in ledger outgoing := bsmsg.New(false) - outgoing.AddBlock(envelope.Block) + for _, block := range envelope.Message.Blocks() { + log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableF(func() map[string]interface{} { + return logging.LoggableMap{ + "ID": id, + "Target": envelope.Peer.Pretty(), + "Block": block.Cid().String(), + } + })) + outgoing.AddBlock(block) + } bs.engine.MessageSent(envelope.Peer, outgoing) - bs.wm.SendBlock(ctx, envelope) + bs.wm.SendBlocks(ctx, envelope) bs.counterLk.Lock() - bs.counters.blocksSent++ - bs.counters.dataSent += uint64(len(envelope.Block.RawData())) + for _, block := range envelope.Message.Blocks() { + bs.counters.blocksSent++ + bs.counters.dataSent += uint64(len(block.RawData())) + } bs.counterLk.Unlock() case <-ctx.Done(): return From 7e18d13fd45c7b0619e358d1c8eef58195153e42 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Oct 2018 09:33:16 -0700 Subject: [PATCH 0650/1035] use CIDs directly as map keys 1. Use a `taskEntryKey` *type* instead of a string (now that both peer IDs and CIDs are hashable). 2. Get rid of all uses of `cid.KeyString` (mostly just for type safety). This also means we don't need to parse the CID and allocate to convert it *back* from a string. 
This commit was moved from ipfs/go-bitswap@77ea854e9591214d21d68ba9b0f50beaef8e471c --- bitswap/decision/peer_request_queue.go | 19 +++++------ bitswap/message/message.go | 17 +++++----- bitswap/message/message_test.go | 12 +++---- bitswap/session.go | 23 ++++++-------- bitswap/wantlist/wantlist.go | 44 +++++++++++--------------- 5 files changed, 53 insertions(+), 62 deletions(-) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 47736a71d..c02329fc3 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -23,7 +23,7 @@ type peerRequestQueue interface { func newPRQ() *prq { return &prq{ - taskMap: make(map[string]*peerRequestTask), + taskMap: make(map[taskEntryKey]*peerRequestTask), partners: make(map[peer.ID]*activePartner), frozen: make(map[peer.ID]*activePartner), pQueue: pq.New(partnerCompare), @@ -39,7 +39,7 @@ var _ peerRequestQueue = &prq{} type prq struct { lock sync.Mutex pQueue pq.PQ - taskMap map[string]*peerRequestTask + taskMap map[taskEntryKey]*peerRequestTask partners map[peer.ID]*activePartner frozen map[peer.ID]*activePartner @@ -65,7 +65,7 @@ func (tl *prq) Push(to peer.ID, entries ...*wantlist.Entry) { if partner.activeBlocks.Has(entry.Cid) { continue } - if task, ok := tl.taskMap[taskEntryKey(to, entry.Cid)]; ok { + if task, ok := tl.taskMap[taskEntryKey{to, entry.Cid}]; ok { if entry.Priority > task.Priority { task.Priority = entry.Priority partner.taskQueue.Update(task.index) @@ -98,7 +98,7 @@ func (tl *prq) Push(to peer.ID, entries ...*wantlist.Entry) { task.Priority = priority partner.taskQueue.Push(task) for _, entry := range newEntries { - tl.taskMap[taskEntryKey(to, entry.Cid)] = task + tl.taskMap[taskEntryKey{to, entry.Cid}] = task } partner.requests += len(newEntries) tl.pQueue.Update(partner.Index()) @@ -119,7 +119,7 @@ func (tl *prq) Pop() *peerRequestTask { newEntries := make([]*wantlist.Entry, 0, len(out.Entries)) for _, entry := range 
out.Entries { - delete(tl.taskMap, taskEntryKey(out.Target, entry.Cid)) + delete(tl.taskMap, taskEntryKey{out.Target, entry.Cid}) if entry.Trash { continue } @@ -143,7 +143,7 @@ func (tl *prq) Pop() *peerRequestTask { // Remove removes a task from the queue func (tl *prq) Remove(k cid.Cid, p peer.ID) { tl.lock.Lock() - t, ok := tl.taskMap[taskEntryKey(p, k)] + t, ok := tl.taskMap[taskEntryKey{p, k}] if ok { for _, entry := range t.Entries { if entry.Cid.Equals(k) { @@ -220,9 +220,10 @@ func (t *peerRequestTask) SetIndex(i int) { t.index = i } -// taskEntryKey returns a key that uniquely identifies a task. -func taskEntryKey(p peer.ID, k cid.Cid) string { - return string(p) + k.KeyString() +// taskEntryKey is a key identifying a task. +type taskEntryKey struct { + p peer.ID + k cid.Cid } // FIFO is a basic task comparator that returns tasks in the order created. diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 92f0259cd..e200e8d86 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -49,8 +49,8 @@ type Exportable interface { type impl struct { full bool - wantlist map[string]*Entry - blocks map[string]blocks.Block + wantlist map[cid.Cid]*Entry + blocks map[cid.Cid]blocks.Block } func New(full bool) BitSwapMessage { @@ -59,8 +59,8 @@ func New(full bool) BitSwapMessage { func newMsg(full bool) *impl { return &impl{ - blocks: make(map[string]blocks.Block), - wantlist: make(map[string]*Entry), + blocks: make(map[cid.Cid]blocks.Block), + wantlist: make(map[cid.Cid]*Entry), full: full, } } @@ -135,7 +135,7 @@ func (m *impl) Blocks() []blocks.Block { } func (m *impl) Cancel(k cid.Cid) { - delete(m.wantlist, k.KeyString()) + delete(m.wantlist, k) m.addEntry(k, 0, true) } @@ -144,13 +144,12 @@ func (m *impl) AddEntry(k cid.Cid, priority int) { } func (m *impl) addEntry(c cid.Cid, priority int, cancel bool) { - k := c.KeyString() - e, exists := m.wantlist[k] + e, exists := m.wantlist[c] if exists { e.Priority = priority 
e.Cancel = cancel } else { - m.wantlist[k] = &Entry{ + m.wantlist[c] = &Entry{ Entry: &wantlist.Entry{ Cid: c, Priority: priority, @@ -161,7 +160,7 @@ func (m *impl) addEntry(c cid.Cid, priority int, cancel bool) { } func (m *impl) AddBlock(b blocks.Block) { - m.blocks[b.Cid().KeyString()] = b + m.blocks[b.Cid()] = b } func FromNet(r io.Reader) (BitSwapMessage, error) { diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index a3e1cd8f9..35c026739 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -121,13 +121,13 @@ func TestToNetFromNetPreservesWantList(t *testing.T) { t.Fatal("fullness attribute got dropped on marshal") } - keys := make(map[string]bool) + keys := make(map[cid.Cid]bool) for _, k := range copied.Wantlist() { - keys[k.Cid.KeyString()] = true + keys[k.Cid] = true } for _, k := range original.Wantlist() { - if _, ok := keys[k.Cid.KeyString()]; !ok { + if _, ok := keys[k.Cid]; !ok { t.Fatalf("Key Missing: \"%v\"", k) } } @@ -151,13 +151,13 @@ func TestToAndFromNetMessage(t *testing.T) { t.Fatal(err) } - keys := make(map[string]bool) + keys := make(map[cid.Cid]bool) for _, b := range m2.Blocks() { - keys[b.Cid().KeyString()] = true + keys[b.Cid()] = true } for _, b := range original.Blocks() { - if _, ok := keys[b.Cid().KeyString()]; !ok { + if _, ok := keys[b.Cid()]; !ok { t.Fail() } } diff --git a/bitswap/session.go b/bitswap/session.go index a3b6005b7..063a40d93 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -33,7 +33,7 @@ type Session struct { interestReqs chan interestReq interest *lru.Cache - liveWants map[string]time.Time + liveWants map[cid.Cid]time.Time tick *time.Timer baseTickDelay time.Duration @@ -54,7 +54,7 @@ type Session struct { func (bs *Bitswap) NewSession(ctx context.Context) *Session { s := &Session{ activePeers: make(map[peer.ID]struct{}), - liveWants: make(map[string]time.Time), + liveWants: make(map[cid.Cid]time.Time), newReqs: make(chan []cid.Cid), 
cancelKeys: make(chan []cid.Cid), tofetch: newCidQueue(), @@ -87,8 +87,7 @@ func (bs *Bitswap) removeSession(s *Session) { live := make([]cid.Cid, 0, len(s.liveWants)) for c := range s.liveWants { - cs, _ := cid.Cast([]byte(c)) - live = append(live, cs) + live = append(live, c) } bs.CancelWants(live, s.id) @@ -147,7 +146,7 @@ func (s *Session) isLiveWant(c cid.Cid) bool { } func (s *Session) interestedIn(c cid.Cid) bool { - return s.interest.Contains(c.KeyString()) || s.isLiveWant(c) + return s.interest.Contains(c) || s.isLiveWant(c) } const provSearchDelay = time.Second * 10 @@ -188,7 +187,7 @@ func (s *Session) run(ctx context.Context) { s.resetTick() case keys := <-s.newReqs: for _, k := range keys { - s.interest.Add(k.KeyString(), nil) + s.interest.Add(k, nil) } if len(s.liveWants) < activeWantsLimit { toadd := activeWantsLimit - len(s.liveWants) @@ -211,8 +210,7 @@ func (s *Session) run(ctx context.Context) { live := make([]cid.Cid, 0, len(s.liveWants)) now := time.Now() for c := range s.liveWants { - cs, _ := cid.Cast([]byte(c)) - live = append(live, cs) + live = append(live, c) s.liveWants[c] = now } @@ -250,7 +248,7 @@ func (s *Session) run(ctx context.Context) { } func (s *Session) cidIsWanted(c cid.Cid) bool { - _, ok := s.liveWants[c.KeyString()] + _, ok := s.liveWants[c] if !ok { ok = s.tofetch.Has(c) } @@ -261,11 +259,10 @@ func (s *Session) cidIsWanted(c cid.Cid) bool { func (s *Session) receiveBlock(ctx context.Context, blk blocks.Block) { c := blk.Cid() if s.cidIsWanted(c) { - ks := c.KeyString() - tval, ok := s.liveWants[ks] + tval, ok := s.liveWants[c] if ok { s.latTotal += time.Since(tval) - delete(s.liveWants, ks) + delete(s.liveWants, c) } else { s.tofetch.Remove(c) } @@ -281,7 +278,7 @@ func (s *Session) receiveBlock(ctx context.Context, blk blocks.Block) { func (s *Session) wantBlocks(ctx context.Context, ks []cid.Cid) { now := time.Now() for _, c := range ks { - s.liveWants[c.KeyString()] = now + s.liveWants[c] = now } 
s.bs.wm.WantBlocks(ctx, ks, s.activePeersArr, s.id) } diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index ad6b0f03b..83130072d 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -11,12 +11,12 @@ import ( type ThreadSafe struct { lk sync.RWMutex - set map[string]*Entry + set map[cid.Cid]*Entry } // not threadsafe type Wantlist struct { - set map[string]*Entry + set map[cid.Cid]*Entry } type Entry struct { @@ -45,13 +45,13 @@ func (es entrySlice) Less(i, j int) bool { return es[i].Priority > es[j].Priorit func NewThreadSafe() *ThreadSafe { return &ThreadSafe{ - set: make(map[string]*Entry), + set: make(map[cid.Cid]*Entry), } } func New() *Wantlist { return &Wantlist{ - set: make(map[string]*Entry), + set: make(map[cid.Cid]*Entry), } } @@ -66,13 +66,12 @@ func New() *Wantlist { func (w *ThreadSafe) Add(c cid.Cid, priority int, ses uint64) bool { w.lk.Lock() defer w.lk.Unlock() - k := c.KeyString() - if e, ok := w.set[k]; ok { + if e, ok := w.set[c]; ok { e.SesTrk[ses] = struct{}{} return false } - w.set[k] = &Entry{ + w.set[c] = &Entry{ Cid: c, Priority: priority, SesTrk: map[uint64]struct{}{ses: struct{}{}}, @@ -85,12 +84,11 @@ func (w *ThreadSafe) Add(c cid.Cid, priority int, ses uint64) bool { func (w *ThreadSafe) AddEntry(e *Entry, ses uint64) bool { w.lk.Lock() defer w.lk.Unlock() - k := e.Cid.KeyString() - if ex, ok := w.set[k]; ok { + if ex, ok := w.set[e.Cid]; ok { ex.SesTrk[ses] = struct{}{} return false } - w.set[k] = e + w.set[e.Cid] = e e.SesTrk[ses] = struct{}{} return true } @@ -102,15 +100,14 @@ func (w *ThreadSafe) AddEntry(e *Entry, ses uint64) bool { func (w *ThreadSafe) Remove(c cid.Cid, ses uint64) bool { w.lk.Lock() defer w.lk.Unlock() - k := c.KeyString() - e, ok := w.set[k] + e, ok := w.set[c] if !ok { return false } delete(e.SesTrk, ses) if len(e.SesTrk) == 0 { - delete(w.set, k) + delete(w.set, c) return true } return false @@ -121,7 +118,7 @@ func (w *ThreadSafe) Remove(c cid.Cid, ses 
uint64) bool { func (w *ThreadSafe) Contains(k cid.Cid) (*Entry, bool) { w.lk.RLock() defer w.lk.RUnlock() - e, ok := w.set[k.KeyString()] + e, ok := w.set[k] return e, ok } @@ -152,12 +149,11 @@ func (w *Wantlist) Len() int { } func (w *Wantlist) Add(c cid.Cid, priority int) bool { - k := c.KeyString() - if _, ok := w.set[k]; ok { + if _, ok := w.set[c]; ok { return false } - w.set[k] = &Entry{ + w.set[c] = &Entry{ Cid: c, Priority: priority, } @@ -166,27 +162,25 @@ func (w *Wantlist) Add(c cid.Cid, priority int) bool { } func (w *Wantlist) AddEntry(e *Entry) bool { - k := e.Cid.KeyString() - if _, ok := w.set[k]; ok { + if _, ok := w.set[e.Cid]; ok { return false } - w.set[k] = e + w.set[e.Cid] = e return true } func (w *Wantlist) Remove(c cid.Cid) bool { - k := c.KeyString() - _, ok := w.set[k] + _, ok := w.set[c] if !ok { return false } - delete(w.set, k) + delete(w.set, c) return true } -func (w *Wantlist) Contains(k cid.Cid) (*Entry, bool) { - e, ok := w.set[k.KeyString()] +func (w *Wantlist) Contains(c cid.Cid) (*Entry, bool) { + e, ok := w.set[c] return e, ok } From 96f299e55580c27fea5f12a37179926636ca6845 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Oct 2018 10:35:44 -0700 Subject: [PATCH 0651/1035] allocate less in protobufs This was showing up as a major source of heap allocations (well, at least when the DHT is in client-only mode). 
This commit was moved from ipfs/go-bitswap@243a6c53b17d485c05b3b7cb3871937c5329a405 --- bitswap/message/message.go | 27 +++--- bitswap/message/message_test.go | 11 ++- bitswap/message/pb/Makefile | 2 +- bitswap/message/pb/message.pb.go | 141 ++++++++++++------------------- bitswap/message/pb/message.proto | 8 +- 5 files changed, 76 insertions(+), 113 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index e200e8d86..3289507dd 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -71,17 +71,17 @@ type Entry struct { } func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) { - m := newMsg(pbm.GetWantlist().GetFull()) - for _, e := range pbm.GetWantlist().GetEntries() { - c, err := cid.Cast([]byte(e.GetBlock())) + m := newMsg(pbm.Wantlist.Full) + for _, e := range pbm.Wantlist.Entries { + c, err := cid.Cast([]byte(e.Block)) if err != nil { return nil, fmt.Errorf("incorrectly formatted cid in wantlist: %s", err) } - m.addEntry(c, int(e.GetPriority()), e.GetCancel()) + m.addEntry(c, int(e.Priority), e.Cancel) } // deprecated - for _, d := range pbm.GetBlocks() { + for _, d := range pbm.Blocks { // CIDv0, sha256, protobuf only b := blocks.NewBlock(d) m.AddBlock(b) @@ -179,10 +179,9 @@ func FromPBReader(pbr ggio.Reader) (BitSwapMessage, error) { func (m *impl) ToProtoV0() *pb.Message { pbm := new(pb.Message) - pbm.Wantlist = new(pb.Message_Wantlist) - pbm.Wantlist.Entries = make([]*pb.Message_Wantlist_Entry, 0, len(m.wantlist)) + pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist)) for _, e := range m.wantlist { - pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, &pb.Message_Wantlist_Entry{ + pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, pb.Message_Wantlist_Entry{ Block: e.Cid.Bytes(), Priority: int32(e.Priority), Cancel: e.Cancel, @@ -200,10 +199,9 @@ func (m *impl) ToProtoV0() *pb.Message { func (m *impl) ToProtoV1() *pb.Message { pbm := new(pb.Message) - pbm.Wantlist = 
new(pb.Message_Wantlist) - pbm.Wantlist.Entries = make([]*pb.Message_Wantlist_Entry, 0, len(m.wantlist)) + pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist)) for _, e := range m.wantlist { - pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, &pb.Message_Wantlist_Entry{ + pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, pb.Message_Wantlist_Entry{ Block: e.Cid.Bytes(), Priority: int32(e.Priority), Cancel: e.Cancel, @@ -212,13 +210,12 @@ func (m *impl) ToProtoV1() *pb.Message { pbm.Wantlist.Full = m.full blocks := m.Blocks() - pbm.Payload = make([]*pb.Message_Block, 0, len(blocks)) + pbm.Payload = make([]pb.Message_Block, 0, len(blocks)) for _, b := range blocks { - blk := &pb.Message_Block{ + pbm.Payload = append(pbm.Payload, pb.Message_Block{ Data: b.RawData(), Prefix: b.Cid().Prefix().Bytes(), - } - pbm.Payload = append(pbm.Payload, blk) + }) } return pbm } diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 35c026739..686ac4a4a 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -20,7 +20,7 @@ func TestAppendWanted(t *testing.T) { m := New(true) m.AddEntry(str, 1) - if !wantlistContains(m.ToProtoV0().GetWantlist(), str) { + if !wantlistContains(&m.ToProtoV0().Wantlist, str) { t.Fail() } } @@ -28,11 +28,10 @@ func TestAppendWanted(t *testing.T) { func TestNewMessageFromProto(t *testing.T) { str := mkFakeCid("a_key") protoMessage := new(pb.Message) - protoMessage.Wantlist = new(pb.Message_Wantlist) - protoMessage.Wantlist.Entries = []*pb.Message_Wantlist_Entry{ + protoMessage.Wantlist.Entries = []pb.Message_Wantlist_Entry{ {Block: str.Bytes()}, } - if !wantlistContains(protoMessage.Wantlist, str) { + if !wantlistContains(&protoMessage.Wantlist, str) { t.Fail() } m, err := newMessageFromProto(*protoMessage) @@ -40,7 +39,7 @@ func TestNewMessageFromProto(t *testing.T) { t.Fatal(err) } - if !wantlistContains(m.ToProtoV0().GetWantlist(), str) { + if 
!wantlistContains(&m.ToProtoV0().Wantlist, str) { t.Fail() } } @@ -94,7 +93,7 @@ func TestCopyProtoByValue(t *testing.T) { m := New(true) protoBeforeAppend := m.ToProtoV0() m.AddEntry(str, 1) - if wantlistContains(protoBeforeAppend.GetWantlist(), str) { + if wantlistContains(&protoBeforeAppend.Wantlist, str) { t.Fail() } } diff --git a/bitswap/message/pb/Makefile b/bitswap/message/pb/Makefile index eb14b5768..df34e54b0 100644 --- a/bitswap/message/pb/Makefile +++ b/bitswap/message/pb/Makefile @@ -4,7 +4,7 @@ GO = $(PB:.proto=.pb.go) all: $(GO) %.pb.go: %.proto - protoc --proto_path=$(GOPATH)/src:. --gogofast_out=. $< + protoc --proto_path=$(GOPATH)/src:. --gogofaster_out=. $< clean: rm -f *.pb.go diff --git a/bitswap/message/pb/message.pb.go b/bitswap/message/pb/message.pb.go index 2c668d1a4..9a6b2821b 100644 --- a/bitswap/message/pb/message.pb.go +++ b/bitswap/message/pb/message.pb.go @@ -6,6 +6,7 @@ package bitswap_message_pb import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import _ "github.com/gogo/protobuf/gogoproto" import io "io" @@ -21,19 +22,18 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type Message struct { - Wantlist *Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist" json:"wantlist,omitempty"` - Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks" json:"blocks,omitempty"` - Payload []*Message_Block `protobuf:"bytes,3,rep,name=payload" json:"payload,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Wantlist Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist" json:"wantlist"` + Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks" json:"blocks,omitempty"` + Payload []Message_Block `protobuf:"bytes,3,rep,name=payload" json:"payload"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { 
return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_message_1e228ff77b8fb7b4, []int{0} + return fileDescriptor_message_c28309e4affd853b, []int{0} } func (m *Message) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -62,11 +62,11 @@ func (m *Message) XXX_DiscardUnknown() { var xxx_messageInfo_Message proto.InternalMessageInfo -func (m *Message) GetWantlist() *Message_Wantlist { +func (m *Message) GetWantlist() Message_Wantlist { if m != nil { return m.Wantlist } - return nil + return Message_Wantlist{} } func (m *Message) GetBlocks() [][]byte { @@ -76,7 +76,7 @@ func (m *Message) GetBlocks() [][]byte { return nil } -func (m *Message) GetPayload() []*Message_Block { +func (m *Message) GetPayload() []Message_Block { if m != nil { return m.Payload } @@ -84,18 +84,17 @@ func (m *Message) GetPayload() []*Message_Block { } type Message_Wantlist struct { - Entries []*Message_Wantlist_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` - Full bool `protobuf:"varint,2,opt,name=full,proto3" json:"full,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Entries []Message_Wantlist_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries"` + Full bool `protobuf:"varint,2,opt,name=full,proto3" json:"full,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Message_Wantlist) Reset() { *m = Message_Wantlist{} } func (m *Message_Wantlist) String() string { return proto.CompactTextString(m) } func (*Message_Wantlist) ProtoMessage() {} func (*Message_Wantlist) Descriptor() ([]byte, []int) { - return fileDescriptor_message_1e228ff77b8fb7b4, []int{0, 0} + return fileDescriptor_message_c28309e4affd853b, []int{0, 0} } func (m *Message_Wantlist) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -124,7 +123,7 @@ func (m *Message_Wantlist) 
XXX_DiscardUnknown() { var xxx_messageInfo_Message_Wantlist proto.InternalMessageInfo -func (m *Message_Wantlist) GetEntries() []*Message_Wantlist_Entry { +func (m *Message_Wantlist) GetEntries() []Message_Wantlist_Entry { if m != nil { return m.Entries } @@ -143,7 +142,6 @@ type Message_Wantlist_Entry struct { Priority int32 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` Cancel bool `protobuf:"varint,3,opt,name=cancel,proto3" json:"cancel,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } @@ -151,7 +149,7 @@ func (m *Message_Wantlist_Entry) Reset() { *m = Message_Wantlist_Entry{} func (m *Message_Wantlist_Entry) String() string { return proto.CompactTextString(m) } func (*Message_Wantlist_Entry) ProtoMessage() {} func (*Message_Wantlist_Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_message_1e228ff77b8fb7b4, []int{0, 0, 0} + return fileDescriptor_message_c28309e4affd853b, []int{0, 0, 0} } func (m *Message_Wantlist_Entry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -205,7 +203,6 @@ type Message_Block struct { Prefix []byte `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } @@ -213,7 +210,7 @@ func (m *Message_Block) Reset() { *m = Message_Block{} } func (m *Message_Block) String() string { return proto.CompactTextString(m) } func (*Message_Block) ProtoMessage() {} func (*Message_Block) Descriptor() ([]byte, []int) { - return fileDescriptor_message_1e228ff77b8fb7b4, []int{0, 1} + return fileDescriptor_message_c28309e4affd853b, []int{0, 1} } func (m *Message_Block) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -277,16 +274,14 @@ func (m *Message) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Wantlist != nil { - 
dAtA[i] = 0xa - i++ - i = encodeVarintMessage(dAtA, i, uint64(m.Wantlist.Size())) - n1, err := m.Wantlist.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 + dAtA[i] = 0xa + i++ + i = encodeVarintMessage(dAtA, i, uint64(m.Wantlist.Size())) + n1, err := m.Wantlist.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n1 if len(m.Blocks) > 0 { for _, b := range m.Blocks { dAtA[i] = 0x12 @@ -307,9 +302,6 @@ func (m *Message) MarshalTo(dAtA []byte) (int, error) { i += n } } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } return i, nil } @@ -350,9 +342,6 @@ func (m *Message_Wantlist) MarshalTo(dAtA []byte) (int, error) { } i++ } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } return i, nil } @@ -392,9 +381,6 @@ func (m *Message_Wantlist_Entry) MarshalTo(dAtA []byte) (int, error) { } i++ } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } return i, nil } @@ -425,9 +411,6 @@ func (m *Message_Block) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintMessage(dAtA, i, uint64(len(m.Data))) i += copy(dAtA[i:], m.Data) } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } return i, nil } @@ -443,10 +426,8 @@ func encodeVarintMessage(dAtA []byte, offset int, v uint64) int { func (m *Message) Size() (n int) { var l int _ = l - if m.Wantlist != nil { - l = m.Wantlist.Size() - n += 1 + l + sovMessage(uint64(l)) - } + l = m.Wantlist.Size() + n += 1 + l + sovMessage(uint64(l)) if len(m.Blocks) > 0 { for _, b := range m.Blocks { l = len(b) @@ -459,9 +440,6 @@ func (m *Message) Size() (n int) { n += 1 + l + sovMessage(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -477,9 +455,6 @@ func (m *Message_Wantlist) Size() (n int) { if m.Full { n += 2 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -496,9 +471,6 @@ func (m *Message_Wantlist_Entry) Size() (n int) { 
if m.Cancel { n += 2 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -513,9 +485,6 @@ func (m *Message_Block) Size() (n int) { if l > 0 { n += 1 + l + sovMessage(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -587,9 +556,6 @@ func (m *Message) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Wantlist == nil { - m.Wantlist = &Message_Wantlist{} - } if err := m.Wantlist.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -649,7 +615,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Payload = append(m.Payload, &Message_Block{}) + m.Payload = append(m.Payload, Message_Block{}) if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -666,7 +632,6 @@ func (m *Message) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -731,7 +696,7 @@ func (m *Message_Wantlist) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Entries = append(m.Entries, &Message_Wantlist_Entry{}) + m.Entries = append(m.Entries, Message_Wantlist_Entry{}) if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -768,7 +733,6 @@ func (m *Message_Wantlist) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -889,7 +853,6 @@ func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -1002,7 +965,6 @@ func (m *Message_Block) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1117,26 +1079,29 @@ var ( ErrIntOverflowMessage = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("message.proto", fileDescriptor_message_1e228ff77b8fb7b4) } +func init() { proto.RegisterFile("message.proto", fileDescriptor_message_c28309e4affd853b) } -var fileDescriptor_message_1e228ff77b8fb7b4 = []byte{ - // 287 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xb1, 0x4e, 0xf3, 0x30, - 0x14, 0x85, 0xe5, 0xe6, 0x4f, 0x1b, 0xdd, 0xe6, 0x5f, 0x2c, 0x84, 0xac, 0x0c, 0x55, 0x40, 0x0c, - 0x11, 0x83, 0x87, 0x76, 0x64, 0x41, 0x15, 0x8c, 0x0c, 0x78, 0x61, 0x76, 0x52, 0x17, 0x59, 0x98, - 0x24, 0xb2, 0x8d, 0x4a, 0x9e, 0x82, 0xc7, 0xe1, 0x15, 0x18, 0x79, 0x04, 0x94, 0x27, 0x41, 0xb9, - 0x75, 0xb2, 0x20, 0x21, 0xb6, 0x7b, 0xac, 0xf3, 0x1d, 0x9f, 0x6b, 0xc3, 0xff, 0x67, 0xe5, 0x9c, - 0x7c, 0x54, 0xbc, 0xb5, 0x8d, 0x6f, 0x28, 0x2d, 0xb5, 0x77, 0x07, 0xd9, 0xf2, 0xe9, 0xb8, 0x3c, - 0x7f, 0x8b, 0x60, 0x71, 0x77, 0x94, 0xf4, 0x1a, 0x92, 0x83, 0xac, 0xbd, 0xd1, 0xce, 0x33, 0x92, - 0x93, 0x62, 0xb9, 0xbe, 0xe0, 0x3f, 0x11, 0x1e, 0xec, 0xfc, 0x21, 0x78, 0xc5, 0x44, 0xd1, 0x53, - 0x98, 0x97, 0xa6, 0xa9, 0x9e, 0x1c, 0x9b, 0xe5, 0x51, 0x91, 0x8a, 0xa0, 0xe8, 0x15, 0x2c, 0x5a, - 0xd9, 0x99, 0x46, 0xee, 0x58, 0x94, 0x47, 0xc5, 0x72, 0x7d, 0xf6, 0x5b, 0xf0, 0x76, 0x80, 0xc4, - 0x48, 0x64, 0xef, 0x04, 0x92, 0xf1, 0x2e, 0x7a, 0x03, 0x0b, 0x55, 0x7b, 0xab, 0x95, 0x63, 0x04, - 0x93, 0x2e, 0xff, 0x52, 0x91, 0xdf, 0xd6, 0xde, 0x76, 0x62, 0x44, 0x29, 0x85, 0x7f, 0xfb, 0x17, - 0x63, 0xd8, 0x2c, 0x27, 0x45, 0x22, 0x70, 0xce, 0xee, 0x21, 0x46, 0x17, 0x3d, 0x81, 0x18, 0x6b, - 0xe3, 0x1b, 0xa4, 0xe2, 0x28, 0x68, 0x06, 0x49, 0x6b, 0x75, 0x63, 0xb5, 0xef, 0x10, 
0x8b, 0xc5, - 0xa4, 0x87, 0xb5, 0x2b, 0x59, 0x57, 0xca, 0xb0, 0x08, 0x03, 0x83, 0xca, 0x36, 0x10, 0xe3, 0x2e, - 0x83, 0xa1, 0xb5, 0x6a, 0xaf, 0x5f, 0x43, 0x66, 0x50, 0x43, 0x8f, 0x9d, 0xf4, 0x12, 0x03, 0x53, - 0x81, 0xf3, 0x36, 0xfd, 0xe8, 0x57, 0xe4, 0xb3, 0x5f, 0x91, 0xaf, 0x7e, 0x45, 0xca, 0x39, 0x7e, - 0xdd, 0xe6, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xd2, 0x95, 0x9b, 0xc1, 0xcb, 0x01, 0x00, 0x00, +var fileDescriptor_message_c28309e4affd853b = []byte{ + // 328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xcf, 0x4a, 0xf3, 0x40, + 0x14, 0xc5, 0x3b, 0x4d, 0xd3, 0x86, 0xdb, 0x7e, 0xf0, 0x31, 0x88, 0x84, 0x2c, 0x62, 0x14, 0x17, + 0x41, 0x70, 0x0a, 0xed, 0x13, 0x58, 0xd0, 0x85, 0xe0, 0xc2, 0x6c, 0x5c, 0x4f, 0xd2, 0x34, 0x0e, + 0xa6, 0x99, 0x30, 0x33, 0xa5, 0xf6, 0x2d, 0x7c, 0x05, 0x1f, 0xc4, 0x7d, 0x97, 0x3e, 0x81, 0x48, + 0x7d, 0x11, 0xc9, 0xed, 0x34, 0x1b, 0x41, 0xdc, 0xdd, 0x33, 0x9c, 0xf3, 0xbb, 0x7f, 0x06, 0xfe, + 0x2d, 0x73, 0xad, 0x79, 0x91, 0xb3, 0x5a, 0x49, 0x23, 0x29, 0x4d, 0x85, 0xd1, 0x6b, 0x5e, 0xb3, + 0xf6, 0x39, 0x0d, 0x2e, 0x0b, 0x61, 0x1e, 0x57, 0x29, 0xcb, 0xe4, 0x72, 0x5c, 0xc8, 0x42, 0x8e, + 0xd1, 0x9a, 0xae, 0x16, 0xa8, 0x50, 0x60, 0xb5, 0x47, 0x9c, 0xbd, 0x3a, 0x30, 0xb8, 0xdb, 0xa7, + 0xe9, 0x0d, 0x78, 0x6b, 0x5e, 0x99, 0x52, 0x68, 0xe3, 0x93, 0x88, 0xc4, 0xc3, 0xc9, 0x39, 0xfb, + 0xd9, 0x81, 0x59, 0x3b, 0x7b, 0xb0, 0xde, 0x59, 0x6f, 0xfb, 0x71, 0xd2, 0x49, 0xda, 0x2c, 0x3d, + 0x86, 0x7e, 0x5a, 0xca, 0xec, 0x49, 0xfb, 0xdd, 0xc8, 0x89, 0x47, 0x89, 0x55, 0xf4, 0x0a, 0x06, + 0x35, 0xdf, 0x94, 0x92, 0xcf, 0x7d, 0x27, 0x72, 0xe2, 0xe1, 0xe4, 0xf4, 0x37, 0xfc, 0xac, 0x09, + 0x59, 0xf6, 0x21, 0x17, 0xbc, 0x11, 0xf0, 0x0e, 0x7d, 0xe9, 0x2d, 0x0c, 0xf2, 0xca, 0x28, 0x91, + 0x6b, 0x9f, 0x20, 0xef, 0xe2, 0x2f, 0xe3, 0xb2, 0xeb, 0xca, 0xa8, 0xcd, 0x01, 0x6c, 0x01, 0x94, + 0x42, 0x6f, 0xb1, 0x2a, 0x4b, 0xbf, 0x1b, 0x91, 0xd8, 0x4b, 0xb0, 0x0e, 0xee, 0xc1, 0x45, 0x2f, + 0x3d, 0x02, 0x17, 0x57, 
0xc0, 0xab, 0x8c, 0x92, 0xbd, 0xa0, 0x01, 0x78, 0xb5, 0x12, 0x52, 0x09, + 0xb3, 0xc1, 0x98, 0x9b, 0xb4, 0xba, 0x39, 0x41, 0xc6, 0xab, 0x2c, 0x2f, 0x7d, 0x07, 0x81, 0x56, + 0x05, 0x53, 0x70, 0x71, 0xaf, 0xc6, 0x50, 0xab, 0x7c, 0x21, 0x9e, 0x2d, 0xd3, 0xaa, 0x66, 0x8e, + 0x39, 0x37, 0x1c, 0x81, 0xa3, 0x04, 0xeb, 0xd9, 0xff, 0xed, 0x2e, 0x24, 0xef, 0xbb, 0x90, 0x7c, + 0xee, 0x42, 0xf2, 0xf2, 0x15, 0x76, 0xd2, 0x3e, 0x7e, 0xde, 0xf4, 0x3b, 0x00, 0x00, 0xff, 0xff, + 0xd1, 0x6a, 0x3a, 0xa2, 0x10, 0x02, 0x00, 0x00, } diff --git a/bitswap/message/pb/message.proto b/bitswap/message/pb/message.proto index 23d5ef852..102b3431d 100644 --- a/bitswap/message/pb/message.proto +++ b/bitswap/message/pb/message.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package bitswap.message.pb; +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + message Message { message Wantlist { @@ -12,7 +14,7 @@ message Message { bool cancel = 3; // whether this revokes an entry } - repeated Entry entries = 1; // a list of wantlist entries + repeated Entry entries = 1 [(gogoproto.nullable) = false]; // a list of wantlist entries bool full = 2; // whether this is the full wantlist. default to false } @@ -21,7 +23,7 @@ message Message { bytes data = 2; } - Wantlist wantlist = 1; + Wantlist wantlist = 1 [(gogoproto.nullable) = false]; repeated bytes blocks = 2; // used to send Blocks in bitswap 1.0.0 - repeated Block payload = 3; // used to send Blocks in bitswap 1.1.0 + repeated Block payload = 3 [(gogoproto.nullable) = false]; // used to send Blocks in bitswap 1.1.0 } From b765a94543dc57b286aac30cbe77024b85381545 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 4 Oct 2018 10:56:11 -0700 Subject: [PATCH 0652/1035] avoid allocating for a simple debug message Wantlist/Blocks *copy*. 
This commit was moved from ipfs/go-bitswap@9093b83cbee27cb49a60f1fc230dff55508d1c26 --- bitswap/decision/engine.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index e605996db..90155a1df 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -222,7 +222,7 @@ func (e *Engine) Peers() []peer.ID { // MessageReceived performs book-keeping. Returns error if passed invalid // arguments. func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { - if len(m.Wantlist()) == 0 && len(m.Blocks()) == 0 { + if m.Empty() { log.Debugf("received empty message from %s", p) } @@ -257,9 +257,9 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { } log.Error(err) } else { - // we have the block + // we have the block newWorkExists = true - if msgSize + blockSize > maxMessageSize { + if msgSize+blockSize > maxMessageSize { e.peerRequestQueue.Push(p, activeEntries...) activeEntries = []*wl.Entry{} msgSize = 0 From ed59d74b1128adae5b7930921805ae1c0f24175d Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 17 Oct 2018 15:34:30 +0100 Subject: [PATCH 0653/1035] buffer writes Let's not split every wantlist into a length and a wantlist... 
This commit was moved from ipfs/go-bitswap@fc1278e68095a1d8f367ea4b37a571a0a137d65c --- bitswap/network/ipfs_impl.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index cd0670aef..78dee0dc9 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -1,6 +1,7 @@ package network import ( + "bufio" "context" "fmt" "io" @@ -70,19 +71,20 @@ func msgToStream(ctx context.Context, s inet.Stream, msg bsmsg.BitSwapMessage) e if dl, ok := ctx.Deadline(); ok { deadline = dl } - if err := s.SetWriteDeadline(deadline); err != nil { log.Warningf("error setting deadline: %s", err) } + w := bufio.NewWriter(s) + switch s.Protocol() { case ProtocolBitswap: - if err := msg.ToNetV1(s); err != nil { + if err := msg.ToNetV1(w); err != nil { log.Debugf("error: %s", err) return err } case ProtocolBitswapOne, ProtocolBitswapNoVers: - if err := msg.ToNetV0(s); err != nil { + if err := msg.ToNetV0(w); err != nil { log.Debugf("error: %s", err) return err } @@ -90,6 +92,11 @@ func msgToStream(ctx context.Context, s inet.Stream, msg bsmsg.BitSwapMessage) e return fmt.Errorf("unrecognized protocol on remote: %s", s.Protocol()) } + if err := w.Flush(); err != nil { + log.Debugf("error: %s", err) + return err + } + if err := s.SetWriteDeadline(time.Time{}); err != nil { log.Warningf("error resetting deadline: %s", err) } From 7f74007a0e8167b5e5f55b0dcdd2a64f31a56ef7 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 22 Oct 2018 15:14:35 -0700 Subject: [PATCH 0654/1035] delay finding providers It's expensive and causes quite a bit of dialing. Let's give bitswap a second to work it's magic before we try this. 
fixes #16 This commit was moved from ipfs/go-bitswap@93de01c2adeda04b6319b072f69d61876df3abd0 --- bitswap/bitswap.go | 40 ++++++++++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b8dd498c0..542a6d83b 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -36,6 +36,7 @@ const ( // results. // TODO: if a 'non-nice' strategy is implemented, consider increasing this value maxProvidersPerRequest = 3 + findProviderDelay = 1 * time.Second providerRequestTimeout = time.Second * 10 provideTimeout = time.Second * 15 sizeBatchRequestChan = 32 @@ -230,14 +231,6 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks bs.wm.WantBlocks(ctx, keys, nil, mses) - // NB: Optimization. Assumes that providers of key[0] are likely to - // be able to provide for all keys. This currently holds true in most - // every situation. Later, this assumption may not hold as true. - req := &blockRequest{ - Cid: keys[0], - Ctx: ctx, - } - remaining := cid.NewSet() for _, k := range keys { remaining.Add(k) @@ -252,13 +245,37 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks // can't just defer this call on its own, arguments are resolved *when* the defer is created bs.CancelWants(remaining.Keys(), mses) }() + findProvsDelay := time.NewTimer(findProviderDelay) + defer findProvsDelay.Stop() + + findProvsDelayCh := findProvsDelay.C + req := &blockRequest{ + Cid: keys[0], + Ctx: ctx, + } + + var findProvsReqCh chan<- *blockRequest + for { select { + case <-findProvsDelayCh: + // NB: Optimization. Assumes that providers of key[0] are likely to + // be able to provide for all keys. This currently holds true in most + // every situation. Later, this assumption may not hold as true. 
+ findProvsReqCh = bs.findKeys + findProvsDelayCh = nil + case findProvsReqCh <- req: + findProvsReqCh = nil case blk, ok := <-promise: if !ok { return } + // No need to find providers now. + findProvsDelay.Stop() + findProvsDelayCh = nil + findProvsReqCh = nil + bs.CancelWants([]cid.Cid{blk.Cid()}, mses) remaining.Remove(blk.Cid()) select { @@ -272,12 +289,7 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks } }() - select { - case bs.findKeys <- req: - return out, nil - case <-ctx.Done(): - return nil, ctx.Err() - } + return out, nil } func (bs *Bitswap) getNextSessionID() uint64 { From 8d7fed3cefbbc8a7a9471188297800139615bf54 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 19 Sep 2018 14:39:37 -0700 Subject: [PATCH 0655/1035] fix session exchange interface implementation This commit was moved from ipfs/go-bitswap@55a5c2b6bc95147521dc30bd39c7040f85573318 --- bitswap/bitswap.go | 2 + bitswap/dup_blocks_test.go | 292 +++++++++++++++++++++++++++++++++++++ bitswap/session.go | 3 +- bitswap/session_test.go | 2 +- 4 files changed, 297 insertions(+), 2 deletions(-) create mode 100644 bitswap/dup_blocks_test.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 542a6d83b..942679d4f 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -30,6 +30,8 @@ import ( var log = logging.Logger("bitswap") +var _ exchange.SessionExchange = (*Bitswap)(nil) + const ( // maxProvidersPerRequest specifies the maximum number of providers desired // from the network. 
This value is specified because the network streams diff --git a/bitswap/dup_blocks_test.go b/bitswap/dup_blocks_test.go new file mode 100644 index 000000000..326efc4a3 --- /dev/null +++ b/bitswap/dup_blocks_test.go @@ -0,0 +1,292 @@ +package bitswap + +import ( + "context" + "encoding/json" + "io/ioutil" + "math/rand" + "sync" + "testing" + "time" + + tn "github.com/ipfs/go-bitswap/testnet" + + "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + blocksutil "github.com/ipfs/go-ipfs-blocksutil" + delay "github.com/ipfs/go-ipfs-delay" + mockrouting "github.com/ipfs/go-ipfs-routing/mock" +) + +type fetchFunc func(t *testing.T, bs *Bitswap, ks []cid.Cid) + +type distFunc func(t *testing.T, provs []Instance, blocks []blocks.Block) + +type runStats struct { + Dups uint64 + MsgSent uint64 + MsgRecd uint64 + Time time.Duration + Name string +} + +var benchmarkLog []runStats + +func TestDups2Nodes(t *testing.T) { + t.Run("AllToAll-OneAtATime", func(t *testing.T) { + subtestDistributeAndFetch(t, 3, 100, allToAll, oneAtATime) + }) + t.Run("AllToAll-BigBatch", func(t *testing.T) { + subtestDistributeAndFetch(t, 3, 100, allToAll, batchFetchAll) + }) + + t.Run("Overlap1-OneAtATime", func(t *testing.T) { + subtestDistributeAndFetch(t, 3, 100, overlap1, oneAtATime) + }) + + t.Run("Overlap2-BatchBy10", func(t *testing.T) { + subtestDistributeAndFetch(t, 3, 100, overlap2, batchFetchBy10) + }) + + t.Run("Overlap3-OneAtATime", func(t *testing.T) { + subtestDistributeAndFetch(t, 3, 100, overlap3, oneAtATime) + }) + t.Run("Overlap3-BatchBy10", func(t *testing.T) { + subtestDistributeAndFetch(t, 3, 100, overlap3, batchFetchBy10) + }) + t.Run("Overlap3-AllConcurrent", func(t *testing.T) { + subtestDistributeAndFetch(t, 3, 100, overlap3, fetchAllConcurrent) + }) + t.Run("Overlap3-BigBatch", func(t *testing.T) { + subtestDistributeAndFetch(t, 3, 100, overlap3, batchFetchAll) + }) + t.Run("Overlap3-UnixfsFetch", func(t *testing.T) { + subtestDistributeAndFetch(t, 3, 100, 
overlap3, unixfsFileFetch) + }) + t.Run("10Nodes-AllToAll-OneAtATime", func(t *testing.T) { + subtestDistributeAndFetch(t, 10, 100, allToAll, oneAtATime) + }) + t.Run("10Nodes-AllToAll-BatchFetchBy10", func(t *testing.T) { + subtestDistributeAndFetch(t, 10, 100, allToAll, batchFetchBy10) + }) + t.Run("10Nodes-AllToAll-BigBatch", func(t *testing.T) { + subtestDistributeAndFetch(t, 10, 100, allToAll, batchFetchAll) + }) + t.Run("10Nodes-AllToAll-AllConcurrent", func(t *testing.T) { + subtestDistributeAndFetch(t, 10, 100, allToAll, fetchAllConcurrent) + }) + t.Run("10Nodes-AllToAll-UnixfsFetch", func(t *testing.T) { + subtestDistributeAndFetch(t, 10, 100, allToAll, unixfsFileFetch) + }) + t.Run("10Nodes-OnePeerPerBlock-OneAtATime", func(t *testing.T) { + subtestDistributeAndFetch(t, 10, 100, onePeerPerBlock, oneAtATime) + }) + t.Run("10Nodes-OnePeerPerBlock-BigBatch", func(t *testing.T) { + subtestDistributeAndFetch(t, 10, 100, onePeerPerBlock, batchFetchAll) + }) + t.Run("10Nodes-OnePeerPerBlock-UnixfsFetch", func(t *testing.T) { + subtestDistributeAndFetch(t, 10, 100, onePeerPerBlock, unixfsFileFetch) + }) + t.Run("200Nodes-AllToAll-BigBatch", func(t *testing.T) { + subtestDistributeAndFetch(t, 200, 20, allToAll, batchFetchAll) + }) + + out, _ := json.MarshalIndent(benchmarkLog, "", " ") + ioutil.WriteFile("benchmark.json", out, 0666) +} + +func subtestDistributeAndFetch(t *testing.T, numnodes, numblks int, df distFunc, ff fetchFunc) { + start := time.Now() + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(10*time.Millisecond)) + sg := NewTestSessionGenerator(net) + defer sg.Close() + + bg := blocksutil.NewBlockGenerator() + + instances := sg.Instances(numnodes) + blocks := bg.Blocks(numblks) + + fetcher := instances[numnodes-1] + + df(t, instances[:numnodes-1], blocks) + + var ks []cid.Cid + for _, blk := range blocks { + ks = append(ks, blk.Cid()) + } + + ff(t, fetcher.Exchange, ks) + + st, err := fetcher.Exchange.Stat() + if err != nil { + 
t.Fatal(err) + } + + nst := fetcher.Exchange.network.Stats() + stats := runStats{ + Time: time.Now().Sub(start), + MsgRecd: nst.MessagesRecvd, + MsgSent: nst.MessagesSent, + Dups: st.DupBlksReceived, + Name: t.Name(), + } + benchmarkLog = append(benchmarkLog, stats) + t.Logf("send/recv: %d / %d", nst.MessagesSent, nst.MessagesRecvd) + if st.DupBlksReceived != 0 { + t.Fatalf("got %d duplicate blocks!", st.DupBlksReceived) + } +} + +func allToAll(t *testing.T, provs []Instance, blocks []blocks.Block) { + for _, p := range provs { + if err := p.Blockstore().PutMany(blocks); err != nil { + t.Fatal(err) + } + } +} + +// overlap1 gives the first 75 blocks to the first peer, and the last 75 blocks +// to the second peer. This means both peers have the middle 50 blocks +func overlap1(t *testing.T, provs []Instance, blks []blocks.Block) { + if len(provs) != 2 { + t.Fatal("overlap1 only works with 2 provs") + } + bill := provs[0] + jeff := provs[1] + + if err := bill.Blockstore().PutMany(blks[:75]); err != nil { + t.Fatal(err) + } + if err := jeff.Blockstore().PutMany(blks[25:]); err != nil { + t.Fatal(err) + } +} + +// overlap2 gives every even numbered block to the first peer, odd numbered +// blocks to the second. 
it also gives every third block to both peers +func overlap2(t *testing.T, provs []Instance, blks []blocks.Block) { + if len(provs) != 2 { + t.Fatal("overlap2 only works with 2 provs") + } + bill := provs[0] + jeff := provs[1] + + bill.Blockstore().Put(blks[0]) + jeff.Blockstore().Put(blks[0]) + for i, blk := range blks { + if i%3 == 0 { + bill.Blockstore().Put(blk) + jeff.Blockstore().Put(blk) + } else if i%2 == 1 { + bill.Blockstore().Put(blk) + } else { + jeff.Blockstore().Put(blk) + } + } +} + +func overlap3(t *testing.T, provs []Instance, blks []blocks.Block) { + if len(provs) != 2 { + t.Fatal("overlap3 only works with 2 provs") + } + + bill := provs[0] + jeff := provs[1] + + bill.Blockstore().Put(blks[0]) + jeff.Blockstore().Put(blks[0]) + for i, blk := range blks { + if i%3 == 0 { + bill.Blockstore().Put(blk) + jeff.Blockstore().Put(blk) + } else if i%2 == 1 { + bill.Blockstore().Put(blk) + } else { + jeff.Blockstore().Put(blk) + } + } +} + +// onePeerPerBlock picks a random peer to hold each block +// with this layout, we shouldnt actually ever see any duplicate blocks +// but we're mostly just testing performance of the sync algorithm +func onePeerPerBlock(t *testing.T, provs []Instance, blks []blocks.Block) { + for _, blk := range blks { + provs[rand.Intn(len(provs))].Blockstore().Put(blk) + } +} + +func oneAtATime(t *testing.T, bs *Bitswap, ks []cid.Cid) { + ses := bs.NewSession(context.Background()).(*Session) + for _, c := range ks { + _, err := ses.GetBlock(context.Background(), c) + if err != nil { + t.Fatal(err) + } + } + t.Logf("Session fetch latency: %s", ses.latTotal/time.Duration(ses.fetchcnt)) +} + +// fetch data in batches, 10 at a time +func batchFetchBy10(t *testing.T, bs *Bitswap, ks []cid.Cid) { + ses := bs.NewSession(context.Background()) + for i := 0; i < len(ks); i += 10 { + out, err := ses.GetBlocks(context.Background(), ks[i:i+10]) + if err != nil { + t.Fatal(err) + } + for range out { + } + } +} + +// fetch each block at the same 
time concurrently +func fetchAllConcurrent(t *testing.T, bs *Bitswap, ks []cid.Cid) { + ses := bs.NewSession(context.Background()) + + var wg sync.WaitGroup + for _, c := range ks { + wg.Add(1) + go func(c cid.Cid) { + defer wg.Done() + _, err := ses.GetBlock(context.Background(), c) + if err != nil { + t.Fatal(err) + } + }(c) + } + wg.Wait() +} + +func batchFetchAll(t *testing.T, bs *Bitswap, ks []cid.Cid) { + ses := bs.NewSession(context.Background()) + out, err := ses.GetBlocks(context.Background(), ks) + if err != nil { + t.Fatal(err) + } + for range out { + } +} + +// simulates the fetch pattern of trying to sync a unixfs file graph as fast as possible +func unixfsFileFetch(t *testing.T, bs *Bitswap, ks []cid.Cid) { + ses := bs.NewSession(context.Background()) + _, err := ses.GetBlock(context.Background(), ks[0]) + if err != nil { + t.Fatal(err) + } + + out, err := ses.GetBlocks(context.Background(), ks[1:11]) + if err != nil { + t.Fatal(err) + } + for range out { + } + + out, err = ses.GetBlocks(context.Background(), ks[11:]) + if err != nil { + t.Fatal(err) + } + for range out { + } +} diff --git a/bitswap/session.go b/bitswap/session.go index 063a40d93..9cbeb7db5 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -10,6 +10,7 @@ import ( lru "github.com/hashicorp/golang-lru" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" + exchange "github.com/ipfs/go-ipfs-exchange-interface" logging "github.com/ipfs/go-log" loggables "github.com/libp2p/go-libp2p-loggables" peer "github.com/libp2p/go-libp2p-peer" @@ -51,7 +52,7 @@ type Session struct { // NewSession creates a new bitswap session whose lifetime is bounded by the // given context -func (bs *Bitswap) NewSession(ctx context.Context) *Session { +func (bs *Bitswap) NewSession(ctx context.Context) exchange.Fetcher { s := &Session{ activePeers: make(map[peer.ID]struct{}), liveWants: make(map[cid.Cid]time.Time), diff --git a/bitswap/session_test.go b/bitswap/session_test.go index 
8769d891f..c5a00a90b 100644 --- a/bitswap/session_test.go +++ b/bitswap/session_test.go @@ -132,7 +132,7 @@ func TestSessionSplitFetch(t *testing.T) { cids = append(cids, blk.Cid()) } - ses := inst[10].Exchange.NewSession(ctx) + ses := inst[10].Exchange.NewSession(ctx).(*Session) ses.baseTickDelay = time.Millisecond * 10 for i := 0; i < 10; i++ { From abb634ba3ef10dfe556b196435f1b5e2d0abf9a0 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Fri, 31 Aug 2018 18:34:40 -0700 Subject: [PATCH 0656/1035] add statistics for network messages sent/recvd This commit was moved from ipfs/go-bitswap@6419f7cee0f0f8f573ab86ddb0f6cfff7dcc2840 --- bitswap/network/interface.go | 10 ++++++++++ bitswap/network/ipfs_impl.go | 13 +++++++++++++ bitswap/stat.go | 20 +++++++++++--------- bitswap/testnet/virtual.go | 36 +++++++++++++++++++----------------- bitswap/testutils.go | 2 +- 5 files changed, 54 insertions(+), 27 deletions(-) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index fd5622c1f..6c325b1c1 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -38,6 +38,8 @@ type BitSwapNetwork interface { ConnectionManager() ifconnmgr.ConnManager + Stats() NetworkStats + Routing } @@ -68,3 +70,11 @@ type Routing interface { // Provide provides the key to the network Provide(context.Context, cid.Cid) error } + +// NetworkStats is a container for statistics about the bitswap network +// the numbers inside are specific to bitswap, and not any other protocols +// using the same underlying network. 
+type NetworkStats struct { + MessagesSent uint64 + MessagesRecvd uint64 +} diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 78dee0dc9..f6c04e357 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "io" + "sync/atomic" "time" bsmsg "github.com/ipfs/go-bitswap/message" @@ -48,6 +49,8 @@ type impl struct { // inbound messages from the network are forwarded to the receiver receiver Receiver + + stats NetworkStats } type streamMessageSender struct { @@ -130,6 +133,8 @@ func (bsnet *impl) SendMessage( s.Reset() return err } + atomic.AddUint64(&bsnet.stats.MessagesSent, 1) + // TODO(https://github.com/libp2p/go-libp2p-net/issues/28): Avoid this goroutine. go inet.AwaitEOF(s) return s.Close() @@ -210,6 +215,7 @@ func (bsnet *impl) handleNewStream(s inet.Stream) { ctx := context.Background() log.Debugf("bitswap net handleNewStream from %s", s.Conn().RemotePeer()) bsnet.receiver.ReceiveMessage(ctx, p, received) + atomic.AddUint64(&bsnet.stats.MessagesRecvd, 1) } } @@ -217,6 +223,13 @@ func (bsnet *impl) ConnectionManager() ifconnmgr.ConnManager { return bsnet.host.ConnManager() } +func (bsnet *impl) Stats() NetworkStats { + return NetworkStats{ + MessagesRecvd: atomic.LoadUint64(&bsnet.stats.MessagesRecvd), + MessagesSent: atomic.LoadUint64(&bsnet.stats.MessagesSent), + } +} + type netNotifiee impl func (nn *netNotifiee) impl() *impl { diff --git a/bitswap/stat.go b/bitswap/stat.go index d01d17172..99b2def1c 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -7,15 +7,16 @@ import ( ) type Stat struct { - ProvideBufLen int - Wantlist []cid.Cid - Peers []string - BlocksReceived uint64 - DataReceived uint64 - BlocksSent uint64 - DataSent uint64 - DupBlksReceived uint64 - DupDataReceived uint64 + ProvideBufLen int + Wantlist []cid.Cid + Peers []string + BlocksReceived uint64 + DataReceived uint64 + BlocksSent uint64 + DataSent uint64 + DupBlksReceived uint64 + DupDataReceived 
uint64 + MessagesReceived uint64 } func (bs *Bitswap) Stat() (*Stat, error) { @@ -30,6 +31,7 @@ func (bs *Bitswap) Stat() (*Stat, error) { st.BlocksSent = c.blocksSent st.DataSent = c.dataSent st.DataReceived = c.dataRecvd + st.MessagesReceived = c.messagesRecvd bs.counterLk.Unlock() peers := bs.engine.Peers() diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 004dd66c0..7a6257e79 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -4,6 +4,7 @@ import ( "context" "errors" "sync" + "sync/atomic" "time" bsmsg "github.com/ipfs/go-bitswap/message" @@ -48,7 +49,7 @@ type message struct { // order* with their delays respected as much as sending them in order allows // for type receiverQueue struct { - receiver bsnet.Receiver + receiver *networkClient queue []*message active bool lk sync.Mutex @@ -104,30 +105,30 @@ func (n *network) SendMessage( return nil } -func (n *network) deliver( - r bsnet.Receiver, from peer.ID, message bsmsg.BitSwapMessage) error { - if message == nil || from == "" { - return errors.New("invalid input") - } - - n.delay.Wait() - - r.ReceiveMessage(context.TODO(), from, message) - return nil -} - type networkClient struct { local peer.ID bsnet.Receiver network *network routing routing.IpfsRouting + stats bsnet.NetworkStats } func (nc *networkClient) SendMessage( ctx context.Context, to peer.ID, message bsmsg.BitSwapMessage) error { - return nc.network.SendMessage(ctx, nc.local, to, message) + if err := nc.network.SendMessage(ctx, nc.local, to, message); err != nil { + return err + } + atomic.AddUint64(&nc.stats.MessagesSent, 1) + return nil +} + +func (nc *networkClient) Stats() bsnet.NetworkStats { + return bsnet.NetworkStats{ + MessagesRecvd: atomic.LoadUint64(&nc.stats.MessagesRecvd), + MessagesSent: atomic.LoadUint64(&nc.stats.MessagesSent), + } } // FindProvidersAsync returns a channel of providers for the given key @@ -157,14 +158,14 @@ func (nc *networkClient) ConnectionManager() 
ifconnmgr.ConnManager { } type messagePasser struct { - net *network + net *networkClient target peer.ID local peer.ID ctx context.Context } func (mp *messagePasser) SendMsg(ctx context.Context, m bsmsg.BitSwapMessage) error { - return mp.net.SendMessage(ctx, mp.local, mp.target, m) + return mp.net.SendMessage(ctx, mp.target, m) } func (mp *messagePasser) Close() error { @@ -177,7 +178,7 @@ func (mp *messagePasser) Reset() error { func (n *networkClient) NewMessageSender(ctx context.Context, p peer.ID) (bsnet.MessageSender, error) { return &messagePasser{ - net: n.network, + net: n, target: p, local: n.local, ctx: ctx, @@ -241,6 +242,7 @@ func (rq *receiverQueue) process() { rq.lk.Unlock() time.Sleep(time.Until(m.shouldSend)) + atomic.AddUint64(&rq.receiver.stats.MessagesRecvd, 1) rq.receiver.ReceiveMessage(context.TODO(), m.from, m.msg) } } diff --git a/bitswap/testutils.go b/bitswap/testutils.go index aa4ffa9f7..f9be69435 100644 --- a/bitswap/testutils.go +++ b/bitswap/testutils.go @@ -81,7 +81,7 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { return i.blockstoreDelay.Set(t) } -// session creates a test bitswap session. +// session creates a test bitswap instance. // // NB: It's easy make mistakes by providing the same peer ID to two different // sessions. To safeguard, use the SessionGenerator to generate sessions. 
It's From 13a3d2257b25a64a11012fa90fd0f8cd500608dd Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Mon, 29 Oct 2018 14:42:28 -0700 Subject: [PATCH 0657/1035] fix(dup_blocks_test): convert to benchmark So that CI passes, and because it's not reliable as a test, and is more a benchmark to measure performance, convert dup_block_test.go to a benchmark, which can be run using `go test -bench .` This commit was moved from ipfs/go-bitswap@d6144d9e3fab417a17f0de160f3759337b08b763 --- bitswap/dup_blocks_test.go | 140 ++++++++++++++++++------------------- 1 file changed, 70 insertions(+), 70 deletions(-) diff --git a/bitswap/dup_blocks_test.go b/bitswap/dup_blocks_test.go index 326efc4a3..35fd07a06 100644 --- a/bitswap/dup_blocks_test.go +++ b/bitswap/dup_blocks_test.go @@ -18,9 +18,9 @@ import ( mockrouting "github.com/ipfs/go-ipfs-routing/mock" ) -type fetchFunc func(t *testing.T, bs *Bitswap, ks []cid.Cid) +type fetchFunc func(b *testing.B, bs *Bitswap, ks []cid.Cid) -type distFunc func(t *testing.T, provs []Instance, blocks []blocks.Block) +type distFunc func(b *testing.B, provs []Instance, blocks []blocks.Block) type runStats struct { Dups uint64 @@ -32,70 +32,70 @@ type runStats struct { var benchmarkLog []runStats -func TestDups2Nodes(t *testing.T) { - t.Run("AllToAll-OneAtATime", func(t *testing.T) { - subtestDistributeAndFetch(t, 3, 100, allToAll, oneAtATime) +func BenchmarkDups2Nodes(b *testing.B) { + b.Run("AllToAll-OneAtATime", func(b *testing.B) { + subtestDistributeAndFetch(b, 3, 100, allToAll, oneAtATime) }) - t.Run("AllToAll-BigBatch", func(t *testing.T) { - subtestDistributeAndFetch(t, 3, 100, allToAll, batchFetchAll) + b.Run("AllToAll-BigBatch", func(b *testing.B) { + subtestDistributeAndFetch(b, 3, 100, allToAll, batchFetchAll) }) - t.Run("Overlap1-OneAtATime", func(t *testing.T) { - subtestDistributeAndFetch(t, 3, 100, overlap1, oneAtATime) + b.Run("Overlap1-OneAtATime", func(b *testing.B) { + subtestDistributeAndFetch(b, 3, 100, overlap1, oneAtATime) 
}) - t.Run("Overlap2-BatchBy10", func(t *testing.T) { - subtestDistributeAndFetch(t, 3, 100, overlap2, batchFetchBy10) + b.Run("Overlap2-BatchBy10", func(b *testing.B) { + subtestDistributeAndFetch(b, 3, 100, overlap2, batchFetchBy10) }) - t.Run("Overlap3-OneAtATime", func(t *testing.T) { - subtestDistributeAndFetch(t, 3, 100, overlap3, oneAtATime) + b.Run("Overlap3-OneAtATime", func(b *testing.B) { + subtestDistributeAndFetch(b, 3, 100, overlap3, oneAtATime) }) - t.Run("Overlap3-BatchBy10", func(t *testing.T) { - subtestDistributeAndFetch(t, 3, 100, overlap3, batchFetchBy10) + b.Run("Overlap3-BatchBy10", func(b *testing.B) { + subtestDistributeAndFetch(b, 3, 100, overlap3, batchFetchBy10) }) - t.Run("Overlap3-AllConcurrent", func(t *testing.T) { - subtestDistributeAndFetch(t, 3, 100, overlap3, fetchAllConcurrent) + b.Run("Overlap3-AllConcurrent", func(b *testing.B) { + subtestDistributeAndFetch(b, 3, 100, overlap3, fetchAllConcurrent) }) - t.Run("Overlap3-BigBatch", func(t *testing.T) { - subtestDistributeAndFetch(t, 3, 100, overlap3, batchFetchAll) + b.Run("Overlap3-BigBatch", func(b *testing.B) { + subtestDistributeAndFetch(b, 3, 100, overlap3, batchFetchAll) }) - t.Run("Overlap3-UnixfsFetch", func(t *testing.T) { - subtestDistributeAndFetch(t, 3, 100, overlap3, unixfsFileFetch) + b.Run("Overlap3-UnixfsFetch", func(b *testing.B) { + subtestDistributeAndFetch(b, 3, 100, overlap3, unixfsFileFetch) }) - t.Run("10Nodes-AllToAll-OneAtATime", func(t *testing.T) { - subtestDistributeAndFetch(t, 10, 100, allToAll, oneAtATime) + b.Run("10Nodes-AllToAll-OneAtATime", func(b *testing.B) { + subtestDistributeAndFetch(b, 10, 100, allToAll, oneAtATime) }) - t.Run("10Nodes-AllToAll-BatchFetchBy10", func(t *testing.T) { - subtestDistributeAndFetch(t, 10, 100, allToAll, batchFetchBy10) + b.Run("10Nodes-AllToAll-BatchFetchBy10", func(b *testing.B) { + subtestDistributeAndFetch(b, 10, 100, allToAll, batchFetchBy10) }) - t.Run("10Nodes-AllToAll-BigBatch", func(t *testing.T) { - 
subtestDistributeAndFetch(t, 10, 100, allToAll, batchFetchAll) + b.Run("10Nodes-AllToAll-BigBatch", func(b *testing.B) { + subtestDistributeAndFetch(b, 10, 100, allToAll, batchFetchAll) }) - t.Run("10Nodes-AllToAll-AllConcurrent", func(t *testing.T) { - subtestDistributeAndFetch(t, 10, 100, allToAll, fetchAllConcurrent) + b.Run("10Nodes-AllToAll-AllConcurrent", func(b *testing.B) { + subtestDistributeAndFetch(b, 10, 100, allToAll, fetchAllConcurrent) }) - t.Run("10Nodes-AllToAll-UnixfsFetch", func(t *testing.T) { - subtestDistributeAndFetch(t, 10, 100, allToAll, unixfsFileFetch) + b.Run("10Nodes-AllToAll-UnixfsFetch", func(b *testing.B) { + subtestDistributeAndFetch(b, 10, 100, allToAll, unixfsFileFetch) }) - t.Run("10Nodes-OnePeerPerBlock-OneAtATime", func(t *testing.T) { - subtestDistributeAndFetch(t, 10, 100, onePeerPerBlock, oneAtATime) + b.Run("10Nodes-OnePeerPerBlock-OneAtATime", func(b *testing.B) { + subtestDistributeAndFetch(b, 10, 100, onePeerPerBlock, oneAtATime) }) - t.Run("10Nodes-OnePeerPerBlock-BigBatch", func(t *testing.T) { - subtestDistributeAndFetch(t, 10, 100, onePeerPerBlock, batchFetchAll) + b.Run("10Nodes-OnePeerPerBlock-BigBatch", func(b *testing.B) { + subtestDistributeAndFetch(b, 10, 100, onePeerPerBlock, batchFetchAll) }) - t.Run("10Nodes-OnePeerPerBlock-UnixfsFetch", func(t *testing.T) { - subtestDistributeAndFetch(t, 10, 100, onePeerPerBlock, unixfsFileFetch) + b.Run("10Nodes-OnePeerPerBlock-UnixfsFetch", func(b *testing.B) { + subtestDistributeAndFetch(b, 10, 100, onePeerPerBlock, unixfsFileFetch) }) - t.Run("200Nodes-AllToAll-BigBatch", func(t *testing.T) { - subtestDistributeAndFetch(t, 200, 20, allToAll, batchFetchAll) + b.Run("200Nodes-AllToAll-BigBatch", func(b *testing.B) { + subtestDistributeAndFetch(b, 200, 20, allToAll, batchFetchAll) }) out, _ := json.MarshalIndent(benchmarkLog, "", " ") ioutil.WriteFile("benchmark.json", out, 0666) } -func subtestDistributeAndFetch(t *testing.T, numnodes, numblks int, df distFunc, ff 
fetchFunc) { +func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, df distFunc, ff fetchFunc) { start := time.Now() net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(10*time.Millisecond)) sg := NewTestSessionGenerator(net) @@ -108,18 +108,18 @@ func subtestDistributeAndFetch(t *testing.T, numnodes, numblks int, df distFunc, fetcher := instances[numnodes-1] - df(t, instances[:numnodes-1], blocks) + df(b, instances[:numnodes-1], blocks) var ks []cid.Cid for _, blk := range blocks { ks = append(ks, blk.Cid()) } - ff(t, fetcher.Exchange, ks) + ff(b, fetcher.Exchange, ks) st, err := fetcher.Exchange.Stat() if err != nil { - t.Fatal(err) + b.Fatal(err) } nst := fetcher.Exchange.network.Stats() @@ -128,45 +128,45 @@ func subtestDistributeAndFetch(t *testing.T, numnodes, numblks int, df distFunc, MsgRecd: nst.MessagesRecvd, MsgSent: nst.MessagesSent, Dups: st.DupBlksReceived, - Name: t.Name(), + Name: b.Name(), } benchmarkLog = append(benchmarkLog, stats) - t.Logf("send/recv: %d / %d", nst.MessagesSent, nst.MessagesRecvd) + b.Logf("send/recv: %d / %d", nst.MessagesSent, nst.MessagesRecvd) if st.DupBlksReceived != 0 { - t.Fatalf("got %d duplicate blocks!", st.DupBlksReceived) + b.Fatalf("got %d duplicate blocks!", st.DupBlksReceived) } } -func allToAll(t *testing.T, provs []Instance, blocks []blocks.Block) { +func allToAll(b *testing.B, provs []Instance, blocks []blocks.Block) { for _, p := range provs { if err := p.Blockstore().PutMany(blocks); err != nil { - t.Fatal(err) + b.Fatal(err) } } } // overlap1 gives the first 75 blocks to the first peer, and the last 75 blocks // to the second peer. 
This means both peers have the middle 50 blocks -func overlap1(t *testing.T, provs []Instance, blks []blocks.Block) { +func overlap1(b *testing.B, provs []Instance, blks []blocks.Block) { if len(provs) != 2 { - t.Fatal("overlap1 only works with 2 provs") + b.Fatal("overlap1 only works with 2 provs") } bill := provs[0] jeff := provs[1] if err := bill.Blockstore().PutMany(blks[:75]); err != nil { - t.Fatal(err) + b.Fatal(err) } if err := jeff.Blockstore().PutMany(blks[25:]); err != nil { - t.Fatal(err) + b.Fatal(err) } } // overlap2 gives every even numbered block to the first peer, odd numbered // blocks to the second. it also gives every third block to both peers -func overlap2(t *testing.T, provs []Instance, blks []blocks.Block) { +func overlap2(b *testing.B, provs []Instance, blks []blocks.Block) { if len(provs) != 2 { - t.Fatal("overlap2 only works with 2 provs") + b.Fatal("overlap2 only works with 2 provs") } bill := provs[0] jeff := provs[1] @@ -185,9 +185,9 @@ func overlap2(t *testing.T, provs []Instance, blks []blocks.Block) { } } -func overlap3(t *testing.T, provs []Instance, blks []blocks.Block) { +func overlap3(b *testing.B, provs []Instance, blks []blocks.Block) { if len(provs) != 2 { - t.Fatal("overlap3 only works with 2 provs") + b.Fatal("overlap3 only works with 2 provs") } bill := provs[0] @@ -210,30 +210,30 @@ func overlap3(t *testing.T, provs []Instance, blks []blocks.Block) { // onePeerPerBlock picks a random peer to hold each block // with this layout, we shouldnt actually ever see any duplicate blocks // but we're mostly just testing performance of the sync algorithm -func onePeerPerBlock(t *testing.T, provs []Instance, blks []blocks.Block) { +func onePeerPerBlock(b *testing.B, provs []Instance, blks []blocks.Block) { for _, blk := range blks { provs[rand.Intn(len(provs))].Blockstore().Put(blk) } } -func oneAtATime(t *testing.T, bs *Bitswap, ks []cid.Cid) { +func oneAtATime(b *testing.B, bs *Bitswap, ks []cid.Cid) { ses := 
bs.NewSession(context.Background()).(*Session) for _, c := range ks { _, err := ses.GetBlock(context.Background(), c) if err != nil { - t.Fatal(err) + b.Fatal(err) } } - t.Logf("Session fetch latency: %s", ses.latTotal/time.Duration(ses.fetchcnt)) + b.Logf("Session fetch latency: %s", ses.latTotal/time.Duration(ses.fetchcnt)) } // fetch data in batches, 10 at a time -func batchFetchBy10(t *testing.T, bs *Bitswap, ks []cid.Cid) { +func batchFetchBy10(b *testing.B, bs *Bitswap, ks []cid.Cid) { ses := bs.NewSession(context.Background()) for i := 0; i < len(ks); i += 10 { out, err := ses.GetBlocks(context.Background(), ks[i:i+10]) if err != nil { - t.Fatal(err) + b.Fatal(err) } for range out { } @@ -241,7 +241,7 @@ func batchFetchBy10(t *testing.T, bs *Bitswap, ks []cid.Cid) { } // fetch each block at the same time concurrently -func fetchAllConcurrent(t *testing.T, bs *Bitswap, ks []cid.Cid) { +func fetchAllConcurrent(b *testing.B, bs *Bitswap, ks []cid.Cid) { ses := bs.NewSession(context.Background()) var wg sync.WaitGroup @@ -251,41 +251,41 @@ func fetchAllConcurrent(t *testing.T, bs *Bitswap, ks []cid.Cid) { defer wg.Done() _, err := ses.GetBlock(context.Background(), c) if err != nil { - t.Fatal(err) + b.Fatal(err) } }(c) } wg.Wait() } -func batchFetchAll(t *testing.T, bs *Bitswap, ks []cid.Cid) { +func batchFetchAll(b *testing.B, bs *Bitswap, ks []cid.Cid) { ses := bs.NewSession(context.Background()) out, err := ses.GetBlocks(context.Background(), ks) if err != nil { - t.Fatal(err) + b.Fatal(err) } for range out { } } // simulates the fetch pattern of trying to sync a unixfs file graph as fast as possible -func unixfsFileFetch(t *testing.T, bs *Bitswap, ks []cid.Cid) { +func unixfsFileFetch(b *testing.B, bs *Bitswap, ks []cid.Cid) { ses := bs.NewSession(context.Background()) _, err := ses.GetBlock(context.Background(), ks[0]) if err != nil { - t.Fatal(err) + b.Fatal(err) } out, err := ses.GetBlocks(context.Background(), ks[1:11]) if err != nil { - t.Fatal(err) + 
b.Fatal(err) } for range out { } out, err = ses.GetBlocks(context.Background(), ks[11:]) if err != nil { - t.Fatal(err) + b.Fatal(err) } for range out { } From f8e80a928be0d230b5e510e1f87245241eeb0c04 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 13 Nov 2018 11:25:30 -0800 Subject: [PATCH 0658/1035] fix(Receiver): Ignore unwanted blocks If Bitswap receives a block that isn't in it's wantlist, is should ignore it fix #21 fix #22 This commit was moved from ipfs/go-bitswap@779c923a05d273d9312922962e9d9ed4c850ff09 --- bitswap/bitswap.go | 6 ++++++ bitswap/bitswap_test.go | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 942679d4f..4b72b52db 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -388,6 +388,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg wg := sync.WaitGroup{} for _, block := range iblocks { + wg.Add(1) go func(b blocks.Block) { // TODO: this probably doesnt need to be a goroutine... 
defer wg.Done() @@ -396,6 +397,11 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg log.Debugf("got block %s from %s", b, p) + // skip received blocks that are not in the wantlist + if _, contains := bs.wm.wl.Contains(b.Cid()); !contains { + return + } + if err := bs.receiveBlockFrom(b, p); err != nil { log.Warningf("ReceiveMessage recvBlockFrom error: %s", err) } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 715958eb1..d55fd0733 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -9,6 +9,7 @@ import ( "time" decision "github.com/ipfs/go-bitswap/decision" + "github.com/ipfs/go-bitswap/message" tn "github.com/ipfs/go-bitswap/testnet" blocks "github.com/ipfs/go-block-format" @@ -98,6 +99,38 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { } } +func TestUnwantedBlockNotAdded(t *testing.T) { + + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + block := blocks.NewBlock([]byte("block")) + bsMessage := message.New(true) + bsMessage.AddBlock(block) + + g := NewTestSessionGenerator(net) + defer g.Close() + + peers := g.Instances(2) + hasBlock := peers[0] + defer hasBlock.Exchange.Close() + + if err := hasBlock.Exchange.HasBlock(block); err != nil { + t.Fatal(err) + } + + doesNotWantBlock := peers[1] + defer doesNotWantBlock.Exchange.Close() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + doesNotWantBlock.Exchange.ReceiveMessage(ctx, hasBlock.Peer, bsMessage) + + blockInStore, err := doesNotWantBlock.blockstore.Has(block.Cid()) + if err != nil || blockInStore { + t.Fatal("Unwanted block added to block store") + } +} + func TestLargeSwarm(t *testing.T) { if testing.Short() { t.SkipNow() From 96eeb5a362e43e42cc4679c87849f53e6bf70e81 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 6 Nov 2018 14:58:34 -0800 Subject: [PATCH 0659/1035] feat(Benchmarks): Add real world dup blocks test - add a delay generator 
that similates real world latencies one might encounter on the internet - modify virtual network to accept different latencies for different peers based on using NextWaitTime on passed delay - modify dup_blocks_test subtestDistributeAndFetch to accept a custom delay - Add a real world benchmarks that simulates the kinds of problems one might encounter bitswaping with a long lived session and a large swarm of peers with real world latency distributions (that causes #8 not to function well in practice) This commit was moved from ipfs/go-bitswap@39fa3c7358686f1b676921f8cb184335971fbc27 --- bitswap/dup_blocks_test.go | 73 +++++++++++++------ .../internet_latency_delay_generator.go | 63 ++++++++++++++++ .../internet_latency_delay_generator_test.go | 69 ++++++++++++++++++ bitswap/testnet/virtual.go | 46 ++++++++++-- 4 files changed, 223 insertions(+), 28 deletions(-) create mode 100644 bitswap/testnet/internet_latency_delay_generator.go create mode 100644 bitswap/testnet/internet_latency_delay_generator_test.go diff --git a/bitswap/dup_blocks_test.go b/bitswap/dup_blocks_test.go index 35fd07a06..a48889a3c 100644 --- a/bitswap/dup_blocks_test.go +++ b/bitswap/dup_blocks_test.go @@ -33,71 +33,102 @@ type runStats struct { var benchmarkLog []runStats func BenchmarkDups2Nodes(b *testing.B) { + fixedDelay := delay.Fixed(10 * time.Millisecond) b.Run("AllToAll-OneAtATime", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, allToAll, oneAtATime) + subtestDistributeAndFetch(b, 3, 100, fixedDelay, allToAll, oneAtATime) }) b.Run("AllToAll-BigBatch", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, allToAll, batchFetchAll) + subtestDistributeAndFetch(b, 3, 100, fixedDelay, allToAll, batchFetchAll) }) b.Run("Overlap1-OneAtATime", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, overlap1, oneAtATime) + subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap1, oneAtATime) }) b.Run("Overlap2-BatchBy10", func(b *testing.B) { - subtestDistributeAndFetch(b, 
3, 100, overlap2, batchFetchBy10) + subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap2, batchFetchBy10) }) b.Run("Overlap3-OneAtATime", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, overlap3, oneAtATime) + subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap3, oneAtATime) }) b.Run("Overlap3-BatchBy10", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, overlap3, batchFetchBy10) + subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap3, batchFetchBy10) }) b.Run("Overlap3-AllConcurrent", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, overlap3, fetchAllConcurrent) + subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap3, fetchAllConcurrent) }) b.Run("Overlap3-BigBatch", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, overlap3, batchFetchAll) + subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap3, batchFetchAll) }) b.Run("Overlap3-UnixfsFetch", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, overlap3, unixfsFileFetch) + subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap3, unixfsFileFetch) }) b.Run("10Nodes-AllToAll-OneAtATime", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, allToAll, oneAtATime) + subtestDistributeAndFetch(b, 10, 100, fixedDelay, allToAll, oneAtATime) }) b.Run("10Nodes-AllToAll-BatchFetchBy10", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, allToAll, batchFetchBy10) + subtestDistributeAndFetch(b, 10, 100, fixedDelay, allToAll, batchFetchBy10) }) b.Run("10Nodes-AllToAll-BigBatch", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, allToAll, batchFetchAll) + subtestDistributeAndFetch(b, 10, 100, fixedDelay, allToAll, batchFetchAll) }) b.Run("10Nodes-AllToAll-AllConcurrent", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, allToAll, fetchAllConcurrent) + subtestDistributeAndFetch(b, 10, 100, fixedDelay, allToAll, fetchAllConcurrent) }) b.Run("10Nodes-AllToAll-UnixfsFetch", func(b *testing.B) { - 
subtestDistributeAndFetch(b, 10, 100, allToAll, unixfsFileFetch) + subtestDistributeAndFetch(b, 10, 100, fixedDelay, allToAll, unixfsFileFetch) }) b.Run("10Nodes-OnePeerPerBlock-OneAtATime", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, onePeerPerBlock, oneAtATime) + subtestDistributeAndFetch(b, 10, 100, fixedDelay, onePeerPerBlock, oneAtATime) }) b.Run("10Nodes-OnePeerPerBlock-BigBatch", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, onePeerPerBlock, batchFetchAll) + subtestDistributeAndFetch(b, 10, 100, fixedDelay, onePeerPerBlock, batchFetchAll) }) b.Run("10Nodes-OnePeerPerBlock-UnixfsFetch", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, onePeerPerBlock, unixfsFileFetch) + subtestDistributeAndFetch(b, 10, 100, fixedDelay, onePeerPerBlock, unixfsFileFetch) }) b.Run("200Nodes-AllToAll-BigBatch", func(b *testing.B) { - subtestDistributeAndFetch(b, 200, 20, allToAll, batchFetchAll) + subtestDistributeAndFetch(b, 200, 20, fixedDelay, allToAll, batchFetchAll) }) - out, _ := json.MarshalIndent(benchmarkLog, "", " ") ioutil.WriteFile("benchmark.json", out, 0666) } -func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, df distFunc, ff fetchFunc) { +const fastSpeed = 60 * time.Millisecond +const mediumSpeed = 200 * time.Millisecond +const slowSpeed = 800 * time.Millisecond +const superSlowSpeed = 4000 * time.Millisecond +const distribution = 20 * time.Millisecond + +func BenchmarkDupsManyNodesRealWorldNetwork(b *testing.B) { + fastNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( + mediumSpeed-fastSpeed, slowSpeed-fastSpeed, + 0.0, 0.0, distribution, nil) + fastNetworkDelay := delay.Delay(fastSpeed, fastNetworkDelayGenerator) + averageNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( + mediumSpeed-fastSpeed, slowSpeed-fastSpeed, + 0.3, 0.3, distribution, nil) + averageNetworkDelay := delay.Delay(fastSpeed, averageNetworkDelayGenerator) + slowNetworkDelayGenerator := 
tn.InternetLatencyDelayGenerator( + mediumSpeed-fastSpeed, superSlowSpeed-fastSpeed, + 0.3, 0.3, distribution, nil) + slowNetworkDelay := delay.Delay(fastSpeed, slowNetworkDelayGenerator) + + b.Run("200Nodes-AllToAll-BigBatch-FastNetwork", func(b *testing.B) { + subtestDistributeAndFetch(b, 300, 200, fastNetworkDelay, allToAll, batchFetchAll) + }) + b.Run("200Nodes-AllToAll-BigBatch-AverageVariableSpeedNetwork", func(b *testing.B) { + subtestDistributeAndFetch(b, 300, 200, averageNetworkDelay, allToAll, batchFetchAll) + }) + b.Run("200Nodes-AllToAll-BigBatch-SlowVariableSpeedNetwork", func(b *testing.B) { + subtestDistributeAndFetch(b, 300, 200, slowNetworkDelay, allToAll, batchFetchAll) + }) +} + +func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, df distFunc, ff fetchFunc) { start := time.Now() - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(10*time.Millisecond)) + net := tn.VirtualNetwork(mockrouting.NewServer(), d) sg := NewTestSessionGenerator(net) defer sg.Close() diff --git a/bitswap/testnet/internet_latency_delay_generator.go b/bitswap/testnet/internet_latency_delay_generator.go new file mode 100644 index 000000000..d1fd3ae15 --- /dev/null +++ b/bitswap/testnet/internet_latency_delay_generator.go @@ -0,0 +1,63 @@ +package bitswap + +import ( + "math/rand" + "time" + + "github.com/ipfs/go-ipfs-delay" +) + +var sharedRNG = rand.New(rand.NewSource(time.Now().UnixNano())) + +// InternetLatencyDelayGenerator generates three clusters of delays, +// typical of the type of peers you would encounter on the interenet +// Given a base delay time T, the wait time generated will be either: +// 1. A normalized distribution around the base time +// 2. A normalized distribution around the base time plus a "medium" delay +// 3. 
A normalized distribution around the base time plus a "large" delay +// The size of the medium & large delays are determined when the generator +// is constructed, as well as the relative percentages with which delays fall +// into each of the three different clusters, and the standard deviation for +// the normalized distribution +// This can be used to generate a number of scenarios typical of latency +// distribution among peers on the internet +func InternetLatencyDelayGenerator( + mediumDelay time.Duration, + largeDelay time.Duration, + percentMedium float64, + percentLarge float64, + std time.Duration, + rng *rand.Rand) delay.Generator { + if rng == nil { + rng = sharedRNG + } + + return &internetLatencyDelayGenerator{ + mediumDelay: mediumDelay, + largeDelay: largeDelay, + percentLarge: percentLarge, + percentMedium: percentMedium, + std: std, + rng: rng, + } +} + +type internetLatencyDelayGenerator struct { + mediumDelay time.Duration + largeDelay time.Duration + percentLarge float64 + percentMedium float64 + std time.Duration + rng *rand.Rand +} + +func (d *internetLatencyDelayGenerator) NextWaitTime(t time.Duration) time.Duration { + clusterDistribution := d.rng.Float64() + baseDelay := time.Duration(d.rng.NormFloat64()*float64(d.std)) + t + if clusterDistribution < d.percentLarge { + return baseDelay + d.largeDelay + } else if clusterDistribution < d.percentMedium+d.percentLarge { + return baseDelay + d.mediumDelay + } + return baseDelay +} diff --git a/bitswap/testnet/internet_latency_delay_generator_test.go b/bitswap/testnet/internet_latency_delay_generator_test.go new file mode 100644 index 000000000..dcd6a92b5 --- /dev/null +++ b/bitswap/testnet/internet_latency_delay_generator_test.go @@ -0,0 +1,69 @@ +package bitswap + +import ( + "math" + "math/rand" + "testing" + "time" +) + +const testSeed = 99 + +func TestInternetLatencyDelayNextWaitTimeDistribution(t *testing.T) { + initialValue := 1000 * time.Millisecond + deviation := 100 * time.Millisecond 
+ mediumDelay := 1000 * time.Millisecond + largeDelay := 3000 * time.Millisecond + percentMedium := 0.2 + percentLarge := 0.4 + buckets := make(map[string]int) + internetLatencyDistributionDelay := InternetLatencyDelayGenerator( + mediumDelay, + largeDelay, + percentMedium, + percentLarge, + deviation, + rand.New(rand.NewSource(testSeed))) + + buckets["fast"] = 0 + buckets["medium"] = 0 + buckets["slow"] = 0 + buckets["outside_1_deviation"] = 0 + + // strategy here is rather than mock randomness, just use enough samples to + // get approximately the distribution you'd expect + for i := 0; i < 10000; i++ { + next := internetLatencyDistributionDelay.NextWaitTime(initialValue) + if math.Abs((next - initialValue).Seconds()) <= deviation.Seconds() { + buckets["fast"]++ + } else if math.Abs((next - initialValue - mediumDelay).Seconds()) <= deviation.Seconds() { + buckets["medium"]++ + } else if math.Abs((next - initialValue - largeDelay).Seconds()) <= deviation.Seconds() { + buckets["slow"]++ + } else { + buckets["outside_1_deviation"]++ + } + } + totalInOneDeviation := float64(10000 - buckets["outside_1_deviation"]) + oneDeviationPercentage := totalInOneDeviation / 10000 + fastPercentageResult := float64(buckets["fast"]) / totalInOneDeviation + mediumPercentageResult := float64(buckets["medium"]) / totalInOneDeviation + slowPercentageResult := float64(buckets["slow"]) / totalInOneDeviation + + // see 68-95-99 rule for normal distributions + if math.Abs(oneDeviationPercentage-0.6827) >= 0.1 { + t.Fatal("Failed to distribute values normally based on standard deviation") + } + + if math.Abs(fastPercentageResult+percentMedium+percentLarge-1) >= 0.1 { + t.Fatal("Incorrect percentage of values distributed around fast delay time") + } + + if math.Abs(mediumPercentageResult-percentMedium) >= 0.1 { + t.Fatal("Incorrect percentage of values distributed around medium delay time") + } + + if math.Abs(slowPercentageResult-percentLarge) >= 0.1 { + t.Fatal("Incorrect percentage of 
values distributed around slow delay time") + } +} diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 7a6257e79..7d1921174 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -3,6 +3,7 @@ package bitswap import ( "context" "errors" + "sort" "sync" "sync/atomic" "time" @@ -24,6 +25,7 @@ var log = logging.Logger("bstestnet") func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { return &network{ + latencies: make(map[peer.ID]map[peer.ID]time.Duration), clients: make(map[peer.ID]*receiverQueue), delay: d, routingserver: rs, @@ -33,6 +35,7 @@ func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { type network struct { mu sync.Mutex + latencies map[peer.ID]map[peer.ID]time.Duration clients map[peer.ID]*receiverQueue routingserver mockrouting.Server delay delay.D @@ -87,6 +90,18 @@ func (n *network) SendMessage( n.mu.Lock() defer n.mu.Unlock() + latencies, ok := n.latencies[from] + if !ok { + latencies = make(map[peer.ID]time.Duration) + n.latencies[from] = latencies + } + + latency, ok := latencies[to] + if !ok { + latency = n.delay.NextWaitTime() + latencies[to] = latency + } + receiver, ok := n.clients[to] if !ok { return errors.New("cannot locate peer on network") @@ -98,7 +113,7 @@ func (n *network) SendMessage( msg := &message{ from: from, msg: mes, - shouldSend: time.Now().Add(n.delay.Get()), + shouldSend: time.Now().Add(latency), } receiver.enqueue(msg) @@ -229,21 +244,38 @@ func (rq *receiverQueue) enqueue(m *message) { } } +func (rq *receiverQueue) Swap(i, j int) { + rq.queue[i], rq.queue[j] = rq.queue[j], rq.queue[i] +} + +func (rq *receiverQueue) Len() int { + return len(rq.queue) +} + +func (rq *receiverQueue) Less(i, j int) bool { + return rq.queue[i].shouldSend.UnixNano() < rq.queue[j].shouldSend.UnixNano() +} + func (rq *receiverQueue) process() { for { rq.lk.Lock() + sort.Sort(rq) if len(rq.queue) == 0 { rq.active = false rq.lk.Unlock() return } m := rq.queue[0] - rq.queue = rq.queue[1:] - 
rq.lk.Unlock() - - time.Sleep(time.Until(m.shouldSend)) - atomic.AddUint64(&rq.receiver.stats.MessagesRecvd, 1) - rq.receiver.ReceiveMessage(context.TODO(), m.from, m.msg) + if time.Until(m.shouldSend).Seconds() < 0.1 { + rq.queue = rq.queue[1:] + rq.lk.Unlock() + time.Sleep(time.Until(m.shouldSend)) + atomic.AddUint64(&rq.receiver.stats.MessagesRecvd, 1) + rq.receiver.ReceiveMessage(context.TODO(), m.from, m.msg) + } else { + rq.lk.Unlock() + time.Sleep(100 * time.Millisecond) + } } } From 978c81981c3ffad077bf50ebbc6ac354966b66a1 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 15 Nov 2018 14:19:42 -0800 Subject: [PATCH 0660/1035] refactor(general): extract components to packages Extract session manager from bitswap Extract session manager & want manager to package Move want manager message queue to seperate file Move Message Queue to subpackage Respond to PR Comments This commit was moved from ipfs/go-bitswap@69d063bf87ac0c44fb9dc635df24946bb3c1c6f9 --- bitswap/bitswap.go | 39 +-- bitswap/messagequeue/messagequeue.go | 208 ++++++++++++ bitswap/session.go | 17 +- bitswap/sessionmanager/sessionmanager.go | 59 ++++ bitswap/wantmanager.go | 404 ----------------------- bitswap/wantmanager/wantmanager.go | 251 ++++++++++++++ bitswap/workers.go | 4 +- 7 files changed, 536 insertions(+), 446 deletions(-) create mode 100644 bitswap/messagequeue/messagequeue.go create mode 100644 bitswap/sessionmanager/sessionmanager.go delete mode 100644 bitswap/wantmanager.go create mode 100644 bitswap/wantmanager/wantmanager.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 4b72b52db..0e8fbf4e9 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -5,7 +5,6 @@ package bitswap import ( "context" "errors" - "math" "sync" "sync/atomic" "time" @@ -14,6 +13,8 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" bsnet "github.com/ipfs/go-bitswap/network" notifications "github.com/ipfs/go-bitswap/notifications" + bssm "github.com/ipfs/go-bitswap/sessionmanager" + bswm 
"github.com/ipfs/go-bitswap/wantmanager" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" @@ -42,8 +43,6 @@ const ( providerRequestTimeout = time.Second * 10 provideTimeout = time.Second * 15 sizeBatchRequestChan = 32 - // kMaxPriority is the max priority as defined by the bitswap protocol - kMaxPriority = math.MaxInt32 ) var ( @@ -101,7 +100,8 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, process: px, newBlocks: make(chan cid.Cid, HasBlockBufferSize), provideKeys: make(chan cid.Cid, provideKeysBufferSize), - wm: NewWantManager(ctx, network), + wm: bswm.New(ctx, network), + sm: bssm.New(), counters: new(counters), dupMetric: dupHist, @@ -128,7 +128,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, type Bitswap struct { // the peermanager manages sending messages to peers in a way that // wont block bitswap operation - wm *WantManager + wm *bswm.WantManager // the engine is the bit of logic that decides who to send which blocks to engine *decision.Engine @@ -163,12 +163,8 @@ type Bitswap struct { dupMetric metrics.Histogram allMetric metrics.Histogram - // Sessions - sessions []*Session - sessLk sync.Mutex - - sessID uint64 - sessIDLk sync.Mutex + // the sessionmanager manages tracking sessions + sm *bssm.SessionManager } type counters struct { @@ -229,7 +225,7 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks log.Event(ctx, "Bitswap.GetBlockRequest.Start", k) } - mses := bs.getNextSessionID() + mses := bs.sm.GetNextSessionID() bs.wm.WantBlocks(ctx, keys, nil, mses) @@ -294,13 +290,6 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks return out, nil } -func (bs *Bitswap) getNextSessionID() uint64 { - bs.sessIDLk.Lock() - defer bs.sessIDLk.Unlock() - bs.sessID++ - return bs.sessID -} - // CancelWant removes a given key from the wantlist func (bs *Bitswap) CancelWants(cids []cid.Cid, ses uint64) { if len(cids) == 0 { @@ -359,15 +348,13 @@ func 
(bs *Bitswap) receiveBlockFrom(blk blocks.Block, from peer.ID) error { // SessionsForBlock returns a slice of all sessions that may be interested in the given cid func (bs *Bitswap) SessionsForBlock(c cid.Cid) []*Session { - bs.sessLk.Lock() - defer bs.sessLk.Unlock() - var out []*Session - for _, s := range bs.sessions { + bs.sm.IterateSessions(func(session exchange.Fetcher) { + s := session.(*Session) if s.interestedIn(c) { out = append(out, s) } - } + }) return out } @@ -398,7 +385,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg log.Debugf("got block %s from %s", b, p) // skip received blocks that are not in the wantlist - if _, contains := bs.wm.wl.Contains(b.Cid()); !contains { + if !bs.wm.IsWanted(b.Cid()) { return } @@ -461,7 +448,7 @@ func (bs *Bitswap) Close() error { } func (bs *Bitswap) GetWantlist() []cid.Cid { - entries := bs.wm.wl.Entries() + entries := bs.wm.CurrentWants() out := make([]cid.Cid, 0, len(entries)) for _, e := range entries { out = append(out, e.Cid) diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go new file mode 100644 index 000000000..f36117d65 --- /dev/null +++ b/bitswap/messagequeue/messagequeue.go @@ -0,0 +1,208 @@ +package messagequeue + +import ( + "context" + "sync" + "time" + + bsmsg "github.com/ipfs/go-bitswap/message" + bsnet "github.com/ipfs/go-bitswap/network" + wantlist "github.com/ipfs/go-bitswap/wantlist" + logging "github.com/ipfs/go-log" + peer "github.com/libp2p/go-libp2p-peer" +) + +var log = logging.Logger("bitswap") + +type MessageQueue struct { + p peer.ID + + outlk sync.Mutex + out bsmsg.BitSwapMessage + network bsnet.BitSwapNetwork + wl *wantlist.ThreadSafe + + sender bsnet.MessageSender + + refcnt int + + work chan struct{} + done chan struct{} +} + +func New(p peer.ID, network bsnet.BitSwapNetwork) *MessageQueue { + return &MessageQueue{ + done: make(chan struct{}), + work: make(chan struct{}, 1), + wl: wantlist.NewThreadSafe(), + 
network: network, + p: p, + refcnt: 1, + } +} + +func (mq *MessageQueue) RefIncrement() { + mq.refcnt++ +} + +func (mq *MessageQueue) RefDecrement() bool { + mq.refcnt-- + return mq.refcnt > 0 +} + +func (mq *MessageQueue) AddMessage(entries []*bsmsg.Entry, ses uint64) { + var work bool + mq.outlk.Lock() + defer func() { + mq.outlk.Unlock() + if !work { + return + } + select { + case mq.work <- struct{}{}: + default: + } + }() + + // if we have no message held allocate a new one + if mq.out == nil { + mq.out = bsmsg.New(false) + } + + // TODO: add a msg.Combine(...) method + // otherwise, combine the one we are holding with the + // one passed in + for _, e := range entries { + if e.Cancel { + if mq.wl.Remove(e.Cid, ses) { + work = true + mq.out.Cancel(e.Cid) + } + } else { + if mq.wl.Add(e.Cid, e.Priority, ses) { + work = true + mq.out.AddEntry(e.Cid, e.Priority) + } + } + } +} + +func (mq *MessageQueue) Startup(ctx context.Context, initialEntries []*wantlist.Entry) { + + // new peer, we will want to give them our full wantlist + fullwantlist := bsmsg.New(true) + for _, e := range initialEntries { + for k := range e.SesTrk { + mq.wl.AddEntry(e, k) + } + fullwantlist.AddEntry(e.Cid, e.Priority) + } + mq.out = fullwantlist + mq.work <- struct{}{} + + go mq.runQueue(ctx) +} + +func (mq *MessageQueue) Shutdown() { + close(mq.done) +} +func (mq *MessageQueue) runQueue(ctx context.Context) { + for { + select { + case <-mq.work: // there is work to be done + mq.doWork(ctx) + case <-mq.done: + if mq.sender != nil { + mq.sender.Close() + } + return + case <-ctx.Done(): + if mq.sender != nil { + mq.sender.Reset() + } + return + } + } +} + +func (mq *MessageQueue) doWork(ctx context.Context) { + // grab outgoing message + mq.outlk.Lock() + wlm := mq.out + if wlm == nil || wlm.Empty() { + mq.outlk.Unlock() + return + } + mq.out = nil + mq.outlk.Unlock() + + // NB: only open a stream if we actually have data to send + if mq.sender == nil { + err := mq.openSender(ctx) + if err 
!= nil { + log.Infof("cant open message sender to peer %s: %s", mq.p, err) + // TODO: cant connect, what now? + return + } + } + + // send wantlist updates + for { // try to send this message until we fail. + err := mq.sender.SendMsg(ctx, wlm) + if err == nil { + return + } + + log.Infof("bitswap send error: %s", err) + mq.sender.Reset() + mq.sender = nil + + select { + case <-mq.done: + return + case <-ctx.Done(): + return + case <-time.After(time.Millisecond * 100): + // wait 100ms in case disconnect notifications are still propogating + log.Warning("SendMsg errored but neither 'done' nor context.Done() were set") + } + + err = mq.openSender(ctx) + if err != nil { + log.Infof("couldnt open sender again after SendMsg(%s) failed: %s", mq.p, err) + // TODO(why): what do we do now? + // I think the *right* answer is to probably put the message we're + // trying to send back, and then return to waiting for new work or + // a disconnect. + return + } + + // TODO: Is this the same instance for the remote peer? 
+ // If its not, we should resend our entire wantlist to them + /* + if mq.sender.InstanceID() != mq.lastSeenInstanceID { + wlm = mq.getFullWantlistMessage() + } + */ + } +} + +func (mq *MessageQueue) openSender(ctx context.Context) error { + // allow ten minutes for connections this includes looking them up in the + // dht dialing them, and handshaking + conctx, cancel := context.WithTimeout(ctx, time.Minute*10) + defer cancel() + + err := mq.network.ConnectTo(conctx, mq.p) + if err != nil { + return err + } + + nsender, err := mq.network.NewMessageSender(ctx, mq.p) + if err != nil { + return err + } + + mq.sender = nsender + return nil +} diff --git a/bitswap/session.go b/bitswap/session.go index 9cbeb7db5..cd5f645a6 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -66,7 +66,7 @@ func (bs *Bitswap) NewSession(ctx context.Context) exchange.Fetcher { notif: notifications.New(), uuid: loggables.Uuid("GetBlockRequest"), baseTickDelay: time.Millisecond * 500, - id: bs.getNextSessionID(), + id: bs.sm.GetNextSessionID(), } s.tag = fmt.Sprint("bs-ses-", s.id) @@ -74,10 +74,7 @@ func (bs *Bitswap) NewSession(ctx context.Context) exchange.Fetcher { cache, _ := lru.New(2048) s.interest = cache - bs.sessLk.Lock() - bs.sessions = append(bs.sessions, s) - bs.sessLk.Unlock() - + bs.sm.AddSession(s) go s.run(ctx) return s @@ -92,15 +89,7 @@ func (bs *Bitswap) removeSession(s *Session) { } bs.CancelWants(live, s.id) - bs.sessLk.Lock() - defer bs.sessLk.Unlock() - for i := 0; i < len(bs.sessions); i++ { - if bs.sessions[i] == s { - bs.sessions[i] = bs.sessions[len(bs.sessions)-1] - bs.sessions = bs.sessions[:len(bs.sessions)-1] - return - } - } + bs.sm.RemoveSession(s) } type blkRecv struct { diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go new file mode 100644 index 000000000..1ebee2fd1 --- /dev/null +++ b/bitswap/sessionmanager/sessionmanager.go @@ -0,0 +1,59 @@ +package sessionmanager + +import ( + "sync" + + exchange 
"github.com/ipfs/go-ipfs-exchange-interface" +) + +type SessionManager struct { + // Sessions + sessLk sync.Mutex + sessions []exchange.Fetcher + + // Session Index + sessIDLk sync.Mutex + sessID uint64 +} + +func New() *SessionManager { + return &SessionManager{} +} + +func (sm *SessionManager) AddSession(session exchange.Fetcher) { + sm.sessLk.Lock() + sm.sessions = append(sm.sessions, session) + sm.sessLk.Unlock() +} + +func (sm *SessionManager) RemoveSession(session exchange.Fetcher) { + sm.sessLk.Lock() + defer sm.sessLk.Unlock() + for i := 0; i < len(sm.sessions); i++ { + if sm.sessions[i] == session { + sm.sessions[i] = sm.sessions[len(sm.sessions)-1] + sm.sessions = sm.sessions[:len(sm.sessions)-1] + return + } + } +} + +func (sm *SessionManager) GetNextSessionID() uint64 { + sm.sessIDLk.Lock() + defer sm.sessIDLk.Unlock() + sm.sessID++ + return sm.sessID +} + +type IterateSessionFunc func(session exchange.Fetcher) + +// IterateSessions loops through all managed sessions and applies the given +// IterateSessionFunc +func (sm *SessionManager) IterateSessions(iterate IterateSessionFunc) { + sm.sessLk.Lock() + defer sm.sessLk.Unlock() + + for _, s := range sm.sessions { + iterate(s) + } +} diff --git a/bitswap/wantmanager.go b/bitswap/wantmanager.go deleted file mode 100644 index 8d033ff9b..000000000 --- a/bitswap/wantmanager.go +++ /dev/null @@ -1,404 +0,0 @@ -package bitswap - -import ( - "context" - "sync" - "time" - - engine "github.com/ipfs/go-bitswap/decision" - bsmsg "github.com/ipfs/go-bitswap/message" - bsnet "github.com/ipfs/go-bitswap/network" - wantlist "github.com/ipfs/go-bitswap/wantlist" - - cid "github.com/ipfs/go-cid" - metrics "github.com/ipfs/go-metrics-interface" - peer "github.com/libp2p/go-libp2p-peer" -) - -type WantManager struct { - // sync channels for Run loop - incoming chan *wantSet - connectEvent chan peerStatus // notification channel for peers connecting/disconnecting - peerReqs chan chan []peer.ID // channel to request 
connected peers on - - // synchronized by Run loop, only touch inside there - peers map[peer.ID]*msgQueue - wl *wantlist.ThreadSafe - bcwl *wantlist.ThreadSafe - - network bsnet.BitSwapNetwork - ctx context.Context - cancel func() - - wantlistGauge metrics.Gauge - sentHistogram metrics.Histogram -} - -type peerStatus struct { - connect bool - peer peer.ID -} - -func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantManager { - ctx, cancel := context.WithCancel(ctx) - wantlistGauge := metrics.NewCtx(ctx, "wantlist_total", - "Number of items in wantlist.").Gauge() - sentHistogram := metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by"+ - " this bitswap").Histogram(metricsBuckets) - return &WantManager{ - incoming: make(chan *wantSet, 10), - connectEvent: make(chan peerStatus, 10), - peerReqs: make(chan chan []peer.ID), - peers: make(map[peer.ID]*msgQueue), - wl: wantlist.NewThreadSafe(), - bcwl: wantlist.NewThreadSafe(), - network: network, - ctx: ctx, - cancel: cancel, - wantlistGauge: wantlistGauge, - sentHistogram: sentHistogram, - } -} - -type msgQueue struct { - p peer.ID - - outlk sync.Mutex - out bsmsg.BitSwapMessage - network bsnet.BitSwapNetwork - wl *wantlist.ThreadSafe - - sender bsnet.MessageSender - - refcnt int - - work chan struct{} - done chan struct{} -} - -// WantBlocks adds the given cids to the wantlist, tracked by the given session -func (pm *WantManager) WantBlocks(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) { - log.Infof("want blocks: %s", ks) - pm.addEntries(ctx, ks, peers, false, ses) -} - -// CancelWants removes the given cids from the wantlist, tracked by the given session -func (pm *WantManager) CancelWants(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) { - pm.addEntries(context.Background(), ks, peers, true, ses) -} - -type wantSet struct { - entries []*bsmsg.Entry - targets []peer.ID - from uint64 -} - -func (pm *WantManager) addEntries(ctx context.Context, 
ks []cid.Cid, targets []peer.ID, cancel bool, ses uint64) { - entries := make([]*bsmsg.Entry, 0, len(ks)) - for i, k := range ks { - entries = append(entries, &bsmsg.Entry{ - Cancel: cancel, - Entry: wantlist.NewRefEntry(k, kMaxPriority-i), - }) - } - select { - case pm.incoming <- &wantSet{entries: entries, targets: targets, from: ses}: - case <-pm.ctx.Done(): - case <-ctx.Done(): - } -} - -func (pm *WantManager) ConnectedPeers() []peer.ID { - resp := make(chan []peer.ID) - pm.peerReqs <- resp - return <-resp -} - -func (pm *WantManager) SendBlocks(ctx context.Context, env *engine.Envelope) { - // Blocks need to be sent synchronously to maintain proper backpressure - // throughout the network stack - defer env.Sent() - - msgSize := 0 - msg := bsmsg.New(false) - for _, block := range env.Message.Blocks() { - msgSize += len(block.RawData()) - msg.AddBlock(block) - log.Infof("Sending block %s to %s", block, env.Peer) - } - - pm.sentHistogram.Observe(float64(msgSize)) - err := pm.network.SendMessage(ctx, env.Peer, msg) - if err != nil { - log.Infof("sendblock error: %s", err) - } -} - -func (pm *WantManager) startPeerHandler(p peer.ID) *msgQueue { - mq, ok := pm.peers[p] - if ok { - mq.refcnt++ - return nil - } - - mq = pm.newMsgQueue(p) - - // new peer, we will want to give them our full wantlist - fullwantlist := bsmsg.New(true) - for _, e := range pm.bcwl.Entries() { - for k := range e.SesTrk { - mq.wl.AddEntry(e, k) - } - fullwantlist.AddEntry(e.Cid, e.Priority) - } - mq.out = fullwantlist - mq.work <- struct{}{} - - pm.peers[p] = mq - go mq.runQueue(pm.ctx) - return mq -} - -func (pm *WantManager) stopPeerHandler(p peer.ID) { - pq, ok := pm.peers[p] - if !ok { - // TODO: log error? 
- return - } - - pq.refcnt-- - if pq.refcnt > 0 { - return - } - - close(pq.done) - delete(pm.peers, p) -} - -func (mq *msgQueue) runQueue(ctx context.Context) { - for { - select { - case <-mq.work: // there is work to be done - mq.doWork(ctx) - case <-mq.done: - if mq.sender != nil { - mq.sender.Close() - } - return - case <-ctx.Done(): - if mq.sender != nil { - mq.sender.Reset() - } - return - } - } -} - -func (mq *msgQueue) doWork(ctx context.Context) { - // grab outgoing message - mq.outlk.Lock() - wlm := mq.out - if wlm == nil || wlm.Empty() { - mq.outlk.Unlock() - return - } - mq.out = nil - mq.outlk.Unlock() - - // NB: only open a stream if we actually have data to send - if mq.sender == nil { - err := mq.openSender(ctx) - if err != nil { - log.Infof("cant open message sender to peer %s: %s", mq.p, err) - // TODO: cant connect, what now? - return - } - } - - // send wantlist updates - for { // try to send this message until we fail. - err := mq.sender.SendMsg(ctx, wlm) - if err == nil { - return - } - - log.Infof("bitswap send error: %s", err) - mq.sender.Reset() - mq.sender = nil - - select { - case <-mq.done: - return - case <-ctx.Done(): - return - case <-time.After(time.Millisecond * 100): - // wait 100ms in case disconnect notifications are still propogating - log.Warning("SendMsg errored but neither 'done' nor context.Done() were set") - } - - err = mq.openSender(ctx) - if err != nil { - log.Infof("couldnt open sender again after SendMsg(%s) failed: %s", mq.p, err) - // TODO(why): what do we do now? - // I think the *right* answer is to probably put the message we're - // trying to send back, and then return to waiting for new work or - // a disconnect. - return - } - - // TODO: Is this the same instance for the remote peer? 
- // If its not, we should resend our entire wantlist to them - /* - if mq.sender.InstanceID() != mq.lastSeenInstanceID { - wlm = mq.getFullWantlistMessage() - } - */ - } -} - -func (mq *msgQueue) openSender(ctx context.Context) error { - // allow ten minutes for connections this includes looking them up in the - // dht dialing them, and handshaking - conctx, cancel := context.WithTimeout(ctx, time.Minute*10) - defer cancel() - - err := mq.network.ConnectTo(conctx, mq.p) - if err != nil { - return err - } - - nsender, err := mq.network.NewMessageSender(ctx, mq.p) - if err != nil { - return err - } - - mq.sender = nsender - return nil -} - -func (pm *WantManager) Connected(p peer.ID) { - select { - case pm.connectEvent <- peerStatus{peer: p, connect: true}: - case <-pm.ctx.Done(): - } -} - -func (pm *WantManager) Disconnected(p peer.ID) { - select { - case pm.connectEvent <- peerStatus{peer: p, connect: false}: - case <-pm.ctx.Done(): - } -} - -// TODO: use goprocess here once i trust it -func (pm *WantManager) Run() { - // NOTE: Do not open any streams or connections from anywhere in this - // event loop. Really, just don't do anything likely to block. - for { - select { - case ws := <-pm.incoming: - - // is this a broadcast or not? 
- brdc := len(ws.targets) == 0 - - // add changes to our wantlist - for _, e := range ws.entries { - if e.Cancel { - if brdc { - pm.bcwl.Remove(e.Cid, ws.from) - } - - if pm.wl.Remove(e.Cid, ws.from) { - pm.wantlistGauge.Dec() - } - } else { - if brdc { - pm.bcwl.AddEntry(e.Entry, ws.from) - } - if pm.wl.AddEntry(e.Entry, ws.from) { - pm.wantlistGauge.Inc() - } - } - } - - // broadcast those wantlist changes - if len(ws.targets) == 0 { - for _, p := range pm.peers { - p.addMessage(ws.entries, ws.from) - } - } else { - for _, t := range ws.targets { - p, ok := pm.peers[t] - if !ok { - log.Infof("tried sending wantlist change to non-partner peer: %s", t) - continue - } - p.addMessage(ws.entries, ws.from) - } - } - - case p := <-pm.connectEvent: - if p.connect { - pm.startPeerHandler(p.peer) - } else { - pm.stopPeerHandler(p.peer) - } - case req := <-pm.peerReqs: - peers := make([]peer.ID, 0, len(pm.peers)) - for p := range pm.peers { - peers = append(peers, p) - } - req <- peers - case <-pm.ctx.Done(): - return - } - } -} - -func (wm *WantManager) newMsgQueue(p peer.ID) *msgQueue { - return &msgQueue{ - done: make(chan struct{}), - work: make(chan struct{}, 1), - wl: wantlist.NewThreadSafe(), - network: wm.network, - p: p, - refcnt: 1, - } -} - -func (mq *msgQueue) addMessage(entries []*bsmsg.Entry, ses uint64) { - var work bool - mq.outlk.Lock() - defer func() { - mq.outlk.Unlock() - if !work { - return - } - select { - case mq.work <- struct{}{}: - default: - } - }() - - // if we have no message held allocate a new one - if mq.out == nil { - mq.out = bsmsg.New(false) - } - - // TODO: add a msg.Combine(...) 
method - // otherwise, combine the one we are holding with the - // one passed in - for _, e := range entries { - if e.Cancel { - if mq.wl.Remove(e.Cid, ses) { - work = true - mq.out.Cancel(e.Cid) - } - } else { - if mq.wl.Add(e.Cid, e.Priority, ses) { - work = true - mq.out.AddEntry(e.Cid, e.Priority) - } - } - } -} diff --git a/bitswap/wantmanager/wantmanager.go b/bitswap/wantmanager/wantmanager.go new file mode 100644 index 000000000..e3734290c --- /dev/null +++ b/bitswap/wantmanager/wantmanager.go @@ -0,0 +1,251 @@ +package wantmanager + +import ( + "context" + "math" + + engine "github.com/ipfs/go-bitswap/decision" + bsmsg "github.com/ipfs/go-bitswap/message" + bsmq "github.com/ipfs/go-bitswap/messagequeue" + bsnet "github.com/ipfs/go-bitswap/network" + wantlist "github.com/ipfs/go-bitswap/wantlist" + logging "github.com/ipfs/go-log" + + cid "github.com/ipfs/go-cid" + metrics "github.com/ipfs/go-metrics-interface" + peer "github.com/libp2p/go-libp2p-peer" +) + +var log = logging.Logger("bitswap") + +const ( + // kMaxPriority is the max priority as defined by the bitswap protocol + kMaxPriority = math.MaxInt32 +) + +var ( + metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} +) + +type WantManager struct { + // sync channels for Run loop + incoming chan *wantSet + connectEvent chan peerStatus // notification channel for peers connecting/disconnecting + peerReqs chan chan []peer.ID // channel to request connected peers on + + // synchronized by Run loop, only touch inside there + peers map[peer.ID]*bsmq.MessageQueue + wl *wantlist.ThreadSafe + bcwl *wantlist.ThreadSafe + + network bsnet.BitSwapNetwork + ctx context.Context + cancel func() + + wantlistGauge metrics.Gauge + sentHistogram metrics.Histogram +} + +type peerStatus struct { + connect bool + peer peer.ID +} + +func New(ctx context.Context, network bsnet.BitSwapNetwork) *WantManager { + ctx, cancel := context.WithCancel(ctx) + wantlistGauge := metrics.NewCtx(ctx, 
"wantlist_total", + "Number of items in wantlist.").Gauge() + sentHistogram := metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by"+ + " this bitswap").Histogram(metricsBuckets) + return &WantManager{ + incoming: make(chan *wantSet, 10), + connectEvent: make(chan peerStatus, 10), + peerReqs: make(chan chan []peer.ID), + peers: make(map[peer.ID]*bsmq.MessageQueue), + wl: wantlist.NewThreadSafe(), + bcwl: wantlist.NewThreadSafe(), + network: network, + ctx: ctx, + cancel: cancel, + wantlistGauge: wantlistGauge, + sentHistogram: sentHistogram, + } +} + +// WantBlocks adds the given cids to the wantlist, tracked by the given session +func (wm *WantManager) WantBlocks(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) { + log.Infof("want blocks: %s", ks) + wm.addEntries(ctx, ks, peers, false, ses) +} + +// CancelWants removes the given cids from the wantlist, tracked by the given session +func (wm *WantManager) CancelWants(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) { + wm.addEntries(context.Background(), ks, peers, true, ses) +} + +type wantSet struct { + entries []*bsmsg.Entry + targets []peer.ID + from uint64 +} + +func (wm *WantManager) addEntries(ctx context.Context, ks []cid.Cid, targets []peer.ID, cancel bool, ses uint64) { + entries := make([]*bsmsg.Entry, 0, len(ks)) + for i, k := range ks { + entries = append(entries, &bsmsg.Entry{ + Cancel: cancel, + Entry: wantlist.NewRefEntry(k, kMaxPriority-i), + }) + } + select { + case wm.incoming <- &wantSet{entries: entries, targets: targets, from: ses}: + case <-wm.ctx.Done(): + case <-ctx.Done(): + } +} + +func (wm *WantManager) ConnectedPeers() []peer.ID { + resp := make(chan []peer.ID) + wm.peerReqs <- resp + return <-resp +} + +func (wm *WantManager) SendBlocks(ctx context.Context, env *engine.Envelope) { + // Blocks need to be sent synchronously to maintain proper backpressure + // throughout the network stack + defer env.Sent() + + msgSize := 0 + msg := 
bsmsg.New(false) + for _, block := range env.Message.Blocks() { + msgSize += len(block.RawData()) + msg.AddBlock(block) + log.Infof("Sending block %s to %s", block, env.Peer) + } + + wm.sentHistogram.Observe(float64(msgSize)) + err := wm.network.SendMessage(ctx, env.Peer, msg) + if err != nil { + log.Infof("sendblock error: %s", err) + } +} + +func (wm *WantManager) startPeerHandler(p peer.ID) *bsmq.MessageQueue { + mq, ok := wm.peers[p] + if ok { + mq.RefIncrement() + return nil + } + + mq = bsmq.New(p, wm.network) + wm.peers[p] = mq + mq.Startup(wm.ctx, wm.bcwl.Entries()) + return mq +} + +func (wm *WantManager) stopPeerHandler(p peer.ID) { + pq, ok := wm.peers[p] + if !ok { + // TODO: log error? + return + } + + if pq.RefDecrement() { + return + } + + pq.Shutdown() + delete(wm.peers, p) +} + +func (wm *WantManager) Connected(p peer.ID) { + select { + case wm.connectEvent <- peerStatus{peer: p, connect: true}: + case <-wm.ctx.Done(): + } +} + +func (wm *WantManager) Disconnected(p peer.ID) { + select { + case wm.connectEvent <- peerStatus{peer: p, connect: false}: + case <-wm.ctx.Done(): + } +} + +// TODO: use goprocess here once i trust it +func (wm *WantManager) Run() { + // NOTE: Do not open any streams or connections from anywhere in this + // event loop. Really, just don't do anything likely to block. + for { + select { + case ws := <-wm.incoming: + + // is this a broadcast or not? 
+ brdc := len(ws.targets) == 0 + + // add changes to our wantlist + for _, e := range ws.entries { + if e.Cancel { + if brdc { + wm.bcwl.Remove(e.Cid, ws.from) + } + + if wm.wl.Remove(e.Cid, ws.from) { + wm.wantlistGauge.Dec() + } + } else { + if brdc { + wm.bcwl.AddEntry(e.Entry, ws.from) + } + if wm.wl.AddEntry(e.Entry, ws.from) { + wm.wantlistGauge.Inc() + } + } + } + + // broadcast those wantlist changes + if len(ws.targets) == 0 { + for _, p := range wm.peers { + p.AddMessage(ws.entries, ws.from) + } + } else { + for _, t := range ws.targets { + p, ok := wm.peers[t] + if !ok { + log.Infof("tried sending wantlist change to non-partner peer: %s", t) + continue + } + p.AddMessage(ws.entries, ws.from) + } + } + + case p := <-wm.connectEvent: + if p.connect { + wm.startPeerHandler(p.peer) + } else { + wm.stopPeerHandler(p.peer) + } + case req := <-wm.peerReqs: + peers := make([]peer.ID, 0, len(wm.peers)) + for p := range wm.peers { + peers = append(peers, p) + } + req <- peers + case <-wm.ctx.Done(): + return + } + } +} + +func (wm *WantManager) IsWanted(c cid.Cid) bool { + _, isWanted := wm.wl.Contains(c) + return isWanted +} + +func (wm *WantManager) CurrentWants() []*wantlist.Entry { + return wm.wl.Entries() +} + +func (wm *WantManager) WantCount() int { + return wm.wl.Len() +} diff --git a/bitswap/workers.go b/bitswap/workers.go index 3fbe1bb15..34b75bab2 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -183,13 +183,13 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) { log.Event(ctx, "Bitswap.Rebroadcast.idle") select { case <-tick.C: - n := bs.wm.wl.Len() + n := bs.wm.WantCount() if n > 0 { log.Debug(n, " keys in bitswap wantlist") } case <-broadcastSignal.C: // resend unfulfilled wantlist keys log.Event(ctx, "Bitswap.Rebroadcast.active") - entries := bs.wm.wl.Entries() + entries := bs.wm.CurrentWants() if len(entries) == 0 { continue } From cc83c6cfc2177149e9ef656030c9aa5667852a5f Mon Sep 17 00:00:00 2001 From: hannahhoward Date: 
Mon, 26 Nov 2018 19:10:17 -0800 Subject: [PATCH 0661/1035] refactor(WantManager): extract PeerManager Seperates the functions of tracking wants from tracking peers Unit tests for both peer manager and want manager Refactor internals of both to address synchonization issues discovered in tests This commit was moved from ipfs/go-bitswap@693085c9c90faa8fe516ffe6979e5bc8c749c478 --- bitswap/bitswap.go | 36 ++- bitswap/bitswap_test.go | 4 +- bitswap/peermanager/peermanager.go | 192 ++++++++++++++++ bitswap/peermanager/peermanager_test.go | 128 +++++++++++ bitswap/wantmanager/wantmanager.go | 277 +++++++++++------------- bitswap/wantmanager/wantmanager_test.go | 244 +++++++++++++++++++++ bitswap/workers.go | 24 +- 7 files changed, 739 insertions(+), 166 deletions(-) create mode 100644 bitswap/peermanager/peermanager.go create mode 100644 bitswap/peermanager/peermanager_test.go create mode 100644 bitswap/wantmanager/wantmanager_test.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 0e8fbf4e9..b3e472d2d 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -11,8 +11,10 @@ import ( decision "github.com/ipfs/go-bitswap/decision" bsmsg "github.com/ipfs/go-bitswap/message" + bsmq "github.com/ipfs/go-bitswap/messagequeue" bsnet "github.com/ipfs/go-bitswap/network" notifications "github.com/ipfs/go-bitswap/notifications" + bspm "github.com/ipfs/go-bitswap/peermanager" bssm "github.com/ipfs/go-bitswap/sessionmanager" bswm "github.com/ipfs/go-bitswap/wantmanager" @@ -85,12 +87,19 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, allHist := metrics.NewCtx(ctx, "recv_all_blocks_bytes", "Summary of all"+ " data blocks recived").Histogram(metricsBuckets) + sentHistogram := metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by"+ + " this bitswap").Histogram(metricsBuckets) + notif := notifications.New() px := process.WithTeardown(func() error { notif.Shutdown() return nil }) + peerQueueFactory := func(p peer.ID) bspm.PeerQueue { + 
return bsmq.New(p, network) + } + bs := &Bitswap{ blockstore: bstore, notifications: notif, @@ -100,14 +109,18 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, process: px, newBlocks: make(chan cid.Cid, HasBlockBufferSize), provideKeys: make(chan cid.Cid, provideKeysBufferSize), - wm: bswm.New(ctx, network), + wm: bswm.New(ctx), + pm: bspm.New(ctx, peerQueueFactory), sm: bssm.New(), counters: new(counters), - - dupMetric: dupHist, - allMetric: allHist, + dupMetric: dupHist, + allMetric: allHist, + sentHistogram: sentHistogram, } - go bs.wm.Run() + + bs.wm.SetDelegate(bs.pm) + bs.pm.Startup() + bs.wm.Startup() network.SetDelegate(bs) // Start up bitswaps async worker routines @@ -128,6 +141,9 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, type Bitswap struct { // the peermanager manages sending messages to peers in a way that // wont block bitswap operation + pm *bspm.PeerManager + + // the wantlist tracks global wants for bitswap wm *bswm.WantManager // the engine is the bit of logic that decides who to send which blocks to @@ -160,8 +176,9 @@ type Bitswap struct { counters *counters // Metrics interface metrics - dupMetric metrics.Histogram - allMetric metrics.Histogram + dupMetric metrics.Histogram + allMetric metrics.Histogram + sentHistogram metrics.Histogram // the sessionmanager manages tracking sessions sm *bssm.SessionManager @@ -427,13 +444,14 @@ func (bs *Bitswap) updateReceiveCounters(b blocks.Block) { // Connected/Disconnected warns bitswap about peer connections func (bs *Bitswap) PeerConnected(p peer.ID) { - bs.wm.Connected(p) + initialWants := bs.wm.CurrentBroadcastWants() + bs.pm.Connected(p, initialWants) bs.engine.PeerConnected(p) } // Connected/Disconnected warns bitswap about peer connections func (bs *Bitswap) PeerDisconnected(p peer.ID) { - bs.wm.Disconnected(p) + bs.pm.Disconnected(p) bs.engine.PeerDisconnected(p) } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index d55fd0733..ef2d73b8d 
100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -202,10 +202,10 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { nump := len(instances) - 1 // assert we're properly connected for _, inst := range instances { - peers := inst.Exchange.wm.ConnectedPeers() + peers := inst.Exchange.pm.ConnectedPeers() for i := 0; i < 10 && len(peers) != nump; i++ { time.Sleep(time.Millisecond * 50) - peers = inst.Exchange.wm.ConnectedPeers() + peers = inst.Exchange.pm.ConnectedPeers() } if len(peers) != nump { t.Fatal("not enough peers connected to instance") diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go new file mode 100644 index 000000000..2fea3ef85 --- /dev/null +++ b/bitswap/peermanager/peermanager.go @@ -0,0 +1,192 @@ +package peermanager + +import ( + "context" + + bsmsg "github.com/ipfs/go-bitswap/message" + wantlist "github.com/ipfs/go-bitswap/wantlist" + logging "github.com/ipfs/go-log" + + peer "github.com/libp2p/go-libp2p-peer" +) + +var log = logging.Logger("bitswap") + +var ( + metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} +) + +type sendMessageParams struct { + entries []*bsmsg.Entry + targets []peer.ID + from uint64 +} + +type connectParams struct { + peer peer.ID + initialEntries []*wantlist.Entry +} + +type peerMessageType int + +const ( + connect peerMessageType = iota + 1 + disconnect + getPeers + sendMessage +) + +type peerMessage struct { + messageType peerMessageType + params interface{} + resultsChan chan interface{} +} + +type PeerQueue interface { + RefIncrement() + RefDecrement() bool + AddMessage(entries []*bsmsg.Entry, ses uint64) + Startup(ctx context.Context, initialEntries []*wantlist.Entry) + Shutdown() +} + +type PeerQueueFactory func(p peer.ID) PeerQueue + +type PeerManager struct { + // sync channel for Run loop + peerMessages chan peerMessage + + // synchronized by Run loop, only touch inside there + peerQueues 
map[peer.ID]PeerQueue + + createPeerQueue PeerQueueFactory + ctx context.Context + cancel func() +} + +func New(ctx context.Context, createPeerQueue PeerQueueFactory) *PeerManager { + ctx, cancel := context.WithCancel(ctx) + return &PeerManager{ + peerMessages: make(chan peerMessage, 10), + peerQueues: make(map[peer.ID]PeerQueue), + createPeerQueue: createPeerQueue, + ctx: ctx, + cancel: cancel, + } +} + +func (pm *PeerManager) ConnectedPeers() []peer.ID { + resp := make(chan interface{}) + pm.peerMessages <- peerMessage{getPeers, nil, resp} + peers := <-resp + return peers.([]peer.ID) +} + +func (pm *PeerManager) startPeerHandler(p peer.ID, initialEntries []*wantlist.Entry) PeerQueue { + mq, ok := pm.peerQueues[p] + if ok { + mq.RefIncrement() + return nil + } + + mq = pm.createPeerQueue(p) + pm.peerQueues[p] = mq + mq.Startup(pm.ctx, initialEntries) + return mq +} + +func (pm *PeerManager) stopPeerHandler(p peer.ID) { + pq, ok := pm.peerQueues[p] + if !ok { + // TODO: log error? + return + } + + if pq.RefDecrement() { + return + } + + pq.Shutdown() + delete(pm.peerQueues, p) +} + +func (pm *PeerManager) Connected(p peer.ID, initialEntries []*wantlist.Entry) { + select { + case pm.peerMessages <- peerMessage{connect, connectParams{peer: p, initialEntries: initialEntries}, nil}: + case <-pm.ctx.Done(): + } +} + +func (pm *PeerManager) Disconnected(p peer.ID) { + select { + case pm.peerMessages <- peerMessage{disconnect, p, nil}: + case <-pm.ctx.Done(): + } +} + +func (pm *PeerManager) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) { + select { + case pm.peerMessages <- peerMessage{ + sendMessage, + &sendMessageParams{entries: entries, targets: targets, from: from}, + nil, + }: + case <-pm.ctx.Done(): + } +} + +func (pm *PeerManager) Startup() { + go pm.run() +} + +func (pm *PeerManager) Shutdown() { + pm.cancel() +} + +// TODO: use goprocess here once i trust it +func (pm *PeerManager) run() { + // NOTE: Do not open any streams or connections 
from anywhere in this + // event loop. Really, just don't do anything likely to block. + for { + select { + case message := <-pm.peerMessages: + pm.handleMessage(message) + case <-pm.ctx.Done(): + return + } + } +} + +func (pm *PeerManager) handleMessage(message peerMessage) { + + switch message.messageType { + case sendMessage: + ms := message.params.(*sendMessageParams) + if len(ms.targets) == 0 { + for _, p := range pm.peerQueues { + p.AddMessage(ms.entries, ms.from) + } + } else { + for _, t := range ms.targets { + p, ok := pm.peerQueues[t] + if !ok { + log.Infof("tried sending wantlist change to non-partner peer: %s", t) + continue + } + p.AddMessage(ms.entries, ms.from) + } + } + case connect: + p := message.params.(connectParams) + pm.startPeerHandler(p.peer, p.initialEntries) + case disconnect: + disconnectPeer := message.params.(peer.ID) + pm.stopPeerHandler(disconnectPeer) + case getPeers: + peers := make([]peer.ID, 0, len(pm.peerQueues)) + for p := range pm.peerQueues { + peers = append(peers, p) + } + message.resultsChan <- peers + } +} diff --git a/bitswap/peermanager/peermanager_test.go b/bitswap/peermanager/peermanager_test.go new file mode 100644 index 000000000..c6260df69 --- /dev/null +++ b/bitswap/peermanager/peermanager_test.go @@ -0,0 +1,128 @@ +package peermanager + +import ( + "context" + "testing" + + bsmsg "github.com/ipfs/go-bitswap/message" + wantlist "github.com/ipfs/go-bitswap/wantlist" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-ipfs-blocksutil" + "github.com/libp2p/go-libp2p-peer" +) + +var blockGenerator = blocksutil.NewBlockGenerator() + +func generateCids(n int) []cid.Cid { + cids := make([]cid.Cid, 0, n) + for i := 0; i < n; i++ { + c := blockGenerator.Next().Cid() + cids = append(cids, c) + } + return cids +} + +var peerSeq int + +func generatePeers(n int) []peer.ID { + peerIds := make([]peer.ID, 0, n) + for i := 0; i < n; i++ { + peerSeq++ + p := peer.ID(peerSeq) + peerIds = append(peerIds, p) + } + return peerIds +} + +var 
nextSession uint64 + +func generateSessionID() uint64 { + nextSession++ + return uint64(nextSession) +} + +type messageSent struct { + p peer.ID + entries []*bsmsg.Entry + ses uint64 +} + +type fakePeer struct { + refcnt int + p peer.ID + messagesSent chan messageSent +} + +func containsPeer(peers []peer.ID, p peer.ID) bool { + for _, n := range peers { + if p == n { + return true + } + } + return false +} + +func (fp *fakePeer) Startup(ctx context.Context, initialEntries []*wantlist.Entry) {} +func (fp *fakePeer) Shutdown() {} +func (fp *fakePeer) RefIncrement() { fp.refcnt++ } +func (fp *fakePeer) RefDecrement() bool { + fp.refcnt-- + return fp.refcnt > 0 +} +func (fp *fakePeer) AddMessage(entries []*bsmsg.Entry, ses uint64) { + fp.messagesSent <- messageSent{fp.p, entries, ses} +} + +func makePeerQueueFactory(messagesSent chan messageSent) PeerQueueFactory { + return func(p peer.ID) PeerQueue { + return &fakePeer{ + p: p, + refcnt: 1, + messagesSent: messagesSent, + } + } +} + +func TestAddingAndRemovingPeers(t *testing.T) { + ctx := context.Background() + peerQueueFactory := makePeerQueueFactory(nil) + + tp := generatePeers(5) + peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4] + peerManager := New(ctx, peerQueueFactory) + peerManager.Startup() + + peerManager.Connected(peer1, nil) + peerManager.Connected(peer2, nil) + peerManager.Connected(peer3, nil) + + connectedPeers := peerManager.ConnectedPeers() + + if !containsPeer(connectedPeers, peer1) || + !containsPeer(connectedPeers, peer2) || + !containsPeer(connectedPeers, peer3) { + t.Fatal("Peers not connected that should be connected") + } + + if containsPeer(connectedPeers, peer4) || + containsPeer(connectedPeers, peer5) { + t.Fatal("Peers connected that shouldn't be connected") + } + + // removing a peer with only one reference + peerManager.Disconnected(peer1) + connectedPeers = peerManager.ConnectedPeers() + + if containsPeer(connectedPeers, peer1) { + t.Fatal("Peer should have been 
disconnected but was not") + } + + // connecting a peer twice, then disconnecting once, should stay in queue + peerManager.Connected(peer2, nil) + peerManager.Disconnected(peer2) + connectedPeers = peerManager.ConnectedPeers() + + if !containsPeer(connectedPeers, peer2) { + t.Fatal("Peer was disconnected but should not have been") + } +} diff --git a/bitswap/wantmanager/wantmanager.go b/bitswap/wantmanager/wantmanager.go index e3734290c..a9ea90163 100644 --- a/bitswap/wantmanager/wantmanager.go +++ b/bitswap/wantmanager/wantmanager.go @@ -4,10 +4,7 @@ import ( "context" "math" - engine "github.com/ipfs/go-bitswap/decision" bsmsg "github.com/ipfs/go-bitswap/message" - bsmq "github.com/ipfs/go-bitswap/messagequeue" - bsnet "github.com/ipfs/go-bitswap/network" wantlist "github.com/ipfs/go-bitswap/wantlist" logging "github.com/ipfs/go-log" @@ -19,59 +16,72 @@ import ( var log = logging.Logger("bitswap") const ( - // kMaxPriority is the max priority as defined by the bitswap protocol - kMaxPriority = math.MaxInt32 + // maxPriority is the max priority as defined by the bitswap protocol + maxPriority = math.MaxInt32 ) -var ( - metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} +// WantSender sends changes out to the network as they get added to the wantlist +// managed by the WantManager +type WantSender interface { + SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) +} + +type wantMessageType int + +const ( + isWanted wantMessageType = iota + 1 + addWants + currentWants + currentBroadcastWants + wantCount ) +type wantMessage struct { + messageType wantMessageType + params interface{} + resultsChan chan interface{} +} + +// WantManager manages a global want list. 
It tracks two seperate want lists - +// one for all wants, and one for wants that are specifically broadcast to the +// internet type WantManager struct { - // sync channels for Run loop - incoming chan *wantSet - connectEvent chan peerStatus // notification channel for peers connecting/disconnecting - peerReqs chan chan []peer.ID // channel to request connected peers on + // channel requests to the run loop + // to get predictable behavior while running this in a go routine + // having only one channel is neccesary, so requests are processed serially + messageReqs chan wantMessage // synchronized by Run loop, only touch inside there - peers map[peer.ID]*bsmq.MessageQueue - wl *wantlist.ThreadSafe - bcwl *wantlist.ThreadSafe + wl *wantlist.ThreadSafe + bcwl *wantlist.ThreadSafe - network bsnet.BitSwapNetwork - ctx context.Context - cancel func() + ctx context.Context + cancel func() + wantSender WantSender wantlistGauge metrics.Gauge - sentHistogram metrics.Histogram -} - -type peerStatus struct { - connect bool - peer peer.ID } -func New(ctx context.Context, network bsnet.BitSwapNetwork) *WantManager { +// New initializes a new WantManager +func New(ctx context.Context) *WantManager { ctx, cancel := context.WithCancel(ctx) wantlistGauge := metrics.NewCtx(ctx, "wantlist_total", "Number of items in wantlist.").Gauge() - sentHistogram := metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by"+ - " this bitswap").Histogram(metricsBuckets) return &WantManager{ - incoming: make(chan *wantSet, 10), - connectEvent: make(chan peerStatus, 10), - peerReqs: make(chan chan []peer.ID), - peers: make(map[peer.ID]*bsmq.MessageQueue), + messageReqs: make(chan wantMessage, 10), wl: wantlist.NewThreadSafe(), bcwl: wantlist.NewThreadSafe(), - network: network, ctx: ctx, cancel: cancel, wantlistGauge: wantlistGauge, - sentHistogram: sentHistogram, } } +// SetDelegate specifies who will send want changes out to the internet +func (wm *WantManager) 
SetDelegate(wantSender WantSender) { + wm.wantSender = wantSender +} + // WantBlocks adds the given cids to the wantlist, tracked by the given session func (wm *WantManager) WantBlocks(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) { log.Infof("want blocks: %s", ks) @@ -94,158 +104,119 @@ func (wm *WantManager) addEntries(ctx context.Context, ks []cid.Cid, targets []p for i, k := range ks { entries = append(entries, &bsmsg.Entry{ Cancel: cancel, - Entry: wantlist.NewRefEntry(k, kMaxPriority-i), + Entry: wantlist.NewRefEntry(k, maxPriority-i), }) } select { - case wm.incoming <- &wantSet{entries: entries, targets: targets, from: ses}: + case wm.messageReqs <- wantMessage{ + messageType: addWants, + params: &wantSet{entries: entries, targets: targets, from: ses}, + }: case <-wm.ctx.Done(): case <-ctx.Done(): } } -func (wm *WantManager) ConnectedPeers() []peer.ID { - resp := make(chan []peer.ID) - wm.peerReqs <- resp - return <-resp -} - -func (wm *WantManager) SendBlocks(ctx context.Context, env *engine.Envelope) { - // Blocks need to be sent synchronously to maintain proper backpressure - // throughout the network stack - defer env.Sent() - - msgSize := 0 - msg := bsmsg.New(false) - for _, block := range env.Message.Blocks() { - msgSize += len(block.RawData()) - msg.AddBlock(block) - log.Infof("Sending block %s to %s", block, env.Peer) - } - - wm.sentHistogram.Observe(float64(msgSize)) - err := wm.network.SendMessage(ctx, env.Peer, msg) - if err != nil { - log.Infof("sendblock error: %s", err) - } -} - -func (wm *WantManager) startPeerHandler(p peer.ID) *bsmq.MessageQueue { - mq, ok := wm.peers[p] - if ok { - mq.RefIncrement() - return nil - } - - mq = bsmq.New(p, wm.network) - wm.peers[p] = mq - mq.Startup(wm.ctx, wm.bcwl.Entries()) - return mq -} - -func (wm *WantManager) stopPeerHandler(p peer.ID) { - pq, ok := wm.peers[p] - if !ok { - // TODO: log error? 
- return - } - - if pq.RefDecrement() { - return - } - - pq.Shutdown() - delete(wm.peers, p) -} - -func (wm *WantManager) Connected(p peer.ID) { - select { - case wm.connectEvent <- peerStatus{peer: p, connect: true}: - case <-wm.ctx.Done(): - } +func (wm *WantManager) Startup() { + go wm.run() } -func (wm *WantManager) Disconnected(p peer.ID) { - select { - case wm.connectEvent <- peerStatus{peer: p, connect: false}: - case <-wm.ctx.Done(): - } +func (wm *WantManager) Shutdown() { + wm.cancel() } -// TODO: use goprocess here once i trust it -func (wm *WantManager) Run() { +func (wm *WantManager) run() { // NOTE: Do not open any streams or connections from anywhere in this // event loop. Really, just don't do anything likely to block. for { select { - case ws := <-wm.incoming: - - // is this a broadcast or not? - brdc := len(ws.targets) == 0 - - // add changes to our wantlist - for _, e := range ws.entries { - if e.Cancel { - if brdc { - wm.bcwl.Remove(e.Cid, ws.from) - } - - if wm.wl.Remove(e.Cid, ws.from) { - wm.wantlistGauge.Dec() - } - } else { - if brdc { - wm.bcwl.AddEntry(e.Entry, ws.from) - } - if wm.wl.AddEntry(e.Entry, ws.from) { - wm.wantlistGauge.Inc() - } + case message := <-wm.messageReqs: + wm.handleMessage(message) + case <-wm.ctx.Done(): + return + } + } +} + +func (wm *WantManager) handleMessage(message wantMessage) { + switch message.messageType { + case addWants: + ws := message.params.(*wantSet) + // is this a broadcast or not? 
+ brdc := len(ws.targets) == 0 + + // add changes to our wantlist + for _, e := range ws.entries { + if e.Cancel { + if brdc { + wm.bcwl.Remove(e.Cid, ws.from) } - } - // broadcast those wantlist changes - if len(ws.targets) == 0 { - for _, p := range wm.peers { - p.AddMessage(ws.entries, ws.from) + if wm.wl.Remove(e.Cid, ws.from) { + wm.wantlistGauge.Dec() } } else { - for _, t := range ws.targets { - p, ok := wm.peers[t] - if !ok { - log.Infof("tried sending wantlist change to non-partner peer: %s", t) - continue - } - p.AddMessage(ws.entries, ws.from) + if brdc { + wm.bcwl.AddEntry(e.Entry, ws.from) + } + if wm.wl.AddEntry(e.Entry, ws.from) { + wm.wantlistGauge.Inc() } } - - case p := <-wm.connectEvent: - if p.connect { - wm.startPeerHandler(p.peer) - } else { - wm.stopPeerHandler(p.peer) - } - case req := <-wm.peerReqs: - peers := make([]peer.ID, 0, len(wm.peers)) - for p := range wm.peers { - peers = append(peers, p) - } - req <- peers - case <-wm.ctx.Done(): - return } + + // broadcast those wantlist changes + wm.wantSender.SendMessage(ws.entries, ws.targets, ws.from) + case isWanted: + c := message.params.(cid.Cid) + _, isWanted := wm.wl.Contains(c) + message.resultsChan <- isWanted + case currentWants: + message.resultsChan <- wm.wl.Entries() + case currentBroadcastWants: + message.resultsChan <- wm.bcwl.Entries() + case wantCount: + message.resultsChan <- wm.wl.Len() } } func (wm *WantManager) IsWanted(c cid.Cid) bool { - _, isWanted := wm.wl.Contains(c) - return isWanted + resp := make(chan interface{}) + wm.messageReqs <- wantMessage{ + messageType: isWanted, + params: c, + resultsChan: resp, + } + result := <-resp + return result.(bool) } func (wm *WantManager) CurrentWants() []*wantlist.Entry { - return wm.wl.Entries() + resp := make(chan interface{}) + wm.messageReqs <- wantMessage{ + messageType: currentWants, + resultsChan: resp, + } + result := <-resp + return result.([]*wantlist.Entry) +} + +func (wm *WantManager) CurrentBroadcastWants() 
[]*wantlist.Entry { + resp := make(chan interface{}) + wm.messageReqs <- wantMessage{ + messageType: currentBroadcastWants, + resultsChan: resp, + } + result := <-resp + return result.([]*wantlist.Entry) } func (wm *WantManager) WantCount() int { - return wm.wl.Len() + resp := make(chan interface{}) + wm.messageReqs <- wantMessage{ + messageType: wantCount, + resultsChan: resp, + } + result := <-resp + return result.(int) } diff --git a/bitswap/wantmanager/wantmanager_test.go b/bitswap/wantmanager/wantmanager_test.go new file mode 100644 index 000000000..54cab8345 --- /dev/null +++ b/bitswap/wantmanager/wantmanager_test.go @@ -0,0 +1,244 @@ +package wantmanager + +import ( + "context" + "reflect" + "sync" + "testing" + + bsmsg "github.com/ipfs/go-bitswap/message" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-ipfs-blocksutil" + "github.com/libp2p/go-libp2p-peer" +) + +var blockGenerator = blocksutil.NewBlockGenerator() + +func generateCids(n int) []cid.Cid { + cids := make([]cid.Cid, 0, n) + for i := 0; i < n; i++ { + c := blockGenerator.Next().Cid() + cids = append(cids, c) + } + return cids +} + +var peerSeq int + +func generatePeers(n int) []peer.ID { + peerIds := make([]peer.ID, 0, n) + for i := 0; i < n; i++ { + peerSeq++ + p := peer.ID(peerSeq) + peerIds = append(peerIds, p) + } + return peerIds +} + +var nextSession uint64 + +func generateSessionID() uint64 { + nextSession++ + return uint64(nextSession) +} + +type fakeWantSender struct { + lk sync.RWMutex + lastWantSet wantSet +} + +func (fws *fakeWantSender) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) { + fws.lk.Lock() + fws.lastWantSet = wantSet{entries, targets, from} + fws.lk.Unlock() +} + +func (fws *fakeWantSender) getLastWantSet() wantSet { + fws.lk.Lock() + defer fws.lk.Unlock() + return fws.lastWantSet +} + +func setupTestFixturesAndInitialWantList() ( + context.Context, *fakeWantSender, *WantManager, []cid.Cid, []cid.Cid, []peer.ID, uint64, uint64) { + ctx := 
context.Background() + + // setup fixtures + wantSender := &fakeWantSender{} + wantManager := New(ctx) + keys := generateCids(10) + otherKeys := generateCids(5) + peers := generatePeers(10) + session := generateSessionID() + otherSession := generateSessionID() + + // startup wantManager + wantManager.SetDelegate(wantSender) + wantManager.Startup() + + // add initial wants + wantManager.WantBlocks( + ctx, + keys, + peers, + session) + + return ctx, wantSender, wantManager, keys, otherKeys, peers, session, otherSession +} + +func TestInitialWantsAddedCorrectly(t *testing.T) { + + _, wantSender, wantManager, keys, _, peers, session, _ := + setupTestFixturesAndInitialWantList() + + bcwl := wantManager.CurrentBroadcastWants() + wl := wantManager.CurrentWants() + + if len(bcwl) > 0 { + t.Fatal("should not create broadcast wants when peers are specified") + } + + if len(wl) != len(keys) { + t.Fatal("did not add correct number of wants to want lsit") + } + + generatedWantSet := wantSender.getLastWantSet() + + if len(generatedWantSet.entries) != len(keys) { + t.Fatal("incorrect wants sent") + } + + for _, entry := range generatedWantSet.entries { + if entry.Cancel { + t.Fatal("did not send only non-cancel messages") + } + } + + if generatedWantSet.from != session { + t.Fatal("incorrect session used in sending") + } + + if !reflect.DeepEqual(generatedWantSet.targets, peers) { + t.Fatal("did not setup peers correctly") + } + + wantManager.Shutdown() +} + +func TestCancellingWants(t *testing.T) { + ctx, wantSender, wantManager, keys, _, peers, session, _ := + setupTestFixturesAndInitialWantList() + + wantManager.CancelWants(ctx, keys, peers, session) + + wl := wantManager.CurrentWants() + + if len(wl) != 0 { + t.Fatal("did not remove blocks from want list") + } + + generatedWantSet := wantSender.getLastWantSet() + + if len(generatedWantSet.entries) != len(keys) { + t.Fatal("incorrect wants sent") + } + + for _, entry := range generatedWantSet.entries { + if !entry.Cancel { + 
t.Fatal("did not send only cancel messages") + } + } + + if generatedWantSet.from != session { + t.Fatal("incorrect session used in sending") + } + + if !reflect.DeepEqual(generatedWantSet.targets, peers) { + t.Fatal("did not setup peers correctly") + } + + wantManager.Shutdown() + +} + +func TestCancellingWantsFromAnotherSessionHasNoEffect(t *testing.T) { + ctx, _, wantManager, keys, _, peers, _, otherSession := + setupTestFixturesAndInitialWantList() + + // cancelling wants from another session has no effect + wantManager.CancelWants(ctx, keys, peers, otherSession) + + wl := wantManager.CurrentWants() + + if len(wl) != len(keys) { + t.Fatal("should not cancel wants unless they match session that made them") + } + + wantManager.Shutdown() +} + +func TestAddingWantsWithNoPeersAddsToBroadcastAndRegularWantList(t *testing.T) { + ctx, _, wantManager, keys, otherKeys, _, session, _ := + setupTestFixturesAndInitialWantList() + + wantManager.WantBlocks(ctx, otherKeys, nil, session) + + bcwl := wantManager.CurrentBroadcastWants() + wl := wantManager.CurrentWants() + + if len(bcwl) != len(otherKeys) { + t.Fatal("want requests with no peers should get added to broadcast list") + } + + if len(wl) != len(otherKeys)+len(keys) { + t.Fatal("want requests with no peers should get added to regular want list") + } + + wantManager.Shutdown() +} + +func TestAddingRequestFromSecondSessionPreventsCancel(t *testing.T) { + ctx, wantSender, wantManager, keys, _, peers, session, otherSession := + setupTestFixturesAndInitialWantList() + + // add a second session requesting the first key + firstKeys := append([]cid.Cid(nil), keys[0]) + wantManager.WantBlocks(ctx, firstKeys, peers, otherSession) + + wl := wantManager.CurrentWants() + + if len(wl) != len(keys) { + t.Fatal("wants from other sessions should not get added seperately") + } + + generatedWantSet := wantSender.getLastWantSet() + if len(generatedWantSet.entries) != len(firstKeys) && + generatedWantSet.from != otherSession && + 
generatedWantSet.entries[0].Cid != firstKeys[0] && + generatedWantSet.entries[0].Cancel != false { + t.Fatal("should send additional message requesting want for new session") + } + + // cancel block from first session + wantManager.CancelWants(ctx, firstKeys, peers, session) + + wl = wantManager.CurrentWants() + + // want should still be on want list + if len(wl) != len(keys) { + t.Fatal("wants should not be removed until all sessions cancel wants") + } + + // cancel other block from first session + secondKeys := append([]cid.Cid(nil), keys[1]) + wantManager.CancelWants(ctx, secondKeys, peers, session) + + wl = wantManager.CurrentWants() + + // want should not be on want list, cause it was only tracked by one session + if len(wl) != len(keys)-1 { + t.Fatal("wants should be removed if all sessions have cancelled") + } + + wantManager.Shutdown() +} diff --git a/bitswap/workers.go b/bitswap/workers.go index 34b75bab2..99a967068 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -6,8 +6,8 @@ import ( "sync" "time" + engine "github.com/ipfs/go-bitswap/decision" bsmsg "github.com/ipfs/go-bitswap/message" - cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" process "github.com/jbenet/goprocess" @@ -74,7 +74,7 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { } bs.engine.MessageSent(envelope.Peer, outgoing) - bs.wm.SendBlocks(ctx, envelope) + bs.sendBlocks(ctx, envelope) bs.counterLk.Lock() for _, block := range envelope.Message.Blocks() { bs.counters.blocksSent++ @@ -90,6 +90,26 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { } } +func (bs *Bitswap) sendBlocks(ctx context.Context, env *engine.Envelope) { + // Blocks need to be sent synchronously to maintain proper backpressure + // throughout the network stack + defer env.Sent() + + msgSize := 0 + msg := bsmsg.New(false) + for _, block := range env.Message.Blocks() { + msgSize += len(block.RawData()) + msg.AddBlock(block) + log.Infof("Sending block %s to %s", block, 
env.Peer) + } + + bs.sentHistogram.Observe(float64(msgSize)) + err := bs.network.SendMessage(ctx, env.Peer, msg) + if err != nil { + log.Infof("sendblock error: %s", err) + } +} + func (bs *Bitswap) provideWorker(px process.Process) { limit := make(chan struct{}, provideWorkerMax) From 55c9e30f95e215011a25fa4dd10b69726fdbb39b Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 27 Nov 2018 11:37:53 -0800 Subject: [PATCH 0662/1035] refactor(Managers): Further cleanup Finishing adding comments to WantManager and PeerManager, refactor message structure for type safety, add sending messages test This commit was moved from ipfs/go-bitswap@9ed150a736762ebc62bf7fc2d0d3639e52a50bc7 --- bitswap/peermanager/peermanager.go | 203 ++++++++++++----------- bitswap/peermanager/peermanager_test.go | 106 +++++++++++- bitswap/wantmanager/wantmanager.go | 212 ++++++++++++------------ 3 files changed, 309 insertions(+), 212 deletions(-) diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index 2fea3ef85..379fd4bd2 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -16,32 +16,7 @@ var ( metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} ) -type sendMessageParams struct { - entries []*bsmsg.Entry - targets []peer.ID - from uint64 -} - -type connectParams struct { - peer peer.ID - initialEntries []*wantlist.Entry -} - -type peerMessageType int - -const ( - connect peerMessageType = iota + 1 - disconnect - getPeers - sendMessage -) - -type peerMessage struct { - messageType peerMessageType - params interface{} - resultsChan chan interface{} -} - +// PeerQueue provides a queer of messages to be sent for a single peer type PeerQueue interface { RefIncrement() RefDecrement() bool @@ -50,8 +25,14 @@ type PeerQueue interface { Shutdown() } +// PeerQueueFactory provides a function that will create a PeerQueue type PeerQueueFactory func(p peer.ID) PeerQueue +type peerMessage interface { + 
handle(pm *PeerManager) +} + +// PeerManager manages a pool of peers and sends messages to peers in the pool type PeerManager struct { // sync channel for Run loop peerMessages chan peerMessage @@ -64,6 +45,7 @@ type PeerManager struct { cancel func() } +// New creates a new PeerManager, given a context and a peerQueueFactory func New(ctx context.Context, createPeerQueue PeerQueueFactory) *PeerManager { ctx, cancel := context.WithCancel(ctx) return &PeerManager{ @@ -75,118 +57,145 @@ func New(ctx context.Context, createPeerQueue PeerQueueFactory) *PeerManager { } } +// ConnectedPeers returns a list of peers this PeerManager is managing func (pm *PeerManager) ConnectedPeers() []peer.ID { - resp := make(chan interface{}) - pm.peerMessages <- peerMessage{getPeers, nil, resp} - peers := <-resp - return peers.([]peer.ID) -} - -func (pm *PeerManager) startPeerHandler(p peer.ID, initialEntries []*wantlist.Entry) PeerQueue { - mq, ok := pm.peerQueues[p] - if ok { - mq.RefIncrement() - return nil - } - - mq = pm.createPeerQueue(p) - pm.peerQueues[p] = mq - mq.Startup(pm.ctx, initialEntries) - return mq -} - -func (pm *PeerManager) stopPeerHandler(p peer.ID) { - pq, ok := pm.peerQueues[p] - if !ok { - // TODO: log error? 
- return - } - - if pq.RefDecrement() { - return - } - - pq.Shutdown() - delete(pm.peerQueues, p) + resp := make(chan []peer.ID) + pm.peerMessages <- &getPeersMessage{resp} + return <-resp } +// Connected is called to add a new peer to the pool, and send it an initial set +// of wants func (pm *PeerManager) Connected(p peer.ID, initialEntries []*wantlist.Entry) { select { - case pm.peerMessages <- peerMessage{connect, connectParams{peer: p, initialEntries: initialEntries}, nil}: + case pm.peerMessages <- &connectPeerMessage{p, initialEntries}: case <-pm.ctx.Done(): } } +// Disconnected is called to remove a peer from the pool func (pm *PeerManager) Disconnected(p peer.ID) { select { - case pm.peerMessages <- peerMessage{disconnect, p, nil}: + case pm.peerMessages <- &disconnectPeerMessage{p}: case <-pm.ctx.Done(): } } +// SendMessage is called to send a message to all or some peers in the pool +// if targets is nil, it sends to all func (pm *PeerManager) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) { select { - case pm.peerMessages <- peerMessage{ - sendMessage, - &sendMessageParams{entries: entries, targets: targets, from: from}, - nil, - }: + case pm.peerMessages <- &sendPeerMessage{entries: entries, targets: targets, from: from}: case <-pm.ctx.Done(): } } +// Startup enables the run loop for the PeerManager - no processing will occur +// if startup is not called func (pm *PeerManager) Startup() { go pm.run() } +// Shutdown shutsdown processing for the PeerManager func (pm *PeerManager) Shutdown() { pm.cancel() } -// TODO: use goprocess here once i trust it func (pm *PeerManager) run() { - // NOTE: Do not open any streams or connections from anywhere in this - // event loop. Really, just don't do anything likely to block. 
for { select { case message := <-pm.peerMessages: - pm.handleMessage(message) + message.handle(pm) case <-pm.ctx.Done(): return } } } -func (pm *PeerManager) handleMessage(message peerMessage) { +type sendPeerMessage struct { + entries []*bsmsg.Entry + targets []peer.ID + from uint64 +} - switch message.messageType { - case sendMessage: - ms := message.params.(*sendMessageParams) - if len(ms.targets) == 0 { - for _, p := range pm.peerQueues { - p.AddMessage(ms.entries, ms.from) - } - } else { - for _, t := range ms.targets { - p, ok := pm.peerQueues[t] - if !ok { - log.Infof("tried sending wantlist change to non-partner peer: %s", t) - continue - } - p.AddMessage(ms.entries, ms.from) - } +func (s *sendPeerMessage) handle(pm *PeerManager) { + pm.sendMessage(s) +} + +type connectPeerMessage struct { + p peer.ID + initialEntries []*wantlist.Entry +} + +func (c *connectPeerMessage) handle(pm *PeerManager) { + pm.startPeerHandler(c.p, c.initialEntries) +} + +type disconnectPeerMessage struct { + p peer.ID +} + +func (dc *disconnectPeerMessage) handle(pm *PeerManager) { + pm.stopPeerHandler(dc.p) +} + +type getPeersMessage struct { + peerResp chan<- []peer.ID +} + +func (gp *getPeersMessage) handle(pm *PeerManager) { + pm.getPeers(gp.peerResp) +} + +func (pm *PeerManager) getPeers(peerResp chan<- []peer.ID) { + peers := make([]peer.ID, 0, len(pm.peerQueues)) + for p := range pm.peerQueues { + peers = append(peers, p) + } + peerResp <- peers +} + +func (pm *PeerManager) startPeerHandler(p peer.ID, initialEntries []*wantlist.Entry) PeerQueue { + mq, ok := pm.peerQueues[p] + if ok { + mq.RefIncrement() + return nil + } + + mq = pm.createPeerQueue(p) + pm.peerQueues[p] = mq + mq.Startup(pm.ctx, initialEntries) + return mq +} + +func (pm *PeerManager) stopPeerHandler(p peer.ID) { + pq, ok := pm.peerQueues[p] + if !ok { + // TODO: log error? 
+ return + } + + if pq.RefDecrement() { + return + } + + pq.Shutdown() + delete(pm.peerQueues, p) +} + +func (pm *PeerManager) sendMessage(ms *sendPeerMessage) { + if len(ms.targets) == 0 { + for _, p := range pm.peerQueues { + p.AddMessage(ms.entries, ms.from) } - case connect: - p := message.params.(connectParams) - pm.startPeerHandler(p.peer, p.initialEntries) - case disconnect: - disconnectPeer := message.params.(peer.ID) - pm.stopPeerHandler(disconnectPeer) - case getPeers: - peers := make([]peer.ID, 0, len(pm.peerQueues)) - for p := range pm.peerQueues { - peers = append(peers, p) + } else { + for _, t := range ms.targets { + p, ok := pm.peerQueues[t] + if !ok { + log.Infof("tried sending wantlist change to non-partner peer: %s", t) + continue + } + p.AddMessage(ms.entries, ms.from) } - message.resultsChan <- peers } } diff --git a/bitswap/peermanager/peermanager_test.go b/bitswap/peermanager/peermanager_test.go index c6260df69..67ba38ae4 100644 --- a/bitswap/peermanager/peermanager_test.go +++ b/bitswap/peermanager/peermanager_test.go @@ -2,24 +2,30 @@ package peermanager import ( "context" + "reflect" "testing" + "time" bsmsg "github.com/ipfs/go-bitswap/message" wantlist "github.com/ipfs/go-bitswap/wantlist" - "github.com/ipfs/go-cid" "github.com/ipfs/go-ipfs-blocksutil" "github.com/libp2p/go-libp2p-peer" ) var blockGenerator = blocksutil.NewBlockGenerator() +var prioritySeq int -func generateCids(n int) []cid.Cid { - cids := make([]cid.Cid, 0, n) +func generateEntries(n int, isCancel bool) []*bsmsg.Entry { + bsmsgs := make([]*bsmsg.Entry, 0, n) for i := 0; i < n; i++ { - c := blockGenerator.Next().Cid() - cids = append(cids, c) + prioritySeq++ + msg := &bsmsg.Entry{ + Entry: wantlist.NewRefEntry(blockGenerator.Next().Cid(), prioritySeq), + Cancel: isCancel, + } + bsmsgs = append(bsmsgs, msg) } - return cids + return bsmsgs } var peerSeq int @@ -83,6 +89,32 @@ func makePeerQueueFactory(messagesSent chan messageSent) PeerQueueFactory { } } +func 
collectAndCheckMessages( + ctx context.Context, + t *testing.T, + messagesSent <-chan messageSent, + entries []*bsmsg.Entry, + ses uint64, + timeout time.Duration) []peer.ID { + var peersReceived []peer.ID + timeoutCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + for { + select { + case nextMessage := <-messagesSent: + if nextMessage.ses != ses { + t.Fatal("Message enqueued with wrong session") + } + if !reflect.DeepEqual(nextMessage.entries, entries) { + t.Fatal("Message enqueued with wrong wants") + } + peersReceived = append(peersReceived, nextMessage.p) + case <-timeoutCtx.Done(): + return peersReceived + } + } +} + func TestAddingAndRemovingPeers(t *testing.T) { ctx := context.Background() peerQueueFactory := makePeerQueueFactory(nil) @@ -126,3 +158,65 @@ func TestAddingAndRemovingPeers(t *testing.T) { t.Fatal("Peer was disconnected but should not have been") } } + +func TestSendingMessagesToPeers(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan messageSent) + peerQueueFactory := makePeerQueueFactory(messagesSent) + + tp := generatePeers(5) + + peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4] + peerManager := New(ctx, peerQueueFactory) + peerManager.Startup() + + peerManager.Connected(peer1, nil) + peerManager.Connected(peer2, nil) + peerManager.Connected(peer3, nil) + + entries := generateEntries(5, false) + ses := generateSessionID() + + peerManager.SendMessage(entries, nil, ses) + + peersReceived := collectAndCheckMessages( + ctx, t, messagesSent, entries, ses, 200*time.Millisecond) + if len(peersReceived) != 3 { + t.Fatal("Incorrect number of peers received messages") + } + + if !containsPeer(peersReceived, peer1) || + !containsPeer(peersReceived, peer2) || + !containsPeer(peersReceived, peer3) { + t.Fatal("Peers should have received message but did not") + } + + if containsPeer(peersReceived, peer4) || + containsPeer(peersReceived, peer5) { + t.Fatal("Peers received message but should not 
have") + } + + var peersToSendTo []peer.ID + peersToSendTo = append(peersToSendTo, peer1, peer3, peer4) + peerManager.SendMessage(entries, peersToSendTo, ses) + peersReceived = collectAndCheckMessages( + ctx, t, messagesSent, entries, ses, 200*time.Millisecond) + + if len(peersReceived) != 2 { + t.Fatal("Incorrect number of peers received messages") + } + + if !containsPeer(peersReceived, peer1) || + !containsPeer(peersReceived, peer3) { + t.Fatal("Peers should have received message but did not") + } + + if containsPeer(peersReceived, peer2) || + containsPeer(peersReceived, peer5) { + t.Fatal("Peers received message but should not have") + } + + if containsPeer(peersReceived, peer4) { + t.Fatal("Peers targeted received message but was not connected") + } +} diff --git a/bitswap/wantmanager/wantmanager.go b/bitswap/wantmanager/wantmanager.go index a9ea90163..3dcff166b 100644 --- a/bitswap/wantmanager/wantmanager.go +++ b/bitswap/wantmanager/wantmanager.go @@ -26,20 +26,8 @@ type WantSender interface { SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) } -type wantMessageType int - -const ( - isWanted wantMessageType = iota + 1 - addWants - currentWants - currentBroadcastWants - wantCount -) - -type wantMessage struct { - messageType wantMessageType - params interface{} - resultsChan chan interface{} +type wantMessage interface { + handle(wm *WantManager) } // WantManager manages a global want list. 
It tracks two seperate want lists - @@ -49,7 +37,7 @@ type WantManager struct { // channel requests to the run loop // to get predictable behavior while running this in a go routine // having only one channel is neccesary, so requests are processed serially - messageReqs chan wantMessage + wantMessages chan wantMessage // synchronized by Run loop, only touch inside there wl *wantlist.ThreadSafe @@ -62,13 +50,13 @@ type WantManager struct { wantlistGauge metrics.Gauge } -// New initializes a new WantManager +// New initializes a new WantManager for a given context func New(ctx context.Context) *WantManager { ctx, cancel := context.WithCancel(ctx) wantlistGauge := metrics.NewCtx(ctx, "wantlist_total", "Number of items in wantlist.").Gauge() return &WantManager{ - messageReqs: make(chan wantMessage, 10), + wantMessages: make(chan wantMessage, 10), wl: wantlist.NewThreadSafe(), bcwl: wantlist.NewThreadSafe(), ctx: ctx, @@ -93,34 +81,40 @@ func (wm *WantManager) CancelWants(ctx context.Context, ks []cid.Cid, peers []pe wm.addEntries(context.Background(), ks, peers, true, ses) } -type wantSet struct { - entries []*bsmsg.Entry - targets []peer.ID - from uint64 +// IsWanted returns whether a CID is currently wanted +func (wm *WantManager) IsWanted(c cid.Cid) bool { + resp := make(chan bool) + wm.wantMessages <- &isWantedMessage{c, resp} + return <-resp } -func (wm *WantManager) addEntries(ctx context.Context, ks []cid.Cid, targets []peer.ID, cancel bool, ses uint64) { - entries := make([]*bsmsg.Entry, 0, len(ks)) - for i, k := range ks { - entries = append(entries, &bsmsg.Entry{ - Cancel: cancel, - Entry: wantlist.NewRefEntry(k, maxPriority-i), - }) - } - select { - case wm.messageReqs <- wantMessage{ - messageType: addWants, - params: &wantSet{entries: entries, targets: targets, from: ses}, - }: - case <-wm.ctx.Done(): - case <-ctx.Done(): - } +// CurrentWants returns the list of current wants +func (wm *WantManager) CurrentWants() []*wantlist.Entry { + resp := make(chan 
[]*wantlist.Entry) + wm.wantMessages <- ¤tWantsMessage{resp} + return <-resp +} + +// CurrentBroadcastWants returns the current list of wants that are broadcasts +func (wm *WantManager) CurrentBroadcastWants() []*wantlist.Entry { + resp := make(chan []*wantlist.Entry) + wm.wantMessages <- ¤tBroadcastWantsMessage{resp} + return <-resp +} + +// WantCount returns the total count of wants +func (wm *WantManager) WantCount() int { + resp := make(chan int) + wm.wantMessages <- &wantCountMessage{resp} + return <-resp } +// Startup starts processing for the WantManager func (wm *WantManager) Startup() { go wm.run() } +// Shutdown ends processing for the want manager func (wm *WantManager) Shutdown() { wm.cancel() } @@ -130,93 +124,93 @@ func (wm *WantManager) run() { // event loop. Really, just don't do anything likely to block. for { select { - case message := <-wm.messageReqs: - wm.handleMessage(message) + case message := <-wm.wantMessages: + message.handle(wm) case <-wm.ctx.Done(): return } } } -func (wm *WantManager) handleMessage(message wantMessage) { - switch message.messageType { - case addWants: - ws := message.params.(*wantSet) - // is this a broadcast or not? 
- brdc := len(ws.targets) == 0 - - // add changes to our wantlist - for _, e := range ws.entries { - if e.Cancel { - if brdc { - wm.bcwl.Remove(e.Cid, ws.from) - } - - if wm.wl.Remove(e.Cid, ws.from) { - wm.wantlistGauge.Dec() - } - } else { - if brdc { - wm.bcwl.AddEntry(e.Entry, ws.from) - } - if wm.wl.AddEntry(e.Entry, ws.from) { - wm.wantlistGauge.Inc() - } +func (wm *WantManager) addEntries(ctx context.Context, ks []cid.Cid, targets []peer.ID, cancel bool, ses uint64) { + entries := make([]*bsmsg.Entry, 0, len(ks)) + for i, k := range ks { + entries = append(entries, &bsmsg.Entry{ + Cancel: cancel, + Entry: wantlist.NewRefEntry(k, maxPriority-i), + }) + } + select { + case wm.wantMessages <- &wantSet{entries: entries, targets: targets, from: ses}: + case <-wm.ctx.Done(): + case <-ctx.Done(): + } +} + +type wantSet struct { + entries []*bsmsg.Entry + targets []peer.ID + from uint64 +} + +func (ws *wantSet) handle(wm *WantManager) { + // is this a broadcast or not? + brdc := len(ws.targets) == 0 + + // add changes to our wantlist + for _, e := range ws.entries { + if e.Cancel { + if brdc { + wm.bcwl.Remove(e.Cid, ws.from) } - } - // broadcast those wantlist changes - wm.wantSender.SendMessage(ws.entries, ws.targets, ws.from) - case isWanted: - c := message.params.(cid.Cid) - _, isWanted := wm.wl.Contains(c) - message.resultsChan <- isWanted - case currentWants: - message.resultsChan <- wm.wl.Entries() - case currentBroadcastWants: - message.resultsChan <- wm.bcwl.Entries() - case wantCount: - message.resultsChan <- wm.wl.Len() + if wm.wl.Remove(e.Cid, ws.from) { + wm.wantlistGauge.Dec() + } + } else { + if brdc { + wm.bcwl.AddEntry(e.Entry, ws.from) + } + if wm.wl.AddEntry(e.Entry, ws.from) { + wm.wantlistGauge.Inc() + } + } } + + // broadcast those wantlist changes + wm.wantSender.SendMessage(ws.entries, ws.targets, ws.from) } -func (wm *WantManager) IsWanted(c cid.Cid) bool { - resp := make(chan interface{}) - wm.messageReqs <- wantMessage{ - messageType: 
isWanted, - params: c, - resultsChan: resp, - } - result := <-resp - return result.(bool) +type isWantedMessage struct { + c cid.Cid + resp chan<- bool } -func (wm *WantManager) CurrentWants() []*wantlist.Entry { - resp := make(chan interface{}) - wm.messageReqs <- wantMessage{ - messageType: currentWants, - resultsChan: resp, - } - result := <-resp - return result.([]*wantlist.Entry) +func (iwm *isWantedMessage) handle(wm *WantManager) { + _, isWanted := wm.wl.Contains(iwm.c) + iwm.resp <- isWanted } -func (wm *WantManager) CurrentBroadcastWants() []*wantlist.Entry { - resp := make(chan interface{}) - wm.messageReqs <- wantMessage{ - messageType: currentBroadcastWants, - resultsChan: resp, - } - result := <-resp - return result.([]*wantlist.Entry) +type currentWantsMessage struct { + resp chan<- []*wantlist.Entry } -func (wm *WantManager) WantCount() int { - resp := make(chan interface{}) - wm.messageReqs <- wantMessage{ - messageType: wantCount, - resultsChan: resp, - } - result := <-resp - return result.(int) +func (cwm *currentWantsMessage) handle(wm *WantManager) { + cwm.resp <- wm.wl.Entries() +} + +type currentBroadcastWantsMessage struct { + resp chan<- []*wantlist.Entry +} + +func (cbcwm *currentBroadcastWantsMessage) handle(wm *WantManager) { + cbcwm.resp <- wm.bcwl.Entries() +} + +type wantCountMessage struct { + resp chan<- int +} + +func (wcm *wantCountMessage) handle(wm *WantManager) { + wcm.resp <- wm.wl.Len() } From d761c4aa7fc825d38fea65f897a9eab871ef29f9 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 27 Nov 2018 13:15:42 -0800 Subject: [PATCH 0663/1035] refactor(testing): extract common test utils This commit was moved from ipfs/go-bitswap@9532d009dbd08019440f810ddbf11304fc3003e6 --- bitswap/peermanager/peermanager_test.go | 89 +++++++------------------ bitswap/testutil/testutil.go | 67 +++++++++++++++++++ bitswap/wantmanager/wantmanager_test.go | 43 ++---------- 3 files changed, 97 insertions(+), 102 deletions(-) create mode 100644 
bitswap/testutil/testutil.go diff --git a/bitswap/peermanager/peermanager_test.go b/bitswap/peermanager/peermanager_test.go index 67ba38ae4..9b242b55b 100644 --- a/bitswap/peermanager/peermanager_test.go +++ b/bitswap/peermanager/peermanager_test.go @@ -6,47 +6,13 @@ import ( "testing" "time" + "github.com/ipfs/go-bitswap/testutil" + bsmsg "github.com/ipfs/go-bitswap/message" wantlist "github.com/ipfs/go-bitswap/wantlist" - "github.com/ipfs/go-ipfs-blocksutil" "github.com/libp2p/go-libp2p-peer" ) -var blockGenerator = blocksutil.NewBlockGenerator() -var prioritySeq int - -func generateEntries(n int, isCancel bool) []*bsmsg.Entry { - bsmsgs := make([]*bsmsg.Entry, 0, n) - for i := 0; i < n; i++ { - prioritySeq++ - msg := &bsmsg.Entry{ - Entry: wantlist.NewRefEntry(blockGenerator.Next().Cid(), prioritySeq), - Cancel: isCancel, - } - bsmsgs = append(bsmsgs, msg) - } - return bsmsgs -} - -var peerSeq int - -func generatePeers(n int) []peer.ID { - peerIds := make([]peer.ID, 0, n) - for i := 0; i < n; i++ { - peerSeq++ - p := peer.ID(peerSeq) - peerIds = append(peerIds, p) - } - return peerIds -} - -var nextSession uint64 - -func generateSessionID() uint64 { - nextSession++ - return uint64(nextSession) -} - type messageSent struct { p peer.ID entries []*bsmsg.Entry @@ -59,15 +25,6 @@ type fakePeer struct { messagesSent chan messageSent } -func containsPeer(peers []peer.ID, p peer.ID) bool { - for _, n := range peers { - if p == n { - return true - } - } - return false -} - func (fp *fakePeer) Startup(ctx context.Context, initialEntries []*wantlist.Entry) {} func (fp *fakePeer) Shutdown() {} func (fp *fakePeer) RefIncrement() { fp.refcnt++ } @@ -119,7 +76,7 @@ func TestAddingAndRemovingPeers(t *testing.T) { ctx := context.Background() peerQueueFactory := makePeerQueueFactory(nil) - tp := generatePeers(5) + tp := testutil.GeneratePeers(5) peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4] peerManager := New(ctx, peerQueueFactory) peerManager.Startup() 
@@ -130,14 +87,14 @@ func TestAddingAndRemovingPeers(t *testing.T) { connectedPeers := peerManager.ConnectedPeers() - if !containsPeer(connectedPeers, peer1) || - !containsPeer(connectedPeers, peer2) || - !containsPeer(connectedPeers, peer3) { + if !testutil.ContainsPeer(connectedPeers, peer1) || + !testutil.ContainsPeer(connectedPeers, peer2) || + !testutil.ContainsPeer(connectedPeers, peer3) { t.Fatal("Peers not connected that should be connected") } - if containsPeer(connectedPeers, peer4) || - containsPeer(connectedPeers, peer5) { + if testutil.ContainsPeer(connectedPeers, peer4) || + testutil.ContainsPeer(connectedPeers, peer5) { t.Fatal("Peers connected that shouldn't be connected") } @@ -145,7 +102,7 @@ func TestAddingAndRemovingPeers(t *testing.T) { peerManager.Disconnected(peer1) connectedPeers = peerManager.ConnectedPeers() - if containsPeer(connectedPeers, peer1) { + if testutil.ContainsPeer(connectedPeers, peer1) { t.Fatal("Peer should have been disconnected but was not") } @@ -154,7 +111,7 @@ func TestAddingAndRemovingPeers(t *testing.T) { peerManager.Disconnected(peer2) connectedPeers = peerManager.ConnectedPeers() - if !containsPeer(connectedPeers, peer2) { + if !testutil.ContainsPeer(connectedPeers, peer2) { t.Fatal("Peer was disconnected but should not have been") } } @@ -164,7 +121,7 @@ func TestSendingMessagesToPeers(t *testing.T) { messagesSent := make(chan messageSent) peerQueueFactory := makePeerQueueFactory(messagesSent) - tp := generatePeers(5) + tp := testutil.GeneratePeers(5) peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4] peerManager := New(ctx, peerQueueFactory) @@ -174,8 +131,8 @@ func TestSendingMessagesToPeers(t *testing.T) { peerManager.Connected(peer2, nil) peerManager.Connected(peer3, nil) - entries := generateEntries(5, false) - ses := generateSessionID() + entries := testutil.GenerateEntries(5, false) + ses := testutil.GenerateSessionID() peerManager.SendMessage(entries, nil, ses) @@ -185,14 +142,14 @@ func 
TestSendingMessagesToPeers(t *testing.T) { t.Fatal("Incorrect number of peers received messages") } - if !containsPeer(peersReceived, peer1) || - !containsPeer(peersReceived, peer2) || - !containsPeer(peersReceived, peer3) { + if !testutil.ContainsPeer(peersReceived, peer1) || + !testutil.ContainsPeer(peersReceived, peer2) || + !testutil.ContainsPeer(peersReceived, peer3) { t.Fatal("Peers should have received message but did not") } - if containsPeer(peersReceived, peer4) || - containsPeer(peersReceived, peer5) { + if testutil.ContainsPeer(peersReceived, peer4) || + testutil.ContainsPeer(peersReceived, peer5) { t.Fatal("Peers received message but should not have") } @@ -206,17 +163,17 @@ func TestSendingMessagesToPeers(t *testing.T) { t.Fatal("Incorrect number of peers received messages") } - if !containsPeer(peersReceived, peer1) || - !containsPeer(peersReceived, peer3) { + if !testutil.ContainsPeer(peersReceived, peer1) || + !testutil.ContainsPeer(peersReceived, peer3) { t.Fatal("Peers should have received message but did not") } - if containsPeer(peersReceived, peer2) || - containsPeer(peersReceived, peer5) { + if testutil.ContainsPeer(peersReceived, peer2) || + testutil.ContainsPeer(peersReceived, peer5) { t.Fatal("Peers received message but should not have") } - if containsPeer(peersReceived, peer4) { + if testutil.ContainsPeer(peersReceived, peer4) { t.Fatal("Peers targeted received message but was not connected") } } diff --git a/bitswap/testutil/testutil.go b/bitswap/testutil/testutil.go new file mode 100644 index 000000000..6ac7dcbfb --- /dev/null +++ b/bitswap/testutil/testutil.go @@ -0,0 +1,67 @@ +package testutil + +import ( + bsmsg "github.com/ipfs/go-bitswap/message" + "github.com/ipfs/go-bitswap/wantlist" + cid "github.com/ipfs/go-cid" + blocksutil "github.com/ipfs/go-ipfs-blocksutil" + peer "github.com/libp2p/go-libp2p-peer" +) + +var blockGenerator = blocksutil.NewBlockGenerator() +var prioritySeq int + +// GenerateCids produces n content 
identifiers +func GenerateCids(n int) []cid.Cid { + cids := make([]cid.Cid, 0, n) + for i := 0; i < n; i++ { + c := blockGenerator.Next().Cid() + cids = append(cids, c) + } + return cids +} + +// GenerateEntries makes fake bitswap message entries +func GenerateEntries(n int, isCancel bool) []*bsmsg.Entry { + bsmsgs := make([]*bsmsg.Entry, 0, n) + for i := 0; i < n; i++ { + prioritySeq++ + msg := &bsmsg.Entry{ + Entry: wantlist.NewRefEntry(blockGenerator.Next().Cid(), prioritySeq), + Cancel: isCancel, + } + bsmsgs = append(bsmsgs, msg) + } + return bsmsgs +} + +var peerSeq int + +// GeneratePeers creates n peer ids +func GeneratePeers(n int) []peer.ID { + peerIds := make([]peer.ID, 0, n) + for i := 0; i < n; i++ { + peerSeq++ + p := peer.ID(peerSeq) + peerIds = append(peerIds, p) + } + return peerIds +} + +var nextSession uint64 + +// GenerateSessionID make a unit session identifier +func GenerateSessionID() uint64 { + nextSession++ + return uint64(nextSession) +} + +// ContainsPeer returns true if a peer is found n a list of peers +func ContainsPeer(peers []peer.ID, p peer.ID) bool { + for _, n := range peers { + if p == n { + return true + } + } + return false +} diff --git a/bitswap/wantmanager/wantmanager_test.go b/bitswap/wantmanager/wantmanager_test.go index 54cab8345..85590bb15 100644 --- a/bitswap/wantmanager/wantmanager_test.go +++ b/bitswap/wantmanager/wantmanager_test.go @@ -6,42 +6,13 @@ import ( "sync" "testing" + "github.com/ipfs/go-bitswap/testutil" + bsmsg "github.com/ipfs/go-bitswap/message" "github.com/ipfs/go-cid" - "github.com/ipfs/go-ipfs-blocksutil" "github.com/libp2p/go-libp2p-peer" ) -var blockGenerator = blocksutil.NewBlockGenerator() - -func generateCids(n int) []cid.Cid { - cids := make([]cid.Cid, 0, n) - for i := 0; i < n; i++ { - c := blockGenerator.Next().Cid() - cids = append(cids, c) - } - return cids -} - -var peerSeq int - -func generatePeers(n int) []peer.ID { - peerIds := make([]peer.ID, 0, n) - for i := 0; i < n; i++ { - 
peerSeq++ - p := peer.ID(peerSeq) - peerIds = append(peerIds, p) - } - return peerIds -} - -var nextSession uint64 - -func generateSessionID() uint64 { - nextSession++ - return uint64(nextSession) -} - type fakeWantSender struct { lk sync.RWMutex lastWantSet wantSet @@ -66,11 +37,11 @@ func setupTestFixturesAndInitialWantList() ( // setup fixtures wantSender := &fakeWantSender{} wantManager := New(ctx) - keys := generateCids(10) - otherKeys := generateCids(5) - peers := generatePeers(10) - session := generateSessionID() - otherSession := generateSessionID() + keys := testutil.GenerateCids(10) + otherKeys := testutil.GenerateCids(5) + peers := testutil.GeneratePeers(10) + session := testutil.GenerateSessionID() + otherSession := testutil.GenerateSessionID() // startup wantManager wantManager.SetDelegate(wantSender) From d42b1ca79a7e88e2cf4639c3b9a76a98e8864f92 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 27 Nov 2018 20:13:06 -0800 Subject: [PATCH 0664/1035] test(messagequeue): Add test for messagequeue This commit was moved from ipfs/go-bitswap@3b7ae9b87a493a4b4abb331a29cfae3247688bfa --- bitswap/messagequeue/messagequeue.go | 28 ++-- bitswap/messagequeue/messagequeue_test.go | 161 ++++++++++++++++++++++ bitswap/peermanager/peermanager_test.go | 6 +- bitswap/testutil/testutil.go | 15 +- 4 files changed, 195 insertions(+), 15 deletions(-) create mode 100644 bitswap/messagequeue/messagequeue_test.go diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go index f36117d65..d8421a15a 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -14,12 +14,17 @@ import ( var log = logging.Logger("bitswap") +type MessageNetwork interface { + ConnectTo(context.Context, peer.ID) error + NewMessageSender(context.Context, peer.ID) (bsnet.MessageSender, error) +} + type MessageQueue struct { p peer.ID outlk sync.Mutex out bsmsg.BitSwapMessage - network bsnet.BitSwapNetwork + network MessageNetwork wl 
*wantlist.ThreadSafe sender bsnet.MessageSender @@ -30,7 +35,7 @@ type MessageQueue struct { done chan struct{} } -func New(p peer.ID, network bsnet.BitSwapNetwork) *MessageQueue { +func New(p peer.ID, network MessageNetwork) *MessageQueue { return &MessageQueue{ done: make(chan struct{}), work: make(chan struct{}, 1), @@ -90,22 +95,25 @@ func (mq *MessageQueue) AddMessage(entries []*bsmsg.Entry, ses uint64) { func (mq *MessageQueue) Startup(ctx context.Context, initialEntries []*wantlist.Entry) { // new peer, we will want to give them our full wantlist - fullwantlist := bsmsg.New(true) - for _, e := range initialEntries { - for k := range e.SesTrk { - mq.wl.AddEntry(e, k) + if len(initialEntries) > 0 { + fullwantlist := bsmsg.New(true) + for _, e := range initialEntries { + for k := range e.SesTrk { + mq.wl.AddEntry(e, k) + } + fullwantlist.AddEntry(e.Cid, e.Priority) } - fullwantlist.AddEntry(e.Cid, e.Priority) + mq.out = fullwantlist + mq.work <- struct{}{} } - mq.out = fullwantlist - mq.work <- struct{}{} - go mq.runQueue(ctx) + } func (mq *MessageQueue) Shutdown() { close(mq.done) } + func (mq *MessageQueue) runQueue(ctx context.Context) { for { select { diff --git a/bitswap/messagequeue/messagequeue_test.go b/bitswap/messagequeue/messagequeue_test.go new file mode 100644 index 000000000..f3389fe7e --- /dev/null +++ b/bitswap/messagequeue/messagequeue_test.go @@ -0,0 +1,161 @@ +package messagequeue + +import ( + "context" + "testing" + "time" + + "github.com/ipfs/go-bitswap/testutil" + + bsmsg "github.com/ipfs/go-bitswap/message" + bsnet "github.com/ipfs/go-bitswap/network" + peer "github.com/libp2p/go-libp2p-peer" +) + +type fakeMessageNetwork struct { + connectError error + messageSenderError error + messageSender bsnet.MessageSender +} + +func (fmn *fakeMessageNetwork) ConnectTo(context.Context, peer.ID) error { + return fmn.connectError +} + +func (fmn *fakeMessageNetwork) NewMessageSender(context.Context, peer.ID) (bsnet.MessageSender, error) { + if 
fmn.messageSenderError == nil { + return fmn.messageSender, nil + } else { + return nil, fmn.messageSenderError + } +} + +type fakeMessageSender struct { + sendError error + fullClosed chan<- struct{} + reset chan<- struct{} + messagesSent chan<- bsmsg.BitSwapMessage +} + +func (fms *fakeMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { + fms.messagesSent <- msg + return fms.sendError +} +func (fms *fakeMessageSender) Close() error { fms.fullClosed <- struct{}{}; return nil } +func (fms *fakeMessageSender) Reset() error { fms.reset <- struct{}{}; return nil } + +func collectMessages(ctx context.Context, + t *testing.T, + messagesSent <-chan bsmsg.BitSwapMessage, + timeout time.Duration) []bsmsg.BitSwapMessage { + var messagesReceived []bsmsg.BitSwapMessage + timeoutctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + for { + select { + case messageReceived := <-messagesSent: + messagesReceived = append(messagesReceived, messageReceived) + case <-timeoutctx.Done(): + return messagesReceived + } + } +} + +func totalEntriesLength(messages []bsmsg.BitSwapMessage) int { + totalLength := 0 + for _, messages := range messages { + totalLength += len(messages.Wantlist()) + } + return totalLength +} + +func TestStartupAndShutdown(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(peerID, fakenet) + ses := testutil.GenerateSessionID() + wl := testutil.GenerateWantlist(10, ses) + + messageQueue.Startup(ctx, wl.Entries()) + + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + if len(messages) != 1 { + t.Fatal("wrong number of messages were sent for initial wants") + } + + firstMessage := 
messages[0] + if len(firstMessage.Wantlist()) != wl.Len() { + t.Fatal("did not add all wants to want list") + } + for _, entry := range firstMessage.Wantlist() { + if entry.Cancel { + t.Fatal("initial add sent cancel entry when it should not have") + } + } + + messageQueue.Shutdown() + + timeoutctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + select { + case <-fullClosedChan: + case <-resetChan: + t.Fatal("message sender should have been closed but was reset") + case <-timeoutctx.Done(): + t.Fatal("message sender should have been closed but wasn't") + } +} + +func TestSendingMessagesDeduped(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(peerID, fakenet) + ses1 := testutil.GenerateSessionID() + ses2 := testutil.GenerateSessionID() + entries := testutil.GenerateMessageEntries(10, false) + messageQueue.Startup(ctx, nil) + + messageQueue.AddMessage(entries, ses1) + messageQueue.AddMessage(entries, ses2) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + if totalEntriesLength(messages) != len(entries) { + t.Fatal("Messages were not deduped") + } +} + +func TestSendingMessagesPartialDupe(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(peerID, fakenet) + ses1 := testutil.GenerateSessionID() + ses2 := testutil.GenerateSessionID() + entries := 
testutil.GenerateMessageEntries(10, false) + moreEntries := testutil.GenerateMessageEntries(5, false) + secondEntries := append(entries[5:], moreEntries...) + messageQueue.Startup(ctx, nil) + + messageQueue.AddMessage(entries, ses1) + messageQueue.AddMessage(secondEntries, ses2) + messages := collectMessages(ctx, t, messagesSent, 20*time.Millisecond) + + if totalEntriesLength(messages) != len(entries)+len(moreEntries) { + t.Fatal("messages were not correctly deduped") + } + +} diff --git a/bitswap/peermanager/peermanager_test.go b/bitswap/peermanager/peermanager_test.go index 9b242b55b..9617dad38 100644 --- a/bitswap/peermanager/peermanager_test.go +++ b/bitswap/peermanager/peermanager_test.go @@ -131,13 +131,13 @@ func TestSendingMessagesToPeers(t *testing.T) { peerManager.Connected(peer2, nil) peerManager.Connected(peer3, nil) - entries := testutil.GenerateEntries(5, false) + entries := testutil.GenerateMessageEntries(5, false) ses := testutil.GenerateSessionID() peerManager.SendMessage(entries, nil, ses) peersReceived := collectAndCheckMessages( - ctx, t, messagesSent, entries, ses, 200*time.Millisecond) + ctx, t, messagesSent, entries, ses, 10*time.Millisecond) if len(peersReceived) != 3 { t.Fatal("Incorrect number of peers received messages") } @@ -157,7 +157,7 @@ func TestSendingMessagesToPeers(t *testing.T) { peersToSendTo = append(peersToSendTo, peer1, peer3, peer4) peerManager.SendMessage(entries, peersToSendTo, ses) peersReceived = collectAndCheckMessages( - ctx, t, messagesSent, entries, ses, 200*time.Millisecond) + ctx, t, messagesSent, entries, ses, 10*time.Millisecond) if len(peersReceived) != 2 { t.Fatal("Incorrect number of peers received messages") diff --git a/bitswap/testutil/testutil.go b/bitswap/testutil/testutil.go index 6ac7dcbfb..f768f40dc 100644 --- a/bitswap/testutil/testutil.go +++ b/bitswap/testutil/testutil.go @@ -21,8 +21,19 @@ func GenerateCids(n int) []cid.Cid { return cids } -// GenerateEntries makes fake bitswap message entries 
-func GenerateEntries(n int, isCancel bool) []*bsmsg.Entry { +// GenerateWantlist makes a populated wantlist +func GenerateWantlist(n int, ses uint64) *wantlist.ThreadSafe { + wl := wantlist.NewThreadSafe() + for i := 0; i < n; i++ { + prioritySeq++ + entry := wantlist.NewRefEntry(blockGenerator.Next().Cid(), prioritySeq) + wl.AddEntry(entry, ses) + } + return wl +} + +// GenerateMessageEntries makes fake bitswap message entries +func GenerateMessageEntries(n int, isCancel bool) []*bsmsg.Entry { bsmsgs := make([]*bsmsg.Entry, 0, n) for i := 0; i < n; i++ { prioritySeq++ From f378f782a475d6542f89f70204779270ee58eded Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 27 Nov 2018 20:27:02 -0800 Subject: [PATCH 0665/1035] refactor(messagequeue): cleanup and comment This commit was moved from ipfs/go-bitswap@ac45ed058d5fc515ef53cf3803f1506df31b27db --- bitswap/messagequeue/messagequeue.go | 198 ++++++++++++++++----------- 1 file changed, 116 insertions(+), 82 deletions(-) diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go index d8421a15a..bed0cd559 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -14,11 +14,14 @@ import ( var log = logging.Logger("bitswap") +// MessageNetwork is any network that can connect peers and generate a message +// sender type MessageNetwork interface { ConnectTo(context.Context, peer.ID) error NewMessageSender(context.Context, peer.ID) (bsnet.MessageSender, error) } +// MessageQueue implements queuee of want messages to send to peers type MessageQueue struct { p peer.ID @@ -35,6 +38,7 @@ type MessageQueue struct { done chan struct{} } +// New creats a new MessageQueues func New(p peer.ID, network MessageNetwork) *MessageQueue { return &MessageQueue{ done: make(chan struct{}), @@ -46,52 +50,31 @@ func New(p peer.ID, network MessageNetwork) *MessageQueue { } } +// RefIncrement increments the refcount for a message queue func (mq *MessageQueue) RefIncrement() 
{ mq.refcnt++ } +// RefDecrement decrements the refcount for a message queue and returns true +// if the refcount is now 0 func (mq *MessageQueue) RefDecrement() bool { mq.refcnt-- return mq.refcnt > 0 } +// AddMessage adds new entries to an outgoing message for a given session func (mq *MessageQueue) AddMessage(entries []*bsmsg.Entry, ses uint64) { - var work bool - mq.outlk.Lock() - defer func() { - mq.outlk.Unlock() - if !work { - return - } - select { - case mq.work <- struct{}{}: - default: - } - }() - - // if we have no message held allocate a new one - if mq.out == nil { - mq.out = bsmsg.New(false) + if !mq.addEntries(entries, ses) { + return } - - // TODO: add a msg.Combine(...) method - // otherwise, combine the one we are holding with the - // one passed in - for _, e := range entries { - if e.Cancel { - if mq.wl.Remove(e.Cid, ses) { - work = true - mq.out.Cancel(e.Cid) - } - } else { - if mq.wl.Add(e.Cid, e.Priority, ses) { - work = true - mq.out.AddEntry(e.Cid, e.Priority) - } - } + select { + case mq.work <- struct{}{}: + default: } } +// Startup starts the processing of messages, and creates an initial message +// based on the given initial wantlist func (mq *MessageQueue) Startup(ctx context.Context, initialEntries []*wantlist.Entry) { // new peer, we will want to give them our full wantlist @@ -110,6 +93,7 @@ func (mq *MessageQueue) Startup(ctx context.Context, initialEntries []*wantlist. 
} +// Shutdown stops the processing of messages for a message queue func (mq *MessageQueue) Shutdown() { close(mq.done) } @@ -133,84 +117,134 @@ func (mq *MessageQueue) runQueue(ctx context.Context) { } } -func (mq *MessageQueue) doWork(ctx context.Context) { - // grab outgoing message +func (mq *MessageQueue) addEntries(entries []*bsmsg.Entry, ses uint64) bool { + var work bool mq.outlk.Lock() - wlm := mq.out + defer mq.outlk.Unlock() + // if we have no message held allocate a new one + if mq.out == nil { + mq.out = bsmsg.New(false) + } + + // TODO: add a msg.Combine(...) method + // otherwise, combine the one we are holding with the + // one passed in + for _, e := range entries { + if e.Cancel { + if mq.wl.Remove(e.Cid, ses) { + work = true + mq.out.Cancel(e.Cid) + } + } else { + if mq.wl.Add(e.Cid, e.Priority, ses) { + work = true + mq.out.AddEntry(e.Cid, e.Priority) + } + } + } + + return work +} + +func (mq *MessageQueue) doWork(ctx context.Context) { + + wlm := mq.extractOutgoingMessage() if wlm == nil || wlm.Empty() { - mq.outlk.Unlock() return } - mq.out = nil - mq.outlk.Unlock() // NB: only open a stream if we actually have data to send - if mq.sender == nil { - err := mq.openSender(ctx) - if err != nil { - log.Infof("cant open message sender to peer %s: %s", mq.p, err) - // TODO: cant connect, what now? - return - } + err := mq.initializeSender(ctx) + if err != nil { + log.Infof("cant open message sender to peer %s: %s", mq.p, err) + // TODO: cant connect, what now? + return } // send wantlist updates for { // try to send this message until we fail. 
- err := mq.sender.SendMsg(ctx, wlm) - if err == nil { + if mq.attemptSendAndRecovery(ctx, wlm) { return } + } +} - log.Infof("bitswap send error: %s", err) - mq.sender.Reset() - mq.sender = nil +func (mq *MessageQueue) initializeSender(ctx context.Context) error { + if mq.sender != nil { + return nil + } + nsender, err := openSender(ctx, mq.network, mq.p) + if err != nil { + return err + } + mq.sender = nsender + return nil +} - select { - case <-mq.done: - return - case <-ctx.Done(): - return - case <-time.After(time.Millisecond * 100): - // wait 100ms in case disconnect notifications are still propogating - log.Warning("SendMsg errored but neither 'done' nor context.Done() were set") - } +func (mq *MessageQueue) attemptSendAndRecovery(ctx context.Context, wlm bsmsg.BitSwapMessage) bool { + err := mq.sender.SendMsg(ctx, wlm) + if err == nil { + return true + } - err = mq.openSender(ctx) - if err != nil { - log.Infof("couldnt open sender again after SendMsg(%s) failed: %s", mq.p, err) - // TODO(why): what do we do now? - // I think the *right* answer is to probably put the message we're - // trying to send back, and then return to waiting for new work or - // a disconnect. - return - } + log.Infof("bitswap send error: %s", err) + mq.sender.Reset() + mq.sender = nil + + select { + case <-mq.done: + return true + case <-ctx.Done(): + return true + case <-time.After(time.Millisecond * 100): + // wait 100ms in case disconnect notifications are still propogating + log.Warning("SendMsg errored but neither 'done' nor context.Done() were set") + } - // TODO: Is this the same instance for the remote peer? - // If its not, we should resend our entire wantlist to them - /* - if mq.sender.InstanceID() != mq.lastSeenInstanceID { - wlm = mq.getFullWantlistMessage() - } - */ + err = mq.initializeSender(ctx) + if err != nil { + log.Infof("couldnt open sender again after SendMsg(%s) failed: %s", mq.p, err) + // TODO(why): what do we do now? 
+ // I think the *right* answer is to probably put the message we're + // trying to send back, and then return to waiting for new work or + // a disconnect. + return true } + + // TODO: Is this the same instance for the remote peer? + // If its not, we should resend our entire wantlist to them + /* + if mq.sender.InstanceID() != mq.lastSeenInstanceID { + wlm = mq.getFullWantlistMessage() + } + */ + return false +} + +func (mq *MessageQueue) extractOutgoingMessage() bsmsg.BitSwapMessage { + // grab outgoing message + mq.outlk.Lock() + wlm := mq.out + mq.out = nil + mq.outlk.Unlock() + return wlm } -func (mq *MessageQueue) openSender(ctx context.Context) error { +func openSender(ctx context.Context, network MessageNetwork, p peer.ID) (bsnet.MessageSender, error) { // allow ten minutes for connections this includes looking them up in the // dht dialing them, and handshaking conctx, cancel := context.WithTimeout(ctx, time.Minute*10) defer cancel() - err := mq.network.ConnectTo(conctx, mq.p) + err := network.ConnectTo(conctx, p) if err != nil { - return err + return nil, err } - nsender, err := mq.network.NewMessageSender(ctx, mq.p) + nsender, err := network.NewMessageSender(ctx, p) if err != nil { - return err + return nil, err } - mq.sender = nsender - return nil + return nsender, nil } From cd2b81af707a1db6549c0ad13111e9e2d53b6b22 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 6 Dec 2018 11:40:10 -0800 Subject: [PATCH 0666/1035] docs(comments): end comment sentences to have full-stop per https://github.com/golang/go/wiki/CodeReviewComments#comment-sentences This commit was moved from ipfs/go-bitswap@c087e275e9c19bebe1a387f24dd936b102de1c63 --- bitswap/bitswap.go | 8 +++---- bitswap/decision/engine.go | 8 +++---- bitswap/decision/peer_request_queue.go | 16 ++++++------- bitswap/message/message.go | 2 +- bitswap/messagequeue/messagequeue.go | 16 ++++++------- bitswap/network/interface.go | 10 ++++---- bitswap/network/ipfs_impl.go | 4 ++-- 
bitswap/peermanager/peermanager.go | 22 ++++++++--------- bitswap/session.go | 6 ++--- bitswap/sessionmanager/sessionmanager.go | 2 +- .../internet_latency_delay_generator.go | 6 ++--- bitswap/testnet/virtual.go | 4 ++-- bitswap/testutil/testutil.go | 12 +++++----- bitswap/wantlist/wantlist.go | 8 +++---- bitswap/wantmanager/wantmanager.go | 24 +++++++++---------- 15 files changed, 74 insertions(+), 74 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b3e472d2d..cfaee4a3b 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -307,7 +307,7 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks return out, nil } -// CancelWant removes a given key from the wantlist +// CancelWant removes a given key from the wantlist. func (bs *Bitswap) CancelWants(cids []cid.Cid, ses uint64) { if len(cids) == 0 { return @@ -363,7 +363,7 @@ func (bs *Bitswap) receiveBlockFrom(blk blocks.Block, from peer.ID) error { return nil } -// SessionsForBlock returns a slice of all sessions that may be interested in the given cid +// SessionsForBlock returns a slice of all sessions that may be interested in the given cid. func (bs *Bitswap) SessionsForBlock(c cid.Cid) []*Session { var out []*Session bs.sm.IterateSessions(func(session exchange.Fetcher) { @@ -442,14 +442,14 @@ func (bs *Bitswap) updateReceiveCounters(b blocks.Block) { } } -// Connected/Disconnected warns bitswap about peer connections +// Connected/Disconnected warns bitswap about peer connections. func (bs *Bitswap) PeerConnected(p peer.ID) { initialWants := bs.wm.CurrentBroadcastWants() bs.pm.Connected(p, initialWants) bs.engine.PeerConnected(p) } -// Connected/Disconnected warns bitswap about peer connections +// Connected/Disconnected warns bitswap about peer connections. 
func (bs *Bitswap) PeerDisconnected(p peer.ID) { bs.pm.Disconnected(p) bs.engine.PeerDisconnected(p) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 90155a1df..384c7c698 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -56,12 +56,12 @@ const ( maxMessageSize = 512 * 1024 ) -// Envelope contains a message for a Peer +// Envelope contains a message for a Peer. type Envelope struct { - // Peer is the intended recipient + // Peer is the intended recipient. Peer peer.ID - // Message is the payload + // Message is the payload. Message bsmsg.BitSwapMessage // A callback to notify the decision queue that the task is complete @@ -206,7 +206,7 @@ func (e *Engine) Outbox() <-chan (<-chan *Envelope) { return e.outbox } -// Returns a slice of Peers with whom the local node has active sessions +// Peers returns a slice of Peers with whom the local node has active sessions. func (e *Engine) Peers() []peer.ID { e.lock.Lock() defer e.lock.Unlock() diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index c02329fc3..c7aaf553e 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -45,7 +45,7 @@ type prq struct { frozen map[peer.ID]*activePartner } -// Push currently adds a new peerRequestTask to the end of the list +// Push currently adds a new peerRequestTask to the end of the list. func (tl *prq) Push(to peer.ID, entries ...*wantlist.Entry) { tl.lock.Lock() defer tl.lock.Unlock() @@ -140,7 +140,7 @@ func (tl *prq) Pop() *peerRequestTask { return out } -// Remove removes a task from the queue +// Remove removes a task from the queue. func (tl *prq) Remove(k cid.Cid, p peer.ID) { tl.lock.Lock() t, ok := tl.taskMap[taskEntryKey{p, k}] @@ -210,12 +210,12 @@ type peerRequestTask struct { index int // book-keeping field used by the pq container } -// Index implements pq.Elem +// Index implements pq.Elem. 
func (t *peerRequestTask) Index() int { return t.index } -// SetIndex implements pq.Elem +// SetIndex implements pq.Elem. func (t *peerRequestTask) SetIndex(i int) { t.index = i } @@ -307,7 +307,7 @@ func partnerCompare(a, b pq.Elem) bool { return pa.active < pb.active } -// StartTask signals that a task was started for this partner +// StartTask signals that a task was started for this partner. func (p *activePartner) StartTask(k cid.Cid) { p.activelk.Lock() p.activeBlocks.Add(k) @@ -315,7 +315,7 @@ func (p *activePartner) StartTask(k cid.Cid) { p.activelk.Unlock() } -// TaskDone signals that a task was completed for this partner +// TaskDone signals that a task was completed for this partner. func (p *activePartner) TaskDone(k cid.Cid) { p.activelk.Lock() p.activeBlocks.Remove(k) @@ -326,12 +326,12 @@ func (p *activePartner) TaskDone(k cid.Cid) { p.activelk.Unlock() } -// Index implements pq.Elem +// Index implements pq.Elem. func (p *activePartner) Index() int { return p.index } -// SetIndex implements pq.Elem +// SetIndex implements pq.Elem. func (p *activePartner) SetIndex(i int) { p.index = i } diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 3289507dd..2b538a2f4 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -21,7 +21,7 @@ type BitSwapMessage interface { // the sender. Wantlist() []Entry - // Blocks returns a slice of unique blocks + // Blocks returns a slice of unique blocks. Blocks() []blocks.Block // AddEntry adds an entry to the Wantlist. diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go index bed0cd559..294bad193 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -15,13 +15,13 @@ import ( var log = logging.Logger("bitswap") // MessageNetwork is any network that can connect peers and generate a message -// sender +// sender. 
type MessageNetwork interface { ConnectTo(context.Context, peer.ID) error NewMessageSender(context.Context, peer.ID) (bsnet.MessageSender, error) } -// MessageQueue implements queuee of want messages to send to peers +// MessageQueue implements queue of want messages to send to peers. type MessageQueue struct { p peer.ID @@ -38,7 +38,7 @@ type MessageQueue struct { done chan struct{} } -// New creats a new MessageQueues +// New creats a new MessageQueue. func New(p peer.ID, network MessageNetwork) *MessageQueue { return &MessageQueue{ done: make(chan struct{}), @@ -50,19 +50,19 @@ func New(p peer.ID, network MessageNetwork) *MessageQueue { } } -// RefIncrement increments the refcount for a message queue +// RefIncrement increments the refcount for a message queue. func (mq *MessageQueue) RefIncrement() { mq.refcnt++ } // RefDecrement decrements the refcount for a message queue and returns true -// if the refcount is now 0 +// if the refcount is now 0. func (mq *MessageQueue) RefDecrement() bool { mq.refcnt-- return mq.refcnt > 0 } -// AddMessage adds new entries to an outgoing message for a given session +// AddMessage adds new entries to an outgoing message for a given session. func (mq *MessageQueue) AddMessage(entries []*bsmsg.Entry, ses uint64) { if !mq.addEntries(entries, ses) { return @@ -74,7 +74,7 @@ func (mq *MessageQueue) AddMessage(entries []*bsmsg.Entry, ses uint64) { } // Startup starts the processing of messages, and creates an initial message -// based on the given initial wantlist +// based on the given initial wantlist. func (mq *MessageQueue) Startup(ctx context.Context, initialEntries []*wantlist.Entry) { // new peer, we will want to give them our full wantlist @@ -93,7 +93,7 @@ func (mq *MessageQueue) Startup(ctx context.Context, initialEntries []*wantlist. } -// Shutdown stops the processing of messages for a message queue +// Shutdown stops the processing of messages for a message queue. 
func (mq *MessageQueue) Shutdown() { close(mq.done) } diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 6c325b1c1..2d2c9b19c 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -19,7 +19,7 @@ var ( ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.1.0" ) -// BitSwapNetwork provides network connectivity for BitSwap sessions +// BitSwapNetwork provides network connectivity for BitSwap sessions. type BitSwapNetwork interface { // SendMessage sends a BitSwap message to a peer. @@ -49,7 +49,7 @@ type MessageSender interface { Reset() error } -// Implement Receiver to receive messages from the BitSwapNetwork +// Implement Receiver to receive messages from the BitSwapNetwork. type Receiver interface { ReceiveMessage( ctx context.Context, @@ -58,16 +58,16 @@ type Receiver interface { ReceiveError(error) - // Connected/Disconnected warns bitswap about peer connections + // Connected/Disconnected warns bitswap about peer connections. PeerConnected(peer.ID) PeerDisconnected(peer.ID) } type Routing interface { - // FindProvidersAsync returns a channel of providers for the given key + // FindProvidersAsync returns a channel of providers for the given key. FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID - // Provide provides the key to the network + // Provide provides the key to the network. Provide(context.Context, cid.Cid) error } diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index f6c04e357..da2a4b4c4 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -26,7 +26,7 @@ var log = logging.Logger("bitswap_network") var sendMessageTimeout = time.Minute * 10 -// NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host +// NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host. 
func NewFromIpfsHost(host host.Host, r routing.ContentRouting) BitSwapNetwork { bitswapNetwork := impl{ host: host, @@ -149,7 +149,7 @@ func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { return bsnet.host.Connect(ctx, pstore.PeerInfo{ID: p}) } -// FindProvidersAsync returns a channel of providers for the given key +// FindProvidersAsync returns a channel of providers for the given key. func (bsnet *impl) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { // Since routing queries are expensive, give bitswap the peers to which we diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index 379fd4bd2..30145cc5c 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -16,7 +16,7 @@ var ( metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} ) -// PeerQueue provides a queer of messages to be sent for a single peer +// PeerQueue provides a queer of messages to be sent for a single peer. type PeerQueue interface { RefIncrement() RefDecrement() bool @@ -25,14 +25,14 @@ type PeerQueue interface { Shutdown() } -// PeerQueueFactory provides a function that will create a PeerQueue +// PeerQueueFactory provides a function that will create a PeerQueue. type PeerQueueFactory func(p peer.ID) PeerQueue type peerMessage interface { handle(pm *PeerManager) } -// PeerManager manages a pool of peers and sends messages to peers in the pool +// PeerManager manages a pool of peers and sends messages to peers in the pool. type PeerManager struct { // sync channel for Run loop peerMessages chan peerMessage @@ -45,7 +45,7 @@ type PeerManager struct { cancel func() } -// New creates a new PeerManager, given a context and a peerQueueFactory +// New creates a new PeerManager, given a context and a peerQueueFactory. 
func New(ctx context.Context, createPeerQueue PeerQueueFactory) *PeerManager { ctx, cancel := context.WithCancel(ctx) return &PeerManager{ @@ -57,7 +57,7 @@ func New(ctx context.Context, createPeerQueue PeerQueueFactory) *PeerManager { } } -// ConnectedPeers returns a list of peers this PeerManager is managing +// ConnectedPeers returns a list of peers this PeerManager is managing. func (pm *PeerManager) ConnectedPeers() []peer.ID { resp := make(chan []peer.ID) pm.peerMessages <- &getPeersMessage{resp} @@ -65,7 +65,7 @@ func (pm *PeerManager) ConnectedPeers() []peer.ID { } // Connected is called to add a new peer to the pool, and send it an initial set -// of wants +// of wants. func (pm *PeerManager) Connected(p peer.ID, initialEntries []*wantlist.Entry) { select { case pm.peerMessages <- &connectPeerMessage{p, initialEntries}: @@ -73,7 +73,7 @@ func (pm *PeerManager) Connected(p peer.ID, initialEntries []*wantlist.Entry) { } } -// Disconnected is called to remove a peer from the pool +// Disconnected is called to remove a peer from the pool. func (pm *PeerManager) Disconnected(p peer.ID) { select { case pm.peerMessages <- &disconnectPeerMessage{p}: @@ -81,8 +81,8 @@ func (pm *PeerManager) Disconnected(p peer.ID) { } } -// SendMessage is called to send a message to all or some peers in the pool -// if targets is nil, it sends to all +// SendMessage is called to send a message to all or some peers in the pool; +// if targets is nil, it sends to all. func (pm *PeerManager) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) { select { case pm.peerMessages <- &sendPeerMessage{entries: entries, targets: targets, from: from}: @@ -91,12 +91,12 @@ func (pm *PeerManager) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, fr } // Startup enables the run loop for the PeerManager - no processing will occur -// if startup is not called +// if startup is not called. 
func (pm *PeerManager) Startup() { go pm.run() } -// Shutdown shutsdown processing for the PeerManager +// Shutdown shutsdown processing for the PeerManager. func (pm *PeerManager) Shutdown() { pm.cancel() } diff --git a/bitswap/session.go b/bitswap/session.go index cd5f645a6..39748e40c 100644 --- a/bitswap/session.go +++ b/bitswap/session.go @@ -20,7 +20,7 @@ const activeWantsLimit = 16 // Session holds state for an individual bitswap transfer operation. // This allows bitswap to make smarter decisions about who to send wantlist -// info to, and who to request blocks from +// info to, and who to request blocks from. type Session struct { ctx context.Context tofetch *cidQueue @@ -51,7 +51,7 @@ type Session struct { } // NewSession creates a new bitswap session whose lifetime is bounded by the -// given context +// given context. func (bs *Bitswap) NewSession(ctx context.Context) exchange.Fetcher { s := &Session{ activePeers: make(map[peer.ID]struct{}), @@ -302,7 +302,7 @@ func (s *Session) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks. return getBlocksImpl(ctx, keys, s.notif, s.fetch, s.cancelWants) } -// GetBlock fetches a single block +// GetBlock fetches a single block. func (s *Session) GetBlock(parent context.Context, k cid.Cid) (blocks.Block, error) { return getBlock(parent, k, s.GetBlocks) } diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index 1ebee2fd1..e0e8dec49 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -48,7 +48,7 @@ func (sm *SessionManager) GetNextSessionID() uint64 { type IterateSessionFunc func(session exchange.Fetcher) // IterateSessions loops through all managed sessions and applies the given -// IterateSessionFunc +// IterateSessionFunc. 
func (sm *SessionManager) IterateSessions(iterate IterateSessionFunc) { sm.sessLk.Lock() defer sm.sessLk.Unlock() diff --git a/bitswap/testnet/internet_latency_delay_generator.go b/bitswap/testnet/internet_latency_delay_generator.go index d1fd3ae15..25b9f5b80 100644 --- a/bitswap/testnet/internet_latency_delay_generator.go +++ b/bitswap/testnet/internet_latency_delay_generator.go @@ -10,7 +10,7 @@ import ( var sharedRNG = rand.New(rand.NewSource(time.Now().UnixNano())) // InternetLatencyDelayGenerator generates three clusters of delays, -// typical of the type of peers you would encounter on the interenet +// typical of the type of peers you would encounter on the interenet. // Given a base delay time T, the wait time generated will be either: // 1. A normalized distribution around the base time // 2. A normalized distribution around the base time plus a "medium" delay @@ -18,9 +18,9 @@ var sharedRNG = rand.New(rand.NewSource(time.Now().UnixNano())) // The size of the medium & large delays are determined when the generator // is constructed, as well as the relative percentages with which delays fall // into each of the three different clusters, and the standard deviation for -// the normalized distribution +// the normalized distribution. // This can be used to generate a number of scenarios typical of latency -// distribution among peers on the internet +// distribution among peers on the internet. func InternetLatencyDelayGenerator( mediumDelay time.Duration, largeDelay time.Duration, diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 7d1921174..d5a77494b 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -146,7 +146,7 @@ func (nc *networkClient) Stats() bsnet.NetworkStats { } } -// FindProvidersAsync returns a channel of providers for the given key +// FindProvidersAsync returns a channel of providers for the given key. 
func (nc *networkClient) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { // NB: this function duplicates the PeerInfo -> ID transformation in the @@ -200,7 +200,7 @@ func (n *networkClient) NewMessageSender(ctx context.Context, p peer.ID) (bsnet. }, nil } -// Provide provides the key to the network +// Provide provides the key to the network. func (nc *networkClient) Provide(ctx context.Context, k cid.Cid) error { return nc.routing.Provide(ctx, k, true) } diff --git a/bitswap/testutil/testutil.go b/bitswap/testutil/testutil.go index f768f40dc..9cfb38917 100644 --- a/bitswap/testutil/testutil.go +++ b/bitswap/testutil/testutil.go @@ -11,7 +11,7 @@ import ( var blockGenerator = blocksutil.NewBlockGenerator() var prioritySeq int -// GenerateCids produces n content identifiers +// GenerateCids produces n content identifiers. func GenerateCids(n int) []cid.Cid { cids := make([]cid.Cid, 0, n) for i := 0; i < n; i++ { @@ -21,7 +21,7 @@ func GenerateCids(n int) []cid.Cid { return cids } -// GenerateWantlist makes a populated wantlist +// GenerateWantlist makes a populated wantlist. func GenerateWantlist(n int, ses uint64) *wantlist.ThreadSafe { wl := wantlist.NewThreadSafe() for i := 0; i < n; i++ { @@ -32,7 +32,7 @@ func GenerateWantlist(n int, ses uint64) *wantlist.ThreadSafe { return wl } -// GenerateMessageEntries makes fake bitswap message entries +// GenerateMessageEntries makes fake bitswap message entries. func GenerateMessageEntries(n int, isCancel bool) []*bsmsg.Entry { bsmsgs := make([]*bsmsg.Entry, 0, n) for i := 0; i < n; i++ { @@ -48,7 +48,7 @@ func GenerateMessageEntries(n int, isCancel bool) []*bsmsg.Entry { var peerSeq int -// GeneratePeers creates n peer ids +// GeneratePeers creates n peer ids. 
func GeneratePeers(n int) []peer.ID { peerIds := make([]peer.ID, 0, n) for i := 0; i < n; i++ { @@ -61,13 +61,13 @@ func GeneratePeers(n int) []peer.ID { var nextSession uint64 -// GenerateSessionID make a unit session identifier +// GenerateSessionID make a unit session identifier. func GenerateSessionID() uint64 { nextSession++ return uint64(nextSession) } -// ContainsPeer returns true if a peer is found n a list of peers +// ContainsPeer returns true if a peer is found n a list of peers. func ContainsPeer(peers []peer.ID, p peer.ID) bool { for _, n := range peers { if p == n { diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 83130072d..947c964da 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -28,7 +28,7 @@ type Entry struct { Trash bool } -// NewRefEntry creates a new reference tracked wantlist entry +// NewRefEntry creates a new reference tracked wantlist entry. func NewRefEntry(c cid.Cid, p int) *Entry { return &Entry{ Cid: c, @@ -59,10 +59,10 @@ func New() *Wantlist { // by the session ID 'ses'. if a cid is added under multiple session IDs, then // it must be removed by each of those sessions before it is no longer 'in the // wantlist'. Calls to Add are idempotent given the same arguments. Subsequent -// calls with different values for priority will not update the priority +// calls with different values for priority will not update the priority. // TODO: think through priority changes here // Add returns true if the cid did not exist in the wantlist before this call -// (even if it was under a different session) +// (even if it was under a different session). func (w *ThreadSafe) Add(c cid.Cid, priority int, ses uint64) bool { w.lk.Lock() defer w.lk.Unlock() @@ -114,7 +114,7 @@ func (w *ThreadSafe) Remove(c cid.Cid, ses uint64) bool { } // Contains returns true if the given cid is in the wantlist tracked by one or -// more sessions +// more sessions. 
func (w *ThreadSafe) Contains(k cid.Cid) (*Entry, bool) { w.lk.RLock() defer w.lk.RUnlock() diff --git a/bitswap/wantmanager/wantmanager.go b/bitswap/wantmanager/wantmanager.go index 3dcff166b..bf14ea711 100644 --- a/bitswap/wantmanager/wantmanager.go +++ b/bitswap/wantmanager/wantmanager.go @@ -21,7 +21,7 @@ const ( ) // WantSender sends changes out to the network as they get added to the wantlist -// managed by the WantManager +// managed by the WantManager. type WantSender interface { SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) } @@ -32,7 +32,7 @@ type wantMessage interface { // WantManager manages a global want list. It tracks two seperate want lists - // one for all wants, and one for wants that are specifically broadcast to the -// internet +// internet. type WantManager struct { // channel requests to the run loop // to get predictable behavior while running this in a go routine @@ -50,7 +50,7 @@ type WantManager struct { wantlistGauge metrics.Gauge } -// New initializes a new WantManager for a given context +// New initializes a new WantManager for a given context. func New(ctx context.Context) *WantManager { ctx, cancel := context.WithCancel(ctx) wantlistGauge := metrics.NewCtx(ctx, "wantlist_total", @@ -65,56 +65,56 @@ func New(ctx context.Context) *WantManager { } } -// SetDelegate specifies who will send want changes out to the internet +// SetDelegate specifies who will send want changes out to the internet. func (wm *WantManager) SetDelegate(wantSender WantSender) { wm.wantSender = wantSender } -// WantBlocks adds the given cids to the wantlist, tracked by the given session +// WantBlocks adds the given cids to the wantlist, tracked by the given session. 
func (wm *WantManager) WantBlocks(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) { log.Infof("want blocks: %s", ks) wm.addEntries(ctx, ks, peers, false, ses) } -// CancelWants removes the given cids from the wantlist, tracked by the given session +// CancelWants removes the given cids from the wantlist, tracked by the given session. func (wm *WantManager) CancelWants(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) { wm.addEntries(context.Background(), ks, peers, true, ses) } -// IsWanted returns whether a CID is currently wanted +// IsWanted returns whether a CID is currently wanted. func (wm *WantManager) IsWanted(c cid.Cid) bool { resp := make(chan bool) wm.wantMessages <- &isWantedMessage{c, resp} return <-resp } -// CurrentWants returns the list of current wants +// CurrentWants returns the list of current wants. func (wm *WantManager) CurrentWants() []*wantlist.Entry { resp := make(chan []*wantlist.Entry) wm.wantMessages <- ¤tWantsMessage{resp} return <-resp } -// CurrentBroadcastWants returns the current list of wants that are broadcasts +// CurrentBroadcastWants returns the current list of wants that are broadcasts. func (wm *WantManager) CurrentBroadcastWants() []*wantlist.Entry { resp := make(chan []*wantlist.Entry) wm.wantMessages <- ¤tBroadcastWantsMessage{resp} return <-resp } -// WantCount returns the total count of wants +// WantCount returns the total count of wants. func (wm *WantManager) WantCount() int { resp := make(chan int) wm.wantMessages <- &wantCountMessage{resp} return <-resp } -// Startup starts processing for the WantManager +// Startup starts processing for the WantManager. func (wm *WantManager) Startup() { go wm.run() } -// Shutdown ends processing for the want manager +// Shutdown ends processing for the want manager. 
func (wm *WantManager) Shutdown() { wm.cancel() } From bc11b67dbcc7c99cddd5c592aa9eafd304c2ad72 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 28 Nov 2018 14:26:25 -0800 Subject: [PATCH 0667/1035] refactor(sessions): extract sessions to package - moved sessions out of main bitswap package - modified session manager to manage all sessions - moved get functions to their own package so sessions can directly BREAKING CHANGE: SessionsForBlock, while not used outside of Bitswap, has been removed, and was an exported function This commit was moved from ipfs/go-bitswap@40aa1fb80a274ac4719512df70a8a763dbb3b373 --- bitswap/bitswap.go | 33 +- ..._test.go => bitswap_with_sessions_test.go} | 5 +- bitswap/dup_blocks_test.go | 5 +- bitswap/{get.go => getter/getter.go} | 22 +- bitswap/{ => session}/session.go | 370 ++++++++++-------- bitswap/sessionmanager/sessionmanager.go | 65 ++- 6 files changed, 302 insertions(+), 198 deletions(-) rename bitswap/{session_test.go => bitswap_with_sessions_test.go} (97%) rename bitswap/{get.go => getter/getter.go} (68%) rename bitswap/{ => session}/session.go (55%) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index cfaee4a3b..9dd203f72 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -10,6 +10,7 @@ import ( "time" decision "github.com/ipfs/go-bitswap/decision" + bsgetter "github.com/ipfs/go-bitswap/getter" bsmsg "github.com/ipfs/go-bitswap/message" bsmq "github.com/ipfs/go-bitswap/messagequeue" bsnet "github.com/ipfs/go-bitswap/network" @@ -100,6 +101,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, return bsmq.New(p, network) } + wm := bswm.New(ctx) bs := &Bitswap{ blockstore: bstore, notifications: notif, @@ -109,9 +111,9 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, process: px, newBlocks: make(chan cid.Cid, HasBlockBufferSize), provideKeys: make(chan cid.Cid, provideKeysBufferSize), - wm: bswm.New(ctx), + wm: wm, pm: bspm.New(ctx, peerQueueFactory), - sm: bssm.New(), + sm: 
bssm.New(ctx, wm, network), counters: new(counters), dupMetric: dupHist, allMetric: allHist, @@ -202,7 +204,7 @@ type blockRequest struct { // GetBlock attempts to retrieve a particular block from peers within the // deadline enforced by the context. func (bs *Bitswap) GetBlock(parent context.Context, k cid.Cid) (blocks.Block, error) { - return getBlock(parent, k, bs.GetBlocks) + return bsgetter.SyncGetBlock(parent, k, bs.GetBlocks) } func (bs *Bitswap) WantlistForPeer(p peer.ID) []cid.Cid { @@ -307,7 +309,7 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks return out, nil } -// CancelWant removes a given key from the wantlist. +// CancelWants removes a given key from the wantlist. func (bs *Bitswap) CancelWants(cids []cid.Cid, ses uint64) { if len(cids) == 0 { return @@ -345,12 +347,7 @@ func (bs *Bitswap) receiveBlockFrom(blk blocks.Block, from peer.ID) error { // it now as it requires more thought and isnt causing immediate problems. bs.notifications.Publish(blk) - k := blk.Cid() - ks := []cid.Cid{k} - for _, s := range bs.SessionsForBlock(k) { - s.receiveBlockFrom(from, blk) - bs.CancelWants(ks, s.id) - } + bs.sm.ReceiveBlockFrom(from, blk) bs.engine.AddBlock(blk) @@ -363,18 +360,6 @@ func (bs *Bitswap) receiveBlockFrom(blk blocks.Block, from peer.ID) error { return nil } -// SessionsForBlock returns a slice of all sessions that may be interested in the given cid. 
-func (bs *Bitswap) SessionsForBlock(c cid.Cid) []*Session { - var out []*Session - bs.sm.IterateSessions(func(session exchange.Fetcher) { - s := session.(*Session) - if s.interestedIn(c) { - out = append(out, s) - } - }) - return out -} - func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { atomic.AddUint64(&bs.counters.messagesRecvd, 1) @@ -477,3 +462,7 @@ func (bs *Bitswap) GetWantlist() []cid.Cid { func (bs *Bitswap) IsOnline() bool { return true } + +func (bs *Bitswap) NewSession(ctx context.Context) exchange.Fetcher { + return bs.sm.NewSession(ctx) +} diff --git a/bitswap/session_test.go b/bitswap/bitswap_with_sessions_test.go similarity index 97% rename from bitswap/session_test.go rename to bitswap/bitswap_with_sessions_test.go index c5a00a90b..5034aaeec 100644 --- a/bitswap/session_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + bssession "github.com/ipfs/go-bitswap/session" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" @@ -132,8 +133,8 @@ func TestSessionSplitFetch(t *testing.T) { cids = append(cids, blk.Cid()) } - ses := inst[10].Exchange.NewSession(ctx).(*Session) - ses.baseTickDelay = time.Millisecond * 10 + ses := inst[10].Exchange.NewSession(ctx).(*bssession.Session) + ses.SetBaseTickDelay(time.Millisecond * 10) for i := 0; i < 10; i++ { ch, err := ses.GetBlocks(ctx, cids[i*10:(i+1)*10]) diff --git a/bitswap/dup_blocks_test.go b/bitswap/dup_blocks_test.go index a48889a3c..58fc96144 100644 --- a/bitswap/dup_blocks_test.go +++ b/bitswap/dup_blocks_test.go @@ -11,6 +11,7 @@ import ( tn "github.com/ipfs/go-bitswap/testnet" + bssession "github.com/ipfs/go-bitswap/session" "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" @@ -248,14 +249,14 @@ func onePeerPerBlock(b *testing.B, provs []Instance, blks []blocks.Block) { } func 
oneAtATime(b *testing.B, bs *Bitswap, ks []cid.Cid) { - ses := bs.NewSession(context.Background()).(*Session) + ses := bs.NewSession(context.Background()).(*bssession.Session) for _, c := range ks { _, err := ses.GetBlock(context.Background(), c) if err != nil { b.Fatal(err) } } - b.Logf("Session fetch latency: %s", ses.latTotal/time.Duration(ses.fetchcnt)) + b.Logf("Session fetch latency: %s", ses.GetAverageLatency()) } // fetch data in batches, 10 at a time diff --git a/bitswap/get.go b/bitswap/getter/getter.go similarity index 68% rename from bitswap/get.go rename to bitswap/getter/getter.go index 8578277e8..4f1c29db6 100644 --- a/bitswap/get.go +++ b/bitswap/getter/getter.go @@ -1,19 +1,27 @@ -package bitswap +package getter import ( "context" "errors" notifications "github.com/ipfs/go-bitswap/notifications" + logging "github.com/ipfs/go-log" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blockstore "github.com/ipfs/go-ipfs-blockstore" ) -type getBlocksFunc func(context.Context, []cid.Cid) (<-chan blocks.Block, error) +var log = logging.Logger("bitswap") -func getBlock(p context.Context, k cid.Cid, gb getBlocksFunc) (blocks.Block, error) { +// GetBlocksFunc is any function that can take an array of CIDs and return a +// channel of incoming blocks. +type GetBlocksFunc func(context.Context, []cid.Cid) (<-chan blocks.Block, error) + +// SyncGetBlock takes a block cid and an async function for getting several +// blocks that returns a channel, and uses that function to return the +// block syncronously. +func SyncGetBlock(p context.Context, k cid.Cid, gb GetBlocksFunc) (blocks.Block, error) { if !k.Defined() { log.Error("undefined cid in GetBlock") return nil, blockstore.ErrNotFound @@ -49,9 +57,13 @@ func getBlock(p context.Context, k cid.Cid, gb getBlocksFunc) (blocks.Block, err } } -type wantFunc func(context.Context, []cid.Cid) +// WantFunc is any function that can express a want for set of blocks. 
+type WantFunc func(context.Context, []cid.Cid) -func getBlocksImpl(ctx context.Context, keys []cid.Cid, notif notifications.PubSub, want wantFunc, cwants func([]cid.Cid)) (<-chan blocks.Block, error) { +// AsyncGetBlocks take a set of block cids, a pubsub channel for incoming +// blocks, a want function, and a close function, +// and returns a channel of incoming blocks. +func AsyncGetBlocks(ctx context.Context, keys []cid.Cid, notif notifications.PubSub, want WantFunc, cwants func([]cid.Cid)) (<-chan blocks.Block, error) { if len(keys) == 0 { out := make(chan blocks.Block) close(out) diff --git a/bitswap/session.go b/bitswap/session/session.go similarity index 55% rename from bitswap/session.go rename to bitswap/session/session.go index 39748e40c..ef2ac501e 100644 --- a/bitswap/session.go +++ b/bitswap/session/session.go @@ -1,16 +1,16 @@ -package bitswap +package session import ( "context" "fmt" "time" - notifications "github.com/ipfs/go-bitswap/notifications" - lru "github.com/hashicorp/golang-lru" + bsgetter "github.com/ipfs/go-bitswap/getter" + bsnet "github.com/ipfs/go-bitswap/network" + notifications "github.com/ipfs/go-bitswap/notifications" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" - exchange "github.com/ipfs/go-ipfs-exchange-interface" logging "github.com/ipfs/go-log" loggables "github.com/libp2p/go-libp2p-loggables" peer "github.com/libp2p/go-libp2p-peer" @@ -18,41 +18,61 @@ import ( const activeWantsLimit = 16 +// SessionWantmanager is an interface that can be used to request blocks +// from given peers. +type SessionWantManager interface { + WantBlocks(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) + CancelWants(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) +} + +type interestReq struct { + c cid.Cid + resp chan bool +} + +type blkRecv struct { + from peer.ID + blk blocks.Block +} + // Session holds state for an individual bitswap transfer operation. 
// This allows bitswap to make smarter decisions about who to send wantlist // info to, and who to request blocks from. type Session struct { - ctx context.Context + // dependencies + ctx context.Context + wm SessionWantManager + network bsnet.BitSwapNetwork + + // channels + incoming chan blkRecv + newReqs chan []cid.Cid + cancelKeys chan []cid.Cid + interestReqs chan interestReq + latencyReqs chan chan time.Duration + tickDelayReqs chan time.Duration + + // do not touch outside run loop tofetch *cidQueue activePeers map[peer.ID]struct{} activePeersArr []peer.ID - - bs *Bitswap - incoming chan blkRecv - newReqs chan []cid.Cid - cancelKeys chan []cid.Cid - interestReqs chan interestReq - - interest *lru.Cache - liveWants map[cid.Cid]time.Time - - tick *time.Timer - baseTickDelay time.Duration - - latTotal time.Duration - fetchcnt int - + interest *lru.Cache + liveWants map[cid.Cid]time.Time + tick *time.Timer + baseTickDelay time.Duration + latTotal time.Duration + fetchcnt int + + // identifiers notif notifications.PubSub - - uuid logging.Loggable - - id uint64 - tag string + uuid logging.Loggable + id uint64 + tag string } -// NewSession creates a new bitswap session whose lifetime is bounded by the +// New creates a new bitswap session whose lifetime is bounded by the // given context. 
-func (bs *Bitswap) NewSession(ctx context.Context) exchange.Fetcher { +func New(ctx context.Context, id uint64, wm SessionWantManager, network bsnet.BitSwapNetwork) *Session { s := &Session{ activePeers: make(map[peer.ID]struct{}), liveWants: make(map[cid.Cid]time.Time), @@ -60,13 +80,16 @@ func (bs *Bitswap) NewSession(ctx context.Context) exchange.Fetcher { cancelKeys: make(chan []cid.Cid), tofetch: newCidQueue(), interestReqs: make(chan interestReq), + latencyReqs: make(chan chan time.Duration), + tickDelayReqs: make(chan time.Duration), ctx: ctx, - bs: bs, + wm: wm, + network: network, incoming: make(chan blkRecv), notif: notifications.New(), uuid: loggables.Uuid("GetBlockRequest"), baseTickDelay: time.Millisecond * 500, - id: bs.sm.GetNextSessionID(), + id: id, } s.tag = fmt.Sprint("bs-ses-", s.id) @@ -74,39 +97,63 @@ func (bs *Bitswap) NewSession(ctx context.Context) exchange.Fetcher { cache, _ := lru.New(2048) s.interest = cache - bs.sm.AddSession(s) go s.run(ctx) return s } -func (bs *Bitswap) removeSession(s *Session) { - s.notif.Shutdown() - - live := make([]cid.Cid, 0, len(s.liveWants)) - for c := range s.liveWants { - live = append(live, c) +// ReceiveBlockFrom receives an incoming block from the given peer. +func (s *Session) ReceiveBlockFrom(from peer.ID, blk blocks.Block) { + select { + case s.incoming <- blkRecv{from: from, blk: blk}: + case <-s.ctx.Done(): } - bs.CancelWants(live, s.id) +} - bs.sm.RemoveSession(s) +// InterestedIn returns true if this session is interested in the given Cid. +func (s *Session) InterestedIn(c cid.Cid) bool { + return s.interest.Contains(c) || s.isLiveWant(c) } -type blkRecv struct { - from peer.ID - blk blocks.Block +// GetBlock fetches a single block. 
+func (s *Session) GetBlock(parent context.Context, k cid.Cid) (blocks.Block, error) { + return bsgetter.SyncGetBlock(parent, k, s.GetBlocks) +} + +// GetBlocks fetches a set of blocks within the context of this session and +// returns a channel that found blocks will be returned on. No order is +// guaranteed on the returned blocks. +func (s *Session) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { + ctx = logging.ContextWithLoggable(ctx, s.uuid) + return bsgetter.AsyncGetBlocks(ctx, keys, s.notif, s.fetch, s.cancel) +} + +// ID returns the sessions identifier. +func (s *Session) ID() uint64 { + return s.id } -func (s *Session) receiveBlockFrom(from peer.ID, blk blocks.Block) { +func (s *Session) GetAverageLatency() time.Duration { + resp := make(chan time.Duration) select { - case s.incoming <- blkRecv{from: from, blk: blk}: + case s.latencyReqs <- resp: + case <-s.ctx.Done(): + return -1 * time.Millisecond + } + + select { + case latency := <-resp: + return latency case <-s.ctx.Done(): + return -1 * time.Millisecond } } -type interestReq struct { - c cid.Cid - resp chan bool +func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { + select { + case s.tickDelayReqs <- baseTickDelay: + case <-s.ctx.Done(): + } } // TODO: PERF: this is using a channel to guard a map access against race @@ -135,114 +182,147 @@ func (s *Session) isLiveWant(c cid.Cid) bool { } } -func (s *Session) interestedIn(c cid.Cid) bool { - return s.interest.Contains(c) || s.isLiveWant(c) -} - -const provSearchDelay = time.Second * 10 - -func (s *Session) addActivePeer(p peer.ID) { - if _, ok := s.activePeers[p]; !ok { - s.activePeers[p] = struct{}{} - s.activePeersArr = append(s.activePeersArr, p) - - cmgr := s.bs.network.ConnectionManager() - cmgr.TagPeer(p, s.tag, 10) +func (s *Session) fetch(ctx context.Context, keys []cid.Cid) { + select { + case s.newReqs <- keys: + case <-ctx.Done(): + case <-s.ctx.Done(): } } -func (s *Session) resetTick() { - if 
s.latTotal == 0 { - s.tick.Reset(provSearchDelay) - } else { - avLat := s.latTotal / time.Duration(s.fetchcnt) - s.tick.Reset(s.baseTickDelay + (3 * avLat)) +func (s *Session) cancel(keys []cid.Cid) { + select { + case s.cancelKeys <- keys: + case <-s.ctx.Done(): } } +const provSearchDelay = time.Second * 10 + +// Session run loop -- everything function below here should not be called +// of this loop func (s *Session) run(ctx context.Context) { s.tick = time.NewTimer(provSearchDelay) newpeers := make(chan peer.ID, 16) for { select { case blk := <-s.incoming: - s.tick.Stop() - - if blk.from != "" { - s.addActivePeer(blk.from) - } - - s.receiveBlock(ctx, blk.blk) - - s.resetTick() + s.handleIncomingBlock(ctx, blk) case keys := <-s.newReqs: - for _, k := range keys { - s.interest.Add(k, nil) - } - if len(s.liveWants) < activeWantsLimit { - toadd := activeWantsLimit - len(s.liveWants) - if toadd > len(keys) { - toadd = len(keys) - } - - now := keys[:toadd] - keys = keys[toadd:] - - s.wantBlocks(ctx, now) - } - for _, k := range keys { - s.tofetch.Push(k) - } + s.handleNewRequest(ctx, keys) case keys := <-s.cancelKeys: - s.cancel(keys) - + s.handleCancel(keys) case <-s.tick.C: - live := make([]cid.Cid, 0, len(s.liveWants)) - now := time.Now() - for c := range s.liveWants { - live = append(live, c) - s.liveWants[c] = now - } - - // Broadcast these keys to everyone we're connected to - s.bs.wm.WantBlocks(ctx, live, nil, s.id) - - if len(live) > 0 { - go func(k cid.Cid) { - // TODO: have a task queue setup for this to: - // - rate limit - // - manage timeouts - // - ensure two 'findprovs' calls for the same block don't run concurrently - // - share peers between sessions based on interest set - for p := range s.bs.network.FindProvidersAsync(ctx, k, 10) { - newpeers <- p - } - }(live[0]) - } - s.resetTick() + s.handleTick(ctx, newpeers) case p := <-newpeers: s.addActivePeer(p) case lwchk := <-s.interestReqs: lwchk.resp <- s.cidIsWanted(lwchk.c) + case resp := 
<-s.latencyReqs: + resp <- s.averageLatency() + case baseTickDelay := <-s.tickDelayReqs: + s.baseTickDelay = baseTickDelay case <-ctx.Done(): - s.tick.Stop() - s.bs.removeSession(s) - - cmgr := s.bs.network.ConnectionManager() - for _, p := range s.activePeersArr { - cmgr.UntagPeer(p, s.tag) - } + s.handleShutdown() return } } } +func (s *Session) handleIncomingBlock(ctx context.Context, blk blkRecv) { + s.tick.Stop() + + if blk.from != "" { + s.addActivePeer(blk.from) + } + + s.receiveBlock(ctx, blk.blk) + + s.resetTick() +} + +func (s *Session) handleNewRequest(ctx context.Context, keys []cid.Cid) { + for _, k := range keys { + s.interest.Add(k, nil) + } + if len(s.liveWants) < activeWantsLimit { + toadd := activeWantsLimit - len(s.liveWants) + if toadd > len(keys) { + toadd = len(keys) + } + + now := keys[:toadd] + keys = keys[toadd:] + + s.wantBlocks(ctx, now) + } + for _, k := range keys { + s.tofetch.Push(k) + } +} + +func (s *Session) handleCancel(keys []cid.Cid) { + for _, c := range keys { + s.tofetch.Remove(c) + } +} + +func (s *Session) handleTick(ctx context.Context, newpeers chan<- peer.ID) { + live := make([]cid.Cid, 0, len(s.liveWants)) + now := time.Now() + for c := range s.liveWants { + live = append(live, c) + s.liveWants[c] = now + } + + // Broadcast these keys to everyone we're connected to + s.wm.WantBlocks(ctx, live, nil, s.id) + + if len(live) > 0 { + go func(k cid.Cid) { + // TODO: have a task queue setup for this to: + // - rate limit + // - manage timeouts + // - ensure two 'findprovs' calls for the same block don't run concurrently + // - share peers between sessions based on interest set + for p := range s.network.FindProvidersAsync(ctx, k, 10) { + newpeers <- p + } + }(live[0]) + } + s.resetTick() +} + +func (s *Session) addActivePeer(p peer.ID) { + if _, ok := s.activePeers[p]; !ok { + s.activePeers[p] = struct{}{} + s.activePeersArr = append(s.activePeersArr, p) + + cmgr := s.network.ConnectionManager() + cmgr.TagPeer(p, s.tag, 10) + 
} +} + +func (s *Session) handleShutdown() { + s.tick.Stop() + s.notif.Shutdown() + + live := make([]cid.Cid, 0, len(s.liveWants)) + for c := range s.liveWants { + live = append(live, c) + } + s.wm.CancelWants(s.ctx, live, nil, s.id) + cmgr := s.network.ConnectionManager() + for _, p := range s.activePeersArr { + cmgr.UntagPeer(p, s.tag) + } +} + func (s *Session) cidIsWanted(c cid.Cid) bool { _, ok := s.liveWants[c] if !ok { ok = s.tofetch.Has(c) } - return ok } @@ -270,43 +350,21 @@ func (s *Session) wantBlocks(ctx context.Context, ks []cid.Cid) { for _, c := range ks { s.liveWants[c] = now } - s.bs.wm.WantBlocks(ctx, ks, s.activePeersArr, s.id) + s.wm.WantBlocks(ctx, ks, s.activePeersArr, s.id) } -func (s *Session) cancel(keys []cid.Cid) { - for _, c := range keys { - s.tofetch.Remove(c) - } -} - -func (s *Session) cancelWants(keys []cid.Cid) { - select { - case s.cancelKeys <- keys: - case <-s.ctx.Done(): - } +func (s *Session) averageLatency() time.Duration { + return s.latTotal / time.Duration(s.fetchcnt) } - -func (s *Session) fetch(ctx context.Context, keys []cid.Cid) { - select { - case s.newReqs <- keys: - case <-ctx.Done(): - case <-s.ctx.Done(): +func (s *Session) resetTick() { + if s.latTotal == 0 { + s.tick.Reset(provSearchDelay) + } else { + avLat := s.averageLatency() + s.tick.Reset(s.baseTickDelay + (3 * avLat)) } } -// GetBlocks fetches a set of blocks within the context of this session and -// returns a channel that found blocks will be returned on. No order is -// guaranteed on the returned blocks. -func (s *Session) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { - ctx = logging.ContextWithLoggable(ctx, s.uuid) - return getBlocksImpl(ctx, keys, s.notif, s.fetch, s.cancelWants) -} - -// GetBlock fetches a single block. 
-func (s *Session) GetBlock(parent context.Context, k cid.Cid) (blocks.Block, error) { - return getBlock(parent, k, s.GetBlocks) -} - type cidQueue struct { elems []cid.Cid eset *cid.Set diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index e0e8dec49..05aa916ac 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -1,32 +1,71 @@ package sessionmanager import ( + "context" "sync" + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + + bsnet "github.com/ipfs/go-bitswap/network" + bssession "github.com/ipfs/go-bitswap/session" + bswm "github.com/ipfs/go-bitswap/wantmanager" exchange "github.com/ipfs/go-ipfs-exchange-interface" + peer "github.com/libp2p/go-libp2p-peer" ) +// SessionManager is responsible for creating, managing, and dispatching to +// sessions. type SessionManager struct { + wm *bswm.WantManager + network bsnet.BitSwapNetwork + ctx context.Context // Sessions sessLk sync.Mutex - sessions []exchange.Fetcher + sessions []*bssession.Session // Session Index sessIDLk sync.Mutex sessID uint64 } -func New() *SessionManager { - return &SessionManager{} +// New creates a new SessionManager. +func New(ctx context.Context, wm *bswm.WantManager, network bsnet.BitSwapNetwork) *SessionManager { + return &SessionManager{ + ctx: ctx, + wm: wm, + network: network, + } } -func (sm *SessionManager) AddSession(session exchange.Fetcher) { +// NewSession initializes a session with the given context, and adds to the +// session manager. 
+func (sm *SessionManager) NewSession(ctx context.Context) exchange.Fetcher { + id := sm.GetNextSessionID() + sessionctx, cancel := context.WithCancel(ctx) + + session := bssession.New(sessionctx, id, sm.wm, sm.network) sm.sessLk.Lock() sm.sessions = append(sm.sessions, session) sm.sessLk.Unlock() + go func() { + for { + defer cancel() + select { + case <-sm.ctx.Done(): + sm.removeSession(session) + return + case <-ctx.Done(): + sm.removeSession(session) + return + } + } + }() + + return session } -func (sm *SessionManager) RemoveSession(session exchange.Fetcher) { +func (sm *SessionManager) removeSession(session exchange.Fetcher) { sm.sessLk.Lock() defer sm.sessLk.Unlock() for i := 0; i < len(sm.sessions); i++ { @@ -38,6 +77,7 @@ func (sm *SessionManager) RemoveSession(session exchange.Fetcher) { } } +// GetNextSessionID returns the next sequentional identifier for a session. func (sm *SessionManager) GetNextSessionID() uint64 { sm.sessIDLk.Lock() defer sm.sessIDLk.Unlock() @@ -45,15 +85,18 @@ func (sm *SessionManager) GetNextSessionID() uint64 { return sm.sessID } -type IterateSessionFunc func(session exchange.Fetcher) - -// IterateSessions loops through all managed sessions and applies the given -// IterateSessionFunc. -func (sm *SessionManager) IterateSessions(iterate IterateSessionFunc) { +// ReceiveBlockFrom receives a block from a peer and dispatches to interested +// sessions. 
+func (sm *SessionManager) ReceiveBlockFrom(from peer.ID, blk blocks.Block) { sm.sessLk.Lock() defer sm.sessLk.Unlock() + k := blk.Cid() + ks := []cid.Cid{k} for _, s := range sm.sessions { - iterate(s) + if s.InterestedIn(k) { + s.ReceiveBlockFrom(from, blk) + sm.wm.CancelWants(sm.ctx, ks, nil, s.ID()) + } } } From 89e942f50543ae88cfba21d65c781f466d1e54d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 12 Dec 2018 16:37:43 +0200 Subject: [PATCH 0668/1035] Fix debug log formatting issues This commit was moved from ipfs/go-bitswap@eddd2b9dc75275fe2b2d12b3295859ed4f1bfd50 --- bitswap/workers.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index 99a967068..32f9da813 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -205,7 +205,7 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) { case <-tick.C: n := bs.wm.WantCount() if n > 0 { - log.Debug(n, " keys in bitswap wantlist") + log.Debugf("%d keys in bitswap wantlist", n) } case <-broadcastSignal.C: // resend unfulfilled wantlist keys log.Event(ctx, "Bitswap.Rebroadcast.active") @@ -259,7 +259,7 @@ func (bs *Bitswap) providerQueryManager(ctx context.Context) { defer wg.Done() err := bs.network.ConnectTo(child, p) if err != nil { - log.Debug("failed to connect to provider %s: %s", p, err) + log.Debugf("failed to connect to provider %s: %s", p, err) } }(p) } From 840c142eef2a11ee444efbf51cc35544ced8e9c5 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 13 Dec 2018 10:40:04 -0800 Subject: [PATCH 0669/1035] fix(sessions): minor cleanup remove for loop not needed, cleanup spelling This commit was moved from ipfs/go-bitswap@bf5cc6918b58ee765f25fe8061db2e3fd68a95fe --- bitswap/session/session.go | 2 +- bitswap/sessionmanager/sessionmanager.go | 16 ++++++---------- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 
ef2ac501e..8b30216e4 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -18,7 +18,7 @@ import ( const activeWantsLimit = 16 -// SessionWantmanager is an interface that can be used to request blocks +// SessionWantManager is an interface that can be used to request blocks // from given peers. type SessionWantManager interface { WantBlocks(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index 05aa916ac..f2df196f4 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -49,16 +49,12 @@ func (sm *SessionManager) NewSession(ctx context.Context) exchange.Fetcher { sm.sessions = append(sm.sessions, session) sm.sessLk.Unlock() go func() { - for { - defer cancel() - select { - case <-sm.ctx.Done(): - sm.removeSession(session) - return - case <-ctx.Done(): - sm.removeSession(session) - return - } + defer cancel() + select { + case <-sm.ctx.Done(): + sm.removeSession(session) + case <-ctx.Done(): + sm.removeSession(session) } }() From 8a010ddfeb2dd1ff7d1df3eaeab5b8fcfe848c1e Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 28 Nov 2018 15:30:09 -0800 Subject: [PATCH 0670/1035] refactor(sessions): extract peer management extract the job of finding and managing peers for a session from the job of requesting blocks This commit was moved from ipfs/go-bitswap@9e8912681452cff949cb729cc819247a477def72 --- bitswap/session/session.go | 83 +++++------- bitswap/sessionmanager/sessionmanager.go | 34 ++--- .../sessionpeermanager/sessionpeermanager.go | 118 ++++++++++++++++++ 3 files changed, 168 insertions(+), 67 deletions(-) create mode 100644 bitswap/sessionpeermanager/sessionpeermanager.go diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 8b30216e4..a1a4fdfad 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -2,12 +2,10 @@ package session import ( 
"context" - "fmt" "time" lru "github.com/hashicorp/golang-lru" bsgetter "github.com/ipfs/go-bitswap/getter" - bsnet "github.com/ipfs/go-bitswap/network" notifications "github.com/ipfs/go-bitswap/notifications" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" @@ -18,13 +16,20 @@ import ( const activeWantsLimit = 16 -// SessionWantManager is an interface that can be used to request blocks +// Wantmanager is an interface that can be used to request blocks // from given peers. -type SessionWantManager interface { +type WantManager interface { WantBlocks(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) CancelWants(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) } +type PeerManager interface { + FindMorePeers(context.Context, cid.Cid) + GetOptimizedPeers() []peer.ID + RecordPeerRequests([]peer.ID, []cid.Cid) + RecordPeerResponse(peer.ID, cid.Cid) +} + type interestReq struct { c cid.Cid resp chan bool @@ -40,9 +45,9 @@ type blkRecv struct { // info to, and who to request blocks from. type Session struct { // dependencies - ctx context.Context - wm SessionWantManager - network bsnet.BitSwapNetwork + ctx context.Context + wm WantManager + pm PeerManager // channels incoming chan blkRecv @@ -53,28 +58,24 @@ type Session struct { tickDelayReqs chan time.Duration // do not touch outside run loop - tofetch *cidQueue - activePeers map[peer.ID]struct{} - activePeersArr []peer.ID - interest *lru.Cache - liveWants map[cid.Cid]time.Time - tick *time.Timer - baseTickDelay time.Duration - latTotal time.Duration - fetchcnt int + tofetch *cidQueue + interest *lru.Cache + liveWants map[cid.Cid]time.Time + tick *time.Timer + baseTickDelay time.Duration + latTotal time.Duration + fetchcnt int // identifiers notif notifications.PubSub uuid logging.Loggable id uint64 - tag string } // New creates a new bitswap session whose lifetime is bounded by the // given context. 
-func New(ctx context.Context, id uint64, wm SessionWantManager, network bsnet.BitSwapNetwork) *Session { +func New(ctx context.Context, id uint64, wm WantManager, pm PeerManager) *Session { s := &Session{ - activePeers: make(map[peer.ID]struct{}), liveWants: make(map[cid.Cid]time.Time), newReqs: make(chan []cid.Cid), cancelKeys: make(chan []cid.Cid), @@ -84,7 +85,7 @@ func New(ctx context.Context, id uint64, wm SessionWantManager, network bsnet.Bi tickDelayReqs: make(chan time.Duration), ctx: ctx, wm: wm, - network: network, + pm: pm, incoming: make(chan blkRecv), notif: notifications.New(), uuid: loggables.Uuid("GetBlockRequest"), @@ -92,8 +93,6 @@ func New(ctx context.Context, id uint64, wm SessionWantManager, network bsnet.Bi id: id, } - s.tag = fmt.Sprint("bs-ses-", s.id) - cache, _ := lru.New(2048) s.interest = cache @@ -203,7 +202,6 @@ const provSearchDelay = time.Second * 10 // of this loop func (s *Session) run(ctx context.Context) { s.tick = time.NewTimer(provSearchDelay) - newpeers := make(chan peer.ID, 16) for { select { case blk := <-s.incoming: @@ -213,9 +211,7 @@ func (s *Session) run(ctx context.Context) { case keys := <-s.cancelKeys: s.handleCancel(keys) case <-s.tick.C: - s.handleTick(ctx, newpeers) - case p := <-newpeers: - s.addActivePeer(p) + s.handleTick(ctx) case lwchk := <-s.interestReqs: lwchk.resp <- s.cidIsWanted(lwchk.c) case resp := <-s.latencyReqs: @@ -233,7 +229,7 @@ func (s *Session) handleIncomingBlock(ctx context.Context, blk blkRecv) { s.tick.Stop() if blk.from != "" { - s.addActivePeer(blk.from) + s.pm.RecordPeerResponse(blk.from, blk.blk.Cid()) } s.receiveBlock(ctx, blk.blk) @@ -267,7 +263,7 @@ func (s *Session) handleCancel(keys []cid.Cid) { } } -func (s *Session) handleTick(ctx context.Context, newpeers chan<- peer.ID) { +func (s *Session) handleTick(ctx context.Context) { live := make([]cid.Cid, 0, len(s.liveWants)) now := time.Now() for c := range s.liveWants { @@ -276,33 +272,15 @@ func (s *Session) handleTick(ctx 
context.Context, newpeers chan<- peer.ID) { } // Broadcast these keys to everyone we're connected to + s.pm.RecordPeerRequests(nil, live) s.wm.WantBlocks(ctx, live, nil, s.id) if len(live) > 0 { - go func(k cid.Cid) { - // TODO: have a task queue setup for this to: - // - rate limit - // - manage timeouts - // - ensure two 'findprovs' calls for the same block don't run concurrently - // - share peers between sessions based on interest set - for p := range s.network.FindProvidersAsync(ctx, k, 10) { - newpeers <- p - } - }(live[0]) + s.pm.FindMorePeers(ctx, live[0]) } s.resetTick() } -func (s *Session) addActivePeer(p peer.ID) { - if _, ok := s.activePeers[p]; !ok { - s.activePeers[p] = struct{}{} - s.activePeersArr = append(s.activePeersArr, p) - - cmgr := s.network.ConnectionManager() - cmgr.TagPeer(p, s.tag, 10) - } -} - func (s *Session) handleShutdown() { s.tick.Stop() s.notif.Shutdown() @@ -312,10 +290,6 @@ func (s *Session) handleShutdown() { live = append(live, c) } s.wm.CancelWants(s.ctx, live, nil, s.id) - cmgr := s.network.ConnectionManager() - for _, p := range s.activePeersArr { - cmgr.UntagPeer(p, s.tag) - } } func (s *Session) cidIsWanted(c cid.Cid) bool { @@ -350,7 +324,10 @@ func (s *Session) wantBlocks(ctx context.Context, ks []cid.Cid) { for _, c := range ks { s.liveWants[c] = now } - s.wm.WantBlocks(ctx, ks, s.activePeersArr, s.id) + peers := s.pm.GetOptimizedPeers() + // right now we're requesting each block from every peer, but soon, maybe not + s.pm.RecordPeerRequests(peers, ks) + s.wm.WantBlocks(ctx, ks, peers, s.id) } func (s *Session) averageLatency() time.Duration { diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index f2df196f4..c57d319e3 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -7,22 +7,26 @@ import ( blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" - bsnet "github.com/ipfs/go-bitswap/network" bssession 
"github.com/ipfs/go-bitswap/session" - bswm "github.com/ipfs/go-bitswap/wantmanager" + bsspm "github.com/ipfs/go-bitswap/sessionpeermanager" exchange "github.com/ipfs/go-ipfs-exchange-interface" peer "github.com/libp2p/go-libp2p-peer" ) +type sesTrk struct { + session *bssession.Session + pm *bsspm.SessionPeerManager +} + // SessionManager is responsible for creating, managing, and dispatching to // sessions. type SessionManager struct { - wm *bswm.WantManager - network bsnet.BitSwapNetwork + wm bssession.WantManager + network bsspm.PeerNetwork ctx context.Context // Sessions sessLk sync.Mutex - sessions []*bssession.Session + sessions []sesTrk // Session Index sessIDLk sync.Mutex @@ -30,7 +34,7 @@ type SessionManager struct { } // New creates a new SessionManager. -func New(ctx context.Context, wm *bswm.WantManager, network bsnet.BitSwapNetwork) *SessionManager { +func New(ctx context.Context, wm bssession.WantManager, network bsspm.PeerNetwork) *SessionManager { return &SessionManager{ ctx: ctx, wm: wm, @@ -44,24 +48,26 @@ func (sm *SessionManager) NewSession(ctx context.Context) exchange.Fetcher { id := sm.GetNextSessionID() sessionctx, cancel := context.WithCancel(ctx) - session := bssession.New(sessionctx, id, sm.wm, sm.network) + pm := bsspm.New(sessionctx, id, sm.network) + session := bssession.New(sessionctx, id, sm.wm, pm) + tracked := sesTrk{session, pm} sm.sessLk.Lock() - sm.sessions = append(sm.sessions, session) + sm.sessions = append(sm.sessions, tracked) sm.sessLk.Unlock() go func() { defer cancel() select { case <-sm.ctx.Done(): - sm.removeSession(session) + sm.removeSession(tracked) case <-ctx.Done(): - sm.removeSession(session) + sm.removeSession(tracked) } }() return session } -func (sm *SessionManager) removeSession(session exchange.Fetcher) { +func (sm *SessionManager) removeSession(session sesTrk) { sm.sessLk.Lock() defer sm.sessLk.Unlock() for i := 0; i < len(sm.sessions); i++ { @@ -90,9 +96,9 @@ func (sm *SessionManager) 
ReceiveBlockFrom(from peer.ID, blk blocks.Block) { k := blk.Cid() ks := []cid.Cid{k} for _, s := range sm.sessions { - if s.InterestedIn(k) { - s.ReceiveBlockFrom(from, blk) - sm.wm.CancelWants(sm.ctx, ks, nil, s.ID()) + if s.session.InterestedIn(k) { + s.session.ReceiveBlockFrom(from, blk) + sm.wm.CancelWants(sm.ctx, ks, nil, s.session.ID()) } } } diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go new file mode 100644 index 000000000..0f77ff11e --- /dev/null +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -0,0 +1,118 @@ +package sessionpeermanager + +import ( + "context" + "fmt" + + cid "github.com/ipfs/go-cid" + ifconnmgr "github.com/libp2p/go-libp2p-interface-connmgr" + peer "github.com/libp2p/go-libp2p-peer" +) + +type PeerNetwork interface { + ConnectionManager() ifconnmgr.ConnManager + FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID +} + +type SessionPeerManager struct { + ctx context.Context + network PeerNetwork + tag string + + newPeers chan peer.ID + peerReqs chan chan []peer.ID + + // do not touch outside of run loop + activePeers map[peer.ID]struct{} + activePeersArr []peer.ID +} + +func New(ctx context.Context, id uint64, network PeerNetwork) *SessionPeerManager { + spm := &SessionPeerManager{ + ctx: ctx, + network: network, + newPeers: make(chan peer.ID, 16), + peerReqs: make(chan chan []peer.ID), + activePeers: make(map[peer.ID]struct{}), + } + + spm.tag = fmt.Sprint("bs-ses-", id) + + go spm.run(ctx) + return spm +} + +func (spm *SessionPeerManager) RecordPeerResponse(p peer.ID, k cid.Cid) { + // at the moment, we're just adding peers here + // in the future, we'll actually use this to record metrics + select { + case spm.newPeers <- p: + case <-spm.ctx.Done(): + } +} + +func (spm *SessionPeerManager) RecordPeerRequests(p []peer.ID, ks []cid.Cid) { + // at the moment, we're not doing anything here + // soon we'll use this to track latency by peer +} + +func (spm 
*SessionPeerManager) GetOptimizedPeers() []peer.ID { + // right now this just returns all peers, but soon we might return peers + // ordered by optimization, or only a subset + resp := make(chan []peer.ID) + select { + case spm.peerReqs <- resp: + case <-spm.ctx.Done(): + return nil + } + + select { + case peers := <-resp: + return peers + case <-spm.ctx.Done(): + return nil + } +} + +func (spm *SessionPeerManager) FindMorePeers(ctx context.Context, c cid.Cid) { + go func(k cid.Cid) { + // TODO: have a task queue setup for this to: + // - rate limit + // - manage timeouts + // - ensure two 'findprovs' calls for the same block don't run concurrently + // - share peers between sessions based on interest set + for p := range spm.network.FindProvidersAsync(ctx, k, 10) { + spm.newPeers <- p + } + }(c) +} + +func (spm *SessionPeerManager) run(ctx context.Context) { + for { + select { + case p := <-spm.newPeers: + spm.addActivePeer(p) + case resp := <-spm.peerReqs: + resp <- spm.activePeersArr + case <-ctx.Done(): + spm.handleShutdown() + return + } + } +} +func (spm *SessionPeerManager) addActivePeer(p peer.ID) { + if _, ok := spm.activePeers[p]; !ok { + spm.activePeers[p] = struct{}{} + spm.activePeersArr = append(spm.activePeersArr, p) + + cmgr := spm.network.ConnectionManager() + cmgr.TagPeer(p, spm.tag, 10) + } +} + +func (spm *SessionPeerManager) handleShutdown() { + cmgr := spm.network.ConnectionManager() + for _, p := range spm.activePeersArr { + cmgr.UntagPeer(p, spm.tag) + } +} From 590a06fe0bc7fe2a469d60348f9b9cde0ce86c10 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 28 Nov 2018 16:50:53 -0800 Subject: [PATCH 0671/1035] refactor(session): cleanup sessions This commit was moved from ipfs/go-bitswap@d7a532d03b341fed5f527799da283f874e0d1d82 --- bitswap/session/cidqueue.go | 46 ++++++++++++++++++++++++++++++++ bitswap/session/session.go | 52 ++++++------------------------------- 2 files changed, 54 insertions(+), 44 deletions(-) create mode 100644 
bitswap/session/cidqueue.go diff --git a/bitswap/session/cidqueue.go b/bitswap/session/cidqueue.go new file mode 100644 index 000000000..cf461a6cb --- /dev/null +++ b/bitswap/session/cidqueue.go @@ -0,0 +1,46 @@ +package session + +import cid "github.com/ipfs/go-cid" + +type cidQueue struct { + elems []cid.Cid + eset *cid.Set +} + +func newCidQueue() *cidQueue { + return &cidQueue{eset: cid.NewSet()} +} + +func (cq *cidQueue) Pop() cid.Cid { + for { + if len(cq.elems) == 0 { + return cid.Cid{} + } + + out := cq.elems[0] + cq.elems = cq.elems[1:] + + if cq.eset.Has(out) { + cq.eset.Remove(out) + return out + } + } +} + +func (cq *cidQueue) Push(c cid.Cid) { + if cq.eset.Visit(c) { + cq.elems = append(cq.elems, c) + } +} + +func (cq *cidQueue) Remove(c cid.Cid) { + cq.eset.Remove(c) +} + +func (cq *cidQueue) Has(c cid.Cid) bool { + return cq.eset.Has(c) +} + +func (cq *cidQueue) Len() int { + return cq.eset.Len() +} diff --git a/bitswap/session/session.go b/bitswap/session/session.go index a1a4fdfad..9620f07b1 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -16,13 +16,15 @@ import ( const activeWantsLimit = 16 -// Wantmanager is an interface that can be used to request blocks +// WantManager is an interface that can be used to request blocks // from given peers. type WantManager interface { WantBlocks(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) CancelWants(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) } +// PeerManager provides an interface for tracking and optimize peers, and +// requesting more when neccesary. 
type PeerManager interface { FindMorePeers(context.Context, cid.Cid) GetOptimizedPeers() []peer.ID @@ -107,6 +109,9 @@ func (s *Session) ReceiveBlockFrom(from peer.ID, blk blocks.Block) { case s.incoming <- blkRecv{from: from, blk: blk}: case <-s.ctx.Done(): } + ks := []cid.Cid{blk.Cid()} + s.wm.CancelWants(s.ctx, ks, nil, s.id) + } // InterestedIn returns true if this session is interested in the given Cid. @@ -132,6 +137,7 @@ func (s *Session) ID() uint64 { return s.id } +// GetAverageLatency returns the average latency for block requests. func (s *Session) GetAverageLatency() time.Duration { resp := make(chan time.Duration) select { @@ -148,6 +154,7 @@ func (s *Session) GetAverageLatency() time.Duration { } } +// SetBaseTickDelay changes the rate at which ticks happen. func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { select { case s.tickDelayReqs <- baseTickDelay: @@ -341,46 +348,3 @@ func (s *Session) resetTick() { s.tick.Reset(s.baseTickDelay + (3 * avLat)) } } - -type cidQueue struct { - elems []cid.Cid - eset *cid.Set -} - -func newCidQueue() *cidQueue { - return &cidQueue{eset: cid.NewSet()} -} - -func (cq *cidQueue) Pop() cid.Cid { - for { - if len(cq.elems) == 0 { - return cid.Cid{} - } - - out := cq.elems[0] - cq.elems = cq.elems[1:] - - if cq.eset.Has(out) { - cq.eset.Remove(out) - return out - } - } -} - -func (cq *cidQueue) Push(c cid.Cid) { - if cq.eset.Visit(c) { - cq.elems = append(cq.elems, c) - } -} - -func (cq *cidQueue) Remove(c cid.Cid) { - cq.eset.Remove(c) -} - -func (cq *cidQueue) Has(c cid.Cid) bool { - return cq.eset.Has(c) -} - -func (cq *cidQueue) Len() int { - return cq.eset.Len() -} From e1e4b584c121a11e8bc9a7fb0f0b9f599f5024c5 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 28 Nov 2018 16:51:31 -0800 Subject: [PATCH 0672/1035] test(sessionmanager): Add unit test Add a unit test and do some additional decoupling This commit was moved from ipfs/go-bitswap@e1a25234046f371f5cf3161cc1a410adfd581e28 --- 
bitswap/bitswap.go | 12 +- bitswap/sessionmanager/sessionmanager.go | 38 ++-- bitswap/sessionmanager/sessionmanager_test.go | 163 ++++++++++++++++++ 3 files changed, 197 insertions(+), 16 deletions(-) create mode 100644 bitswap/sessionmanager/sessionmanager_test.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 9dd203f72..29afee24e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -16,9 +16,10 @@ import ( bsnet "github.com/ipfs/go-bitswap/network" notifications "github.com/ipfs/go-bitswap/notifications" bspm "github.com/ipfs/go-bitswap/peermanager" + bssession "github.com/ipfs/go-bitswap/session" bssm "github.com/ipfs/go-bitswap/sessionmanager" + bsspm "github.com/ipfs/go-bitswap/sessionpeermanager" bswm "github.com/ipfs/go-bitswap/wantmanager" - blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blockstore "github.com/ipfs/go-ipfs-blockstore" @@ -102,6 +103,13 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, } wm := bswm.New(ctx) + sessionFactory := func(ctx context.Context, id uint64, pm bssession.PeerManager) bssm.Session { + return bssession.New(ctx, id, wm, pm) + } + sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.PeerManager { + return bsspm.New(ctx, id, network) + } + bs := &Bitswap{ blockstore: bstore, notifications: notif, @@ -113,7 +121,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, provideKeys: make(chan cid.Cid, provideKeysBufferSize), wm: wm, pm: bspm.New(ctx, peerQueueFactory), - sm: bssm.New(ctx, wm, network), + sm: bssm.New(ctx, sessionFactory, sessionPeerManagerFactory), counters: new(counters), dupMetric: dupHist, allMetric: allHist, diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index c57d319e3..7e3fe2a5d 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -8,22 +8,34 @@ import ( cid "github.com/ipfs/go-cid" bssession 
"github.com/ipfs/go-bitswap/session" - bsspm "github.com/ipfs/go-bitswap/sessionpeermanager" exchange "github.com/ipfs/go-ipfs-exchange-interface" peer "github.com/libp2p/go-libp2p-peer" ) +// Session is a session that is managed by the session manager +type Session interface { + exchange.Fetcher + InterestedIn(cid.Cid) bool + ReceiveBlockFrom(peer.ID, blocks.Block) +} + type sesTrk struct { - session *bssession.Session - pm *bsspm.SessionPeerManager + session Session + pm bssession.PeerManager } +// SessionFactory generates a new session for the SessionManager to track. +type SessionFactory func(ctx context.Context, id uint64, pm bssession.PeerManager) Session + +// PeerManagerFactory generates a new peer manager for a session. +type PeerManagerFactory func(ctx context.Context, id uint64) bssession.PeerManager + // SessionManager is responsible for creating, managing, and dispatching to // sessions. type SessionManager struct { - wm bssession.WantManager - network bsspm.PeerNetwork - ctx context.Context + ctx context.Context + sessionFactory SessionFactory + peerManagerFactory PeerManagerFactory // Sessions sessLk sync.Mutex sessions []sesTrk @@ -34,11 +46,11 @@ type SessionManager struct { } // New creates a new SessionManager. 
-func New(ctx context.Context, wm bssession.WantManager, network bsspm.PeerNetwork) *SessionManager { +func New(ctx context.Context, sessionFactory SessionFactory, peerManagerFactory PeerManagerFactory) *SessionManager { return &SessionManager{ - ctx: ctx, - wm: wm, - network: network, + ctx: ctx, + sessionFactory: sessionFactory, + peerManagerFactory: peerManagerFactory, } } @@ -48,8 +60,8 @@ func (sm *SessionManager) NewSession(ctx context.Context) exchange.Fetcher { id := sm.GetNextSessionID() sessionctx, cancel := context.WithCancel(ctx) - pm := bsspm.New(sessionctx, id, sm.network) - session := bssession.New(sessionctx, id, sm.wm, pm) + pm := sm.peerManagerFactory(sessionctx, id) + session := sm.sessionFactory(sessionctx, id, pm) tracked := sesTrk{session, pm} sm.sessLk.Lock() sm.sessions = append(sm.sessions, tracked) @@ -94,11 +106,9 @@ func (sm *SessionManager) ReceiveBlockFrom(from peer.ID, blk blocks.Block) { defer sm.sessLk.Unlock() k := blk.Cid() - ks := []cid.Cid{k} for _, s := range sm.sessions { if s.session.InterestedIn(k) { s.session.ReceiveBlockFrom(from, blk) - sm.wm.CancelWants(sm.ctx, ks, nil, s.session.ID()) } } } diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go new file mode 100644 index 000000000..b030c0132 --- /dev/null +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -0,0 +1,163 @@ +package sessionmanager + +import ( + "context" + "testing" + "time" + + bssession "github.com/ipfs/go-bitswap/session" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-peer" +) + +type fakeSession struct { + interested bool + receivedBlock bool + id uint64 + pm *fakePeerManager +} + +func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { + return nil, nil +} +func (*fakeSession) GetBlocks(context.Context, []cid.Cid) (<-chan blocks.Block, error) { + return nil, nil +} +func (fs *fakeSession) InterestedIn(cid.Cid) 
bool { return fs.interested } +func (fs *fakeSession) ReceiveBlockFrom(peer.ID, blocks.Block) { fs.receivedBlock = true } + +type fakePeerManager struct { + id uint64 +} + +func (*fakePeerManager) FindMorePeers(context.Context, cid.Cid) {} +func (*fakePeerManager) GetOptimizedPeers() []peer.ID { return nil } +func (*fakePeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} +func (*fakePeerManager) RecordPeerResponse(peer.ID, cid.Cid) {} + +var nextInterestedIn bool + +func sessionFactory(ctx context.Context, id uint64, pm bssession.PeerManager) Session { + return &fakeSession{ + interested: nextInterestedIn, + receivedBlock: false, + id: id, + pm: pm.(*fakePeerManager), + } +} + +func peerManagerFactory(ctx context.Context, id uint64) bssession.PeerManager { + return &fakePeerManager{id} +} + +func TestAddingSessions(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + sm := New(ctx, sessionFactory, peerManagerFactory) + + p := peer.ID(123) + block := blocks.NewBlock([]byte("block")) + // we'll be interested in all blocks for this test + nextInterestedIn = true + + currentID := sm.GetNextSessionID() + firstSession := sm.NewSession(ctx).(*fakeSession) + if firstSession.id != firstSession.pm.id || + firstSession.id != currentID+1 { + t.Fatal("session does not have correct id set") + } + secondSession := sm.NewSession(ctx).(*fakeSession) + if secondSession.id != secondSession.pm.id || + secondSession.id != firstSession.id+1 { + t.Fatal("session does not have correct id set") + } + sm.GetNextSessionID() + thirdSession := sm.NewSession(ctx).(*fakeSession) + if thirdSession.id != thirdSession.pm.id || + thirdSession.id != secondSession.id+2 { + t.Fatal("session does not have correct id set") + } + sm.ReceiveBlockFrom(p, block) + if !firstSession.receivedBlock || + !secondSession.receivedBlock || + !thirdSession.receivedBlock { + t.Fatal("should have received blocks but didn't") + } +} + +func 
TestReceivingBlocksWhenNotInterested(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + sm := New(ctx, sessionFactory, peerManagerFactory) + + p := peer.ID(123) + block := blocks.NewBlock([]byte("block")) + // we'll be interested in all blocks for this test + nextInterestedIn = false + firstSession := sm.NewSession(ctx).(*fakeSession) + nextInterestedIn = true + secondSession := sm.NewSession(ctx).(*fakeSession) + nextInterestedIn = false + thirdSession := sm.NewSession(ctx).(*fakeSession) + + sm.ReceiveBlockFrom(p, block) + if firstSession.receivedBlock || + !secondSession.receivedBlock || + thirdSession.receivedBlock { + t.Fatal("did not receive blocks only for interested sessions") + } +} + +func TestRemovingPeersWhenManagerContextCancelled(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + sm := New(ctx, sessionFactory, peerManagerFactory) + + p := peer.ID(123) + block := blocks.NewBlock([]byte("block")) + // we'll be interested in all blocks for this test + nextInterestedIn = true + firstSession := sm.NewSession(ctx).(*fakeSession) + secondSession := sm.NewSession(ctx).(*fakeSession) + thirdSession := sm.NewSession(ctx).(*fakeSession) + + cancel() + // wait for sessions to get removed + time.Sleep(10 * time.Millisecond) + sm.ReceiveBlockFrom(p, block) + if firstSession.receivedBlock || + secondSession.receivedBlock || + thirdSession.receivedBlock { + t.Fatal("received blocks for sessions after manager is shutdown") + } +} + +func TestRemovingPeersWhenSessionContextCancelled(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + sm := New(ctx, sessionFactory, peerManagerFactory) + + p := peer.ID(123) + block := blocks.NewBlock([]byte("block")) + // we'll be interested in all blocks for this test + nextInterestedIn = true + firstSession := sm.NewSession(ctx).(*fakeSession) + sessionCtx, sessionCancel := 
context.WithCancel(ctx) + secondSession := sm.NewSession(sessionCtx).(*fakeSession) + thirdSession := sm.NewSession(ctx).(*fakeSession) + + sessionCancel() + // wait for sessions to get removed + time.Sleep(10 * time.Millisecond) + sm.ReceiveBlockFrom(p, block) + if !firstSession.receivedBlock || + secondSession.receivedBlock || + !thirdSession.receivedBlock { + t.Fatal("received blocks for sessions that are canceled") + } +} From 272b5526076ba62c47c05a8bfdee45aaaf9ac716 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 28 Nov 2018 17:44:38 -0800 Subject: [PATCH 0673/1035] test(sessionpeermanager): Add unit test Add unit test for sessionpeermanger and comment exported methods This commit was moved from ipfs/go-bitswap@ec47a3d0f47894924a2404d9900287d7e033d9cf --- .../sessionpeermanager/sessionpeermanager.go | 11 ++ .../sessionpeermanager_test.go | 136 ++++++++++++++++++ 2 files changed, 147 insertions(+) create mode 100644 bitswap/sessionpeermanager/sessionpeermanager_test.go diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index 0f77ff11e..c4a9378e1 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -9,11 +9,14 @@ import ( peer "github.com/libp2p/go-libp2p-peer" ) +// PeerNetwork is an interface for finding providers and managing connections type PeerNetwork interface { ConnectionManager() ifconnmgr.ConnManager FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID } +// SessionPeerManager tracks and manages peers for a session, and provides +// the best ones to the session type SessionPeerManager struct { ctx context.Context network PeerNetwork @@ -27,6 +30,7 @@ type SessionPeerManager struct { activePeersArr []peer.ID } +// New creates a new SessionPeerManager func New(ctx context.Context, id uint64, network PeerNetwork) *SessionPeerManager { spm := &SessionPeerManager{ ctx: ctx, @@ -42,7 +46,10 @@ func New(ctx 
context.Context, id uint64, network PeerNetwork) *SessionPeerManage return spm } +// RecordPeerResponse records that a peer received a block, and adds to it +// the list of peers if it wasn't already added func (spm *SessionPeerManager) RecordPeerResponse(p peer.ID, k cid.Cid) { + // at the moment, we're just adding peers here // in the future, we'll actually use this to record metrics select { @@ -51,11 +58,13 @@ func (spm *SessionPeerManager) RecordPeerResponse(p peer.ID, k cid.Cid) { } } +// RecordPeerRequests records that a given set of peers requested the given cids func (spm *SessionPeerManager) RecordPeerRequests(p []peer.ID, ks []cid.Cid) { // at the moment, we're not doing anything here // soon we'll use this to track latency by peer } +// GetOptimizedPeers returns the best peers available for a session func (spm *SessionPeerManager) GetOptimizedPeers() []peer.ID { // right now this just returns all peers, but soon we might return peers // ordered by optimization, or only a subset @@ -74,6 +83,8 @@ func (spm *SessionPeerManager) GetOptimizedPeers() []peer.ID { } } +// FindMorePeers attempts to find more peers for a session by searching for +// providers for the given Cid func (spm *SessionPeerManager) FindMorePeers(ctx context.Context, c cid.Cid) { go func(k cid.Cid) { // TODO: have a task queue setup for this to: diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go new file mode 100644 index 000000000..77f59fcd9 --- /dev/null +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -0,0 +1,136 @@ +package sessionpeermanager + +import ( + "context" + "testing" + "time" + + "github.com/ipfs/go-bitswap/testutil" + + cid "github.com/ipfs/go-cid" + ifconnmgr "github.com/libp2p/go-libp2p-interface-connmgr" + inet "github.com/libp2p/go-libp2p-net" + peer "github.com/libp2p/go-libp2p-peer" +) + +type fakePeerNetwork struct { + peers []peer.ID + connManager ifconnmgr.ConnManager +} + +func 
(fpn *fakePeerNetwork) ConnectionManager() ifconnmgr.ConnManager { + return fpn.connManager +} + +func (fpn *fakePeerNetwork) FindProvidersAsync(ctx context.Context, c cid.Cid, num int) <-chan peer.ID { + peerCh := make(chan peer.ID) + go func() { + defer close(peerCh) + for _, p := range fpn.peers { + select { + case peerCh <- p: + case <-ctx.Done(): + return + } + } + }() + return peerCh +} + +type fakeConnManager struct { + taggedPeers []peer.ID +} + +func (fcm *fakeConnManager) TagPeer(p peer.ID, tag string, n int) { + fcm.taggedPeers = append(fcm.taggedPeers, p) +} +func (fcm *fakeConnManager) UntagPeer(p peer.ID, tag string) { + for i := 0; i < len(fcm.taggedPeers); i++ { + if fcm.taggedPeers[i] == p { + fcm.taggedPeers[i] = fcm.taggedPeers[len(fcm.taggedPeers)-1] + fcm.taggedPeers = fcm.taggedPeers[:len(fcm.taggedPeers)-1] + return + } + } +} +func (*fakeConnManager) GetTagInfo(p peer.ID) *ifconnmgr.TagInfo { return nil } +func (*fakeConnManager) TrimOpenConns(ctx context.Context) {} +func (*fakeConnManager) Notifee() inet.Notifiee { return nil } + +func TestFindingMorePeers(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + peers := testutil.GeneratePeers(5) + fcm := &fakeConnManager{} + fpn := &fakePeerNetwork{peers, fcm} + c := testutil.GenerateCids(1)[0] + id := testutil.GenerateSessionID() + + sessionPeerManager := New(ctx, id, fpn) + + findCtx, findCancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer findCancel() + sessionPeerManager.FindMorePeers(ctx, c) + <-findCtx.Done() + sessionPeers := sessionPeerManager.GetOptimizedPeers() + if len(sessionPeers) != len(peers) { + t.Fatal("incorrect number of peers found") + } + for _, p := range sessionPeers { + if !testutil.ContainsPeer(peers, p) { + t.Fatal("incorrect peer found through finding providers") + } + } + if len(fcm.taggedPeers) != len(peers) { + t.Fatal("Peers were not tagged!") + } +} + +func TestRecordingReceivedBlocks(t 
*testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + p := testutil.GeneratePeers(1)[0] + fcm := &fakeConnManager{} + fpn := &fakePeerNetwork{nil, fcm} + c := testutil.GenerateCids(1)[0] + id := testutil.GenerateSessionID() + + sessionPeerManager := New(ctx, id, fpn) + sessionPeerManager.RecordPeerResponse(p, c) + time.Sleep(10 * time.Millisecond) + sessionPeers := sessionPeerManager.GetOptimizedPeers() + if len(sessionPeers) != 1 { + t.Fatal("did not add peer on receive") + } + if sessionPeers[0] != p { + t.Fatal("incorrect peer added on receive") + } + if len(fcm.taggedPeers) != 1 { + t.Fatal("Peers was not tagged!") + } +} + +func TestUntaggingPeers(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + peers := testutil.GeneratePeers(5) + fcm := &fakeConnManager{} + fpn := &fakePeerNetwork{peers, fcm} + c := testutil.GenerateCids(1)[0] + id := testutil.GenerateSessionID() + + sessionPeerManager := New(ctx, id, fpn) + + sessionPeerManager.FindMorePeers(ctx, c) + time.Sleep(5 * time.Millisecond) + if len(fcm.taggedPeers) != len(peers) { + t.Fatal("Peers were not tagged!") + } + <-ctx.Done() + if len(fcm.taggedPeers) != 0 { + t.Fatal("Peers were not untagged!") + } +} From 9c2fb04829defe89bb9e28f92d6e1a7bf7114f28 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 28 Nov 2018 19:09:01 -0800 Subject: [PATCH 0674/1035] test(session): Add unit test Add a unit test for session package This commit was moved from ipfs/go-bitswap@fa93c81a34757028f8c2d08a1adf8254d784d1d2 --- bitswap/session/session_test.go | 229 ++++++++++++++++++++++++++++++++ bitswap/testutil/testutil.go | 11 ++ 2 files changed, 240 insertions(+) create mode 100644 bitswap/session/session_test.go diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go new file mode 100644 index 000000000..30a1762c5 --- /dev/null +++ b/bitswap/session/session_test.go @@ -0,0 
+1,229 @@ +package session + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/ipfs/go-block-format" + + "github.com/ipfs/go-bitswap/testutil" + cid "github.com/ipfs/go-cid" + blocksutil "github.com/ipfs/go-ipfs-blocksutil" + peer "github.com/libp2p/go-libp2p-peer" +) + +type wantReq struct { + cids []cid.Cid + peers []peer.ID + isCancel bool +} + +type fakeWantManager struct { + lk sync.RWMutex + wantReqs []wantReq +} + +func (fwm *fakeWantManager) WantBlocks(ctx context.Context, cids []cid.Cid, peers []peer.ID, ses uint64) { + fwm.lk.Lock() + fwm.wantReqs = append(fwm.wantReqs, wantReq{cids, peers, false}) + fwm.lk.Unlock() +} + +func (fwm *fakeWantManager) CancelWants(ctx context.Context, cids []cid.Cid, peers []peer.ID, ses uint64) { + fwm.lk.Lock() + fwm.wantReqs = append(fwm.wantReqs, wantReq{cids, peers, true}) + fwm.lk.Unlock() +} + +type fakePeerManager struct { + peers []peer.ID + findMorePeersRequested bool +} + +func (fpm *fakePeerManager) FindMorePeers(context.Context, cid.Cid) { + fpm.findMorePeersRequested = true +} + +func (fpm *fakePeerManager) GetOptimizedPeers() []peer.ID { + return fpm.peers +} + +func (fpm *fakePeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} +func (fpm *fakePeerManager) RecordPeerResponse(p peer.ID, c cid.Cid) { + fpm.peers = append(fpm.peers, p) +} + +func TestSessionGetBlocks(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer cancel() + fwm := &fakeWantManager{} + fpm := &fakePeerManager{} + id := testutil.GenerateSessionID() + session := New(ctx, id, fwm, fpm) + blockGenerator := blocksutil.NewBlockGenerator() + blks := blockGenerator.Blocks(activeWantsLimit * 2) + var cids []cid.Cid + for _, block := range blks { + cids = append(cids, block.Cid()) + } + var receivedBlocks []blocks.Block + getBlocksCh, err := session.GetBlocks(ctx, cids) + go func() { + for block := range getBlocksCh { + receivedBlocks = append(receivedBlocks, block) + } 
+ }() + if err != nil { + t.Fatal("error getting blocks") + } + + // check initial want request + time.Sleep(3 * time.Millisecond) + if len(fwm.wantReqs) != 1 { + t.Fatal("failed to enqueue wants") + } + fwm.lk.Lock() + receivedWantReq := fwm.wantReqs[0] + if len(receivedWantReq.cids) != activeWantsLimit { + t.Fatal("did not enqueue correct initial number of wants") + } + if receivedWantReq.peers != nil { + t.Fatal("first want request should be a broadcast") + } + + fwm.wantReqs = nil + fwm.lk.Unlock() + + // now receive the first set of blocks + peers := testutil.GeneratePeers(activeWantsLimit) + for i, p := range peers { + session.ReceiveBlockFrom(p, blks[i]) + } + time.Sleep(3 * time.Millisecond) + + // verify new peers were recorded + if len(fpm.peers) != activeWantsLimit { + t.Fatal("received blocks not recorded by the peer manager") + } + for _, p := range fpm.peers { + if !testutil.ContainsPeer(peers, p) { + t.Fatal("incorrect peer recorded to peer manager") + } + } + + // look at new interactions with want manager + var cancelReqs []wantReq + var newBlockReqs []wantReq + + fwm.lk.Lock() + for _, w := range fwm.wantReqs { + if w.isCancel { + cancelReqs = append(cancelReqs, w) + } else { + newBlockReqs = append(newBlockReqs, w) + } + } + // should have cancelled each received block + if len(cancelReqs) != activeWantsLimit { + t.Fatal("did not cancel each block once it was received") + } + // new session reqs should be targeted + totalEnqueued := 0 + for _, w := range newBlockReqs { + if len(w.peers) == 0 { + t.Fatal("should not have broadcast again after initial broadcast") + } + totalEnqueued += len(w.cids) + } + fwm.lk.Unlock() + + // full new round of cids should be requested + if totalEnqueued != activeWantsLimit { + t.Fatal("new blocks were not requested") + } + + // receive remaining blocks + for i, p := range peers { + session.ReceiveBlockFrom(p, blks[i+activeWantsLimit]) + } + + // wait for everything to wrap up + <-ctx.Done() + + // check that we got 
everything + fmt.Printf("%d\n", len(receivedBlocks)) + + if len(receivedBlocks) != len(blks) { + t.Fatal("did not receive enough blocks") + } + for _, block := range receivedBlocks { + if !testutil.ContainsBlock(blks, block) { + t.Fatal("received incorrect block") + } + } +} + +func TestSessionFindMorePeers(t *testing.T) { + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer cancel() + fwm := &fakeWantManager{} + fpm := &fakePeerManager{} + id := testutil.GenerateSessionID() + session := New(ctx, id, fwm, fpm) + session.SetBaseTickDelay(1 * time.Millisecond) + blockGenerator := blocksutil.NewBlockGenerator() + blks := blockGenerator.Blocks(activeWantsLimit * 2) + var cids []cid.Cid + for _, block := range blks { + cids = append(cids, block.Cid()) + } + var receivedBlocks []blocks.Block + getBlocksCh, err := session.GetBlocks(ctx, cids) + go func() { + for block := range getBlocksCh { + receivedBlocks = append(receivedBlocks, block) + } + }() + if err != nil { + t.Fatal("error getting blocks") + } + + // receive a block to trigger a tick reset + time.Sleep(1 * time.Millisecond) + p := testutil.GeneratePeers(1)[0] + session.ReceiveBlockFrom(p, blks[0]) + + // wait then clear the want list + time.Sleep(1 * time.Millisecond) + fwm.lk.Lock() + fwm.wantReqs = nil + fwm.lk.Unlock() + + // wait long enough for a tick to occur + // baseTickDelay + 3 * latency = 4ms + time.Sleep(6 * time.Millisecond) + + // trigger to find providers should have happened + if fpm.findMorePeersRequested != true { + t.Fatal("should have attempted to find more peers but didn't") + } + + // verify a broadcast was made + fwm.lk.Lock() + if len(fwm.wantReqs) != 1 { + t.Fatal("did not make a new broadcast") + } + receivedWantReq := fwm.wantReqs[0] + if len(receivedWantReq.cids) != activeWantsLimit { + t.Fatal("did not rebroadcast whole live list") + } + if receivedWantReq.peers != nil { + t.Fatal("did not make a broadcast") + } + fwm.wantReqs = nil + 
fwm.lk.Unlock() +} diff --git a/bitswap/testutil/testutil.go b/bitswap/testutil/testutil.go index 9cfb38917..4ba4f5bab 100644 --- a/bitswap/testutil/testutil.go +++ b/bitswap/testutil/testutil.go @@ -3,6 +3,7 @@ package testutil import ( bsmsg "github.com/ipfs/go-bitswap/message" "github.com/ipfs/go-bitswap/wantlist" + "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" peer "github.com/libp2p/go-libp2p-peer" @@ -76,3 +77,13 @@ func ContainsPeer(peers []peer.ID, p peer.ID) bool { } return false } + +// ContainsBlock returns true if a block is found n a list of blocks +func ContainsBlock(blks []blocks.Block, block blocks.Block) bool { + for _, n := range blks { + if block.Cid() == n.Cid() { + return true + } + } + return false +} From 86aece9fab8ebb29fe26c1abb5b98a676d0d334b Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 29 Nov 2018 10:30:46 -0800 Subject: [PATCH 0675/1035] refactor(session): readability improvements This commit was moved from ipfs/go-bitswap@c5f9a91e09542748563530e39cc06d58af338374 --- bitswap/session/session.go | 90 ++++++++++++++++++-------------------- 1 file changed, 42 insertions(+), 48 deletions(-) diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 9620f07b1..97a9a1c9d 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -116,7 +116,32 @@ func (s *Session) ReceiveBlockFrom(from peer.ID, blk blocks.Block) { // InterestedIn returns true if this session is interested in the given Cid. func (s *Session) InterestedIn(c cid.Cid) bool { - return s.interest.Contains(c) || s.isLiveWant(c) + if s.interest.Contains(c) { + return true + } + // TODO: PERF: this is using a channel to guard a map access against race + // conditions. This is definitely much slower than a mutex, though its unclear + // if it will actually induce any noticeable slowness. 
This is implemented this + // way to avoid adding a more complex set of mutexes around the liveWants map. + // note that in the average case (where this session *is* interested in the + // block we received) this function will not be called, as the cid will likely + // still be in the interest cache. + resp := make(chan bool, 1) + select { + case s.interestReqs <- interestReq{ + c: c, + resp: resp, + }: + case <-s.ctx.Done(): + return false + } + + select { + case want := <-resp: + return want + case <-s.ctx.Done(): + return false + } } // GetBlock fetches a single block. @@ -129,12 +154,21 @@ func (s *Session) GetBlock(parent context.Context, k cid.Cid) (blocks.Block, err // guaranteed on the returned blocks. func (s *Session) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { ctx = logging.ContextWithLoggable(ctx, s.uuid) - return bsgetter.AsyncGetBlocks(ctx, keys, s.notif, s.fetch, s.cancel) -} - -// ID returns the sessions identifier. -func (s *Session) ID() uint64 { - return s.id + return bsgetter.AsyncGetBlocks(ctx, keys, s.notif, + func(ctx context.Context, keys []cid.Cid) { + select { + case s.newReqs <- keys: + case <-ctx.Done(): + case <-s.ctx.Done(): + } + }, + func(keys []cid.Cid) { + select { + case s.cancelKeys <- keys: + case <-s.ctx.Done(): + } + }, + ) } // GetAverageLatency returns the average latency for block requests. @@ -162,47 +196,6 @@ func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { } } -// TODO: PERF: this is using a channel to guard a map access against race -// conditions. This is definitely much slower than a mutex, though its unclear -// if it will actually induce any noticeable slowness. This is implemented this -// way to avoid adding a more complex set of mutexes around the liveWants map. -// note that in the average case (where this session *is* interested in the -// block we received) this function will not be called, as the cid will likely -// still be in the interest cache. 
-func (s *Session) isLiveWant(c cid.Cid) bool { - resp := make(chan bool, 1) - select { - case s.interestReqs <- interestReq{ - c: c, - resp: resp, - }: - case <-s.ctx.Done(): - return false - } - - select { - case want := <-resp: - return want - case <-s.ctx.Done(): - return false - } -} - -func (s *Session) fetch(ctx context.Context, keys []cid.Cid) { - select { - case s.newReqs <- keys: - case <-ctx.Done(): - case <-s.ctx.Done(): - } -} - -func (s *Session) cancel(keys []cid.Cid) { - select { - case s.cancelKeys <- keys: - case <-s.ctx.Done(): - } -} - const provSearchDelay = time.Second * 10 // Session run loop -- everything function below here should not be called @@ -340,6 +333,7 @@ func (s *Session) wantBlocks(ctx context.Context, ks []cid.Cid) { func (s *Session) averageLatency() time.Duration { return s.latTotal / time.Duration(s.fetchcnt) } + func (s *Session) resetTick() { if s.latTotal == 0 { s.tick.Reset(provSearchDelay) From 2d31a44b9b946f0013052f697b4354a1456ced76 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Fri, 30 Nov 2018 15:51:48 -0800 Subject: [PATCH 0676/1035] test(session): make test more reliable This commit was moved from ipfs/go-bitswap@16f00de5206cef30c202545b0307bbbc763c722f --- bitswap/session/session_test.go | 126 ++++++++---------- .../sessionpeermanager_test.go | 1 + bitswap/testutil/testutil.go | 17 ++- 3 files changed, 65 insertions(+), 79 deletions(-) diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index 30a1762c5..1e6a89151 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -2,7 +2,6 @@ package session import ( "context" - "fmt" "sync" "testing" "time" @@ -16,50 +15,54 @@ import ( ) type wantReq struct { - cids []cid.Cid - peers []peer.ID - isCancel bool + cids []cid.Cid + peers []peer.ID } type fakeWantManager struct { - lk sync.RWMutex - wantReqs []wantReq + wantReqs chan wantReq + cancelReqs chan wantReq } func (fwm *fakeWantManager) WantBlocks(ctx 
context.Context, cids []cid.Cid, peers []peer.ID, ses uint64) { - fwm.lk.Lock() - fwm.wantReqs = append(fwm.wantReqs, wantReq{cids, peers, false}) - fwm.lk.Unlock() + fwm.wantReqs <- wantReq{cids, peers} } func (fwm *fakeWantManager) CancelWants(ctx context.Context, cids []cid.Cid, peers []peer.ID, ses uint64) { - fwm.lk.Lock() - fwm.wantReqs = append(fwm.wantReqs, wantReq{cids, peers, true}) - fwm.lk.Unlock() + fwm.cancelReqs <- wantReq{cids, peers} } type fakePeerManager struct { + lk sync.RWMutex peers []peer.ID findMorePeersRequested bool } func (fpm *fakePeerManager) FindMorePeers(context.Context, cid.Cid) { + fpm.lk.Lock() fpm.findMorePeersRequested = true + fpm.lk.Unlock() } func (fpm *fakePeerManager) GetOptimizedPeers() []peer.ID { + fpm.lk.Lock() + defer fpm.lk.Unlock() return fpm.peers } func (fpm *fakePeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} func (fpm *fakePeerManager) RecordPeerResponse(p peer.ID, c cid.Cid) { + fpm.lk.Lock() fpm.peers = append(fpm.peers, p) + fpm.lk.Unlock() } func TestSessionGetBlocks(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel() - fwm := &fakeWantManager{} + wantReqs := make(chan wantReq, 1) + cancelReqs := make(chan wantReq, 1) + fwm := &fakeWantManager{wantReqs, cancelReqs} fpm := &fakePeerManager{} id := testutil.GenerateSessionID() session := New(ctx, id, fwm, fpm) @@ -69,24 +72,15 @@ func TestSessionGetBlocks(t *testing.T) { for _, block := range blks { cids = append(cids, block.Cid()) } - var receivedBlocks []blocks.Block getBlocksCh, err := session.GetBlocks(ctx, cids) - go func() { - for block := range getBlocksCh { - receivedBlocks = append(receivedBlocks, block) - } - }() + if err != nil { t.Fatal("error getting blocks") } // check initial want request - time.Sleep(3 * time.Millisecond) - if len(fwm.wantReqs) != 1 { - t.Fatal("failed to enqueue wants") - } - fwm.lk.Lock() - receivedWantReq := fwm.wantReqs[0] + receivedWantReq := <-fwm.wantReqs + 
if len(receivedWantReq.cids) != activeWantsLimit { t.Fatal("did not enqueue correct initial number of wants") } @@ -94,17 +88,23 @@ func TestSessionGetBlocks(t *testing.T) { t.Fatal("first want request should be a broadcast") } - fwm.wantReqs = nil - fwm.lk.Unlock() - // now receive the first set of blocks peers := testutil.GeneratePeers(activeWantsLimit) + var newCancelReqs []wantReq + var newBlockReqs []wantReq + var receivedBlocks []blocks.Block for i, p := range peers { - session.ReceiveBlockFrom(p, blks[i]) + session.ReceiveBlockFrom(p, blks[testutil.IndexOf(blks, receivedWantReq.cids[i])]) + receivedBlock := <-getBlocksCh + receivedBlocks = append(receivedBlocks, receivedBlock) + cancelBlock := <-cancelReqs + newCancelReqs = append(newCancelReqs, cancelBlock) + wantBlock := <-wantReqs + newBlockReqs = append(newBlockReqs, wantBlock) } - time.Sleep(3 * time.Millisecond) // verify new peers were recorded + fpm.lk.Lock() if len(fpm.peers) != activeWantsLimit { t.Fatal("received blocks not recorded by the peer manager") } @@ -113,21 +113,12 @@ func TestSessionGetBlocks(t *testing.T) { t.Fatal("incorrect peer recorded to peer manager") } } + fpm.lk.Unlock() // look at new interactions with want manager - var cancelReqs []wantReq - var newBlockReqs []wantReq - fwm.lk.Lock() - for _, w := range fwm.wantReqs { - if w.isCancel { - cancelReqs = append(cancelReqs, w) - } else { - newBlockReqs = append(newBlockReqs, w) - } - } // should have cancelled each received block - if len(cancelReqs) != activeWantsLimit { + if len(newCancelReqs) != activeWantsLimit { t.Fatal("did not cancel each block once it was received") } // new session reqs should be targeted @@ -138,7 +129,6 @@ func TestSessionGetBlocks(t *testing.T) { } totalEnqueued += len(w.cids) } - fwm.lk.Unlock() // full new round of cids should be requested if totalEnqueued != activeWantsLimit { @@ -147,15 +137,13 @@ func TestSessionGetBlocks(t *testing.T) { // receive remaining blocks for i, p := range peers { - 
session.ReceiveBlockFrom(p, blks[i+activeWantsLimit]) + session.ReceiveBlockFrom(p, blks[testutil.IndexOf(blks, newBlockReqs[i].cids[0])]) + receivedBlock := <-getBlocksCh + receivedBlocks = append(receivedBlocks, receivedBlock) + cancelBlock := <-cancelReqs + newCancelReqs = append(newCancelReqs, cancelBlock) } - // wait for everything to wrap up - <-ctx.Done() - - // check that we got everything - fmt.Printf("%d\n", len(receivedBlocks)) - if len(receivedBlocks) != len(blks) { t.Fatal("did not receive enough blocks") } @@ -170,60 +158,52 @@ func TestSessionFindMorePeers(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel() - fwm := &fakeWantManager{} + wantReqs := make(chan wantReq, 1) + cancelReqs := make(chan wantReq, 1) + fwm := &fakeWantManager{wantReqs, cancelReqs} fpm := &fakePeerManager{} id := testutil.GenerateSessionID() session := New(ctx, id, fwm, fpm) - session.SetBaseTickDelay(1 * time.Millisecond) + session.SetBaseTickDelay(200 * time.Microsecond) blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(activeWantsLimit * 2) var cids []cid.Cid for _, block := range blks { cids = append(cids, block.Cid()) } - var receivedBlocks []blocks.Block getBlocksCh, err := session.GetBlocks(ctx, cids) - go func() { - for block := range getBlocksCh { - receivedBlocks = append(receivedBlocks, block) - } - }() if err != nil { t.Fatal("error getting blocks") } + // clear the initial block of wants + <-wantReqs + // receive a block to trigger a tick reset - time.Sleep(1 * time.Millisecond) + time.Sleep(200 * time.Microsecond) p := testutil.GeneratePeers(1)[0] session.ReceiveBlockFrom(p, blks[0]) - - // wait then clear the want list - time.Sleep(1 * time.Millisecond) - fwm.lk.Lock() - fwm.wantReqs = nil - fwm.lk.Unlock() + <-getBlocksCh + <-wantReqs + <-cancelReqs // wait long enough for a tick to occur - // baseTickDelay + 3 * latency = 4ms - time.Sleep(6 * time.Millisecond) + time.Sleep(20 
* time.Millisecond) // trigger to find providers should have happened + fpm.lk.Lock() if fpm.findMorePeersRequested != true { t.Fatal("should have attempted to find more peers but didn't") } + fpm.lk.Unlock() // verify a broadcast was made - fwm.lk.Lock() - if len(fwm.wantReqs) != 1 { - t.Fatal("did not make a new broadcast") - } - receivedWantReq := fwm.wantReqs[0] + receivedWantReq := <-wantReqs if len(receivedWantReq.cids) != activeWantsLimit { t.Fatal("did not rebroadcast whole live list") } if receivedWantReq.peers != nil { t.Fatal("did not make a broadcast") } - fwm.wantReqs = nil - fwm.lk.Unlock() + <-ctx.Done() } diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index 77f59fcd9..821752a0e 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -130,6 +130,7 @@ func TestUntaggingPeers(t *testing.T) { t.Fatal("Peers were not tagged!") } <-ctx.Done() + time.Sleep(5 * time.Millisecond) if len(fcm.taggedPeers) != 0 { t.Fatal("Peers were not untagged!") } diff --git a/bitswap/testutil/testutil.go b/bitswap/testutil/testutil.go index 4ba4f5bab..6e3f2aa45 100644 --- a/bitswap/testutil/testutil.go +++ b/bitswap/testutil/testutil.go @@ -78,12 +78,17 @@ func ContainsPeer(peers []peer.ID, p peer.ID) bool { return false } -// ContainsBlock returns true if a block is found n a list of blocks -func ContainsBlock(blks []blocks.Block, block blocks.Block) bool { - for _, n := range blks { - if block.Cid() == n.Cid() { - return true +// IndexOf returns the index of a given cid in an array of blocks +func IndexOf(blks []blocks.Block, c cid.Cid) int { + for i, n := range blks { + if n.Cid() == c { + return i } } - return false + return -1 +} + +// ContainsBlock returns true if a block is found n a list of blocks +func ContainsBlock(blks []blocks.Block, block blocks.Block) bool { + return IndexOf(blks, block.Cid()) != -1 } From 
3f085dc56658c4264e234eb1551bcd4835205251 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 18 Dec 2018 15:34:16 -0800 Subject: [PATCH 0677/1035] fix(tests): stabilize unreliable session tests fix #43 This commit was moved from ipfs/go-bitswap@78d4f3873f8b07c27f0fa16431e4d0ec488fef3b --- bitswap/session/session_test.go | 17 ++++------------- .../sessionpeermanager_test.go | 11 ++++++++++- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index 1e6a89151..b00f8bd0a 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -35,13 +35,11 @@ func (fwm *fakeWantManager) CancelWants(ctx context.Context, cids []cid.Cid, pee type fakePeerManager struct { lk sync.RWMutex peers []peer.ID - findMorePeersRequested bool + findMorePeersRequested chan struct{} } func (fpm *fakePeerManager) FindMorePeers(context.Context, cid.Cid) { - fpm.lk.Lock() - fpm.findMorePeersRequested = true - fpm.lk.Unlock() + fpm.findMorePeersRequested <- struct{}{} } func (fpm *fakePeerManager) GetOptimizedPeers() []peer.ID { @@ -161,7 +159,7 @@ func TestSessionFindMorePeers(t *testing.T) { wantReqs := make(chan wantReq, 1) cancelReqs := make(chan wantReq, 1) fwm := &fakeWantManager{wantReqs, cancelReqs} - fpm := &fakePeerManager{} + fpm := &fakePeerManager{findMorePeersRequested: make(chan struct{})} id := testutil.GenerateSessionID() session := New(ctx, id, fwm, fpm) session.SetBaseTickDelay(200 * time.Microsecond) @@ -188,14 +186,7 @@ func TestSessionFindMorePeers(t *testing.T) { <-cancelReqs // wait long enough for a tick to occur - time.Sleep(20 * time.Millisecond) - - // trigger to find providers should have happened - fpm.lk.Lock() - if fpm.findMorePeersRequested != true { - t.Fatal("should have attempted to find more peers but didn't") - } - fpm.lk.Unlock() + <-fpm.findMorePeersRequested // verify a broadcast was made receivedWantReq := <-wantReqs diff --git 
a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index 821752a0e..c26bf1748 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -2,6 +2,7 @@ package sessionpeermanager import ( "context" + "sync" "testing" "time" @@ -39,12 +40,17 @@ func (fpn *fakePeerNetwork) FindProvidersAsync(ctx context.Context, c cid.Cid, n type fakeConnManager struct { taggedPeers []peer.ID + wait sync.WaitGroup } func (fcm *fakeConnManager) TagPeer(p peer.ID, tag string, n int) { + fcm.wait.Add(1) fcm.taggedPeers = append(fcm.taggedPeers, p) } + func (fcm *fakeConnManager) UntagPeer(p peer.ID, tag string) { + fcm.wait.Done() + for i := 0; i < len(fcm.taggedPeers); i++ { if fcm.taggedPeers[i] == p { fcm.taggedPeers[i] = fcm.taggedPeers[len(fcm.taggedPeers)-1] @@ -52,7 +58,9 @@ func (fcm *fakeConnManager) UntagPeer(p peer.ID, tag string) { return } } + } + func (*fakeConnManager) GetTagInfo(p peer.ID) *ifconnmgr.TagInfo { return nil } func (*fakeConnManager) TrimOpenConns(ctx context.Context) {} func (*fakeConnManager) Notifee() inet.Notifiee { return nil } @@ -130,7 +138,8 @@ func TestUntaggingPeers(t *testing.T) { t.Fatal("Peers were not tagged!") } <-ctx.Done() - time.Sleep(5 * time.Millisecond) + fcm.wait.Wait() + if len(fcm.taggedPeers) != 0 { t.Fatal("Peers were not untagged!") } From 5f10d599e217572469163fa373b9c166dd6d1368 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 18 Dec 2018 17:52:40 -0800 Subject: [PATCH 0678/1035] fix(tests): minor fix for waitgroup This commit was moved from ipfs/go-bitswap@6b3042fe0ec4b5af0faa0db685a796e91bba2836 --- bitswap/session/session_test.go | 2 +- bitswap/sessionpeermanager/sessionpeermanager_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index b00f8bd0a..8ae87cfd7 100644 --- a/bitswap/session/session_test.go 
+++ b/bitswap/session/session_test.go @@ -185,7 +185,7 @@ func TestSessionFindMorePeers(t *testing.T) { <-wantReqs <-cancelReqs - // wait long enough for a tick to occur + // wait for a request to get more peers to occur <-fpm.findMorePeersRequested // verify a broadcast was made diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index c26bf1748..f84b3d67b 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -49,7 +49,7 @@ func (fcm *fakeConnManager) TagPeer(p peer.ID, tag string, n int) { } func (fcm *fakeConnManager) UntagPeer(p peer.ID, tag string) { - fcm.wait.Done() + defer fcm.wait.Done() for i := 0; i < len(fcm.taggedPeers); i++ { if fcm.taggedPeers[i] == p { From 1c592b1fe152dfcb19c892b94c82bd13ad416bb9 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Mon, 3 Dec 2018 11:21:42 -0800 Subject: [PATCH 0679/1035] test(benchmarks): improve output make both performance benchmarks write to a tmp dir and put in the .gitignore This commit was moved from ipfs/go-bitswap@5c7498ca594e63eeabf743ca4450133b8c820306 --- bitswap/dup_blocks_test.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/bitswap/dup_blocks_test.go b/bitswap/dup_blocks_test.go index 58fc96144..28f97ca30 100644 --- a/bitswap/dup_blocks_test.go +++ b/bitswap/dup_blocks_test.go @@ -34,6 +34,7 @@ type runStats struct { var benchmarkLog []runStats func BenchmarkDups2Nodes(b *testing.B) { + benchmarkLog = nil fixedDelay := delay.Fixed(10 * time.Millisecond) b.Run("AllToAll-OneAtATime", func(b *testing.B) { subtestDistributeAndFetch(b, 3, 100, fixedDelay, allToAll, oneAtATime) @@ -93,7 +94,7 @@ func BenchmarkDups2Nodes(b *testing.B) { subtestDistributeAndFetch(b, 200, 20, fixedDelay, allToAll, batchFetchAll) }) out, _ := json.MarshalIndent(benchmarkLog, "", " ") - ioutil.WriteFile("benchmark.json", out, 0666) + 
ioutil.WriteFile("tmp/benchmark.json", out, 0666) } const fastSpeed = 60 * time.Millisecond @@ -103,6 +104,7 @@ const superSlowSpeed = 4000 * time.Millisecond const distribution = 20 * time.Millisecond func BenchmarkDupsManyNodesRealWorldNetwork(b *testing.B) { + benchmarkLog = nil fastNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( mediumSpeed-fastSpeed, slowSpeed-fastSpeed, 0.0, 0.0, distribution, nil) @@ -125,6 +127,8 @@ func BenchmarkDupsManyNodesRealWorldNetwork(b *testing.B) { b.Run("200Nodes-AllToAll-BigBatch-SlowVariableSpeedNetwork", func(b *testing.B) { subtestDistributeAndFetch(b, 300, 200, slowNetworkDelay, allToAll, batchFetchAll) }) + out, _ := json.MarshalIndent(benchmarkLog, "", " ") + ioutil.WriteFile("tmp/rw-benchmark.json", out, 0666) } func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, df distFunc, ff fetchFunc) { From f486119b228447bfb998bd45271d54406fdbe271 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 18 Dec 2018 14:24:28 -0800 Subject: [PATCH 0680/1035] test(Benchmarks): Add bandwidth restrictions Limits connection bandwidth in real world benchmarks so that blocks are delayed if single peer is overused fix #40 This commit was moved from ipfs/go-bitswap@fe0a25326f98bbaf7e82bf7d03e9eacb87604934 --- ...{dup_blocks_test.go => benchmarks_test.go} | 39 ++++++++-- bitswap/testnet/rate_limit_generators.go | 42 +++++++++++ bitswap/testnet/virtual.go | 71 +++++++++++++++---- bitswap/testutil/testutil.go | 23 ++++++ 4 files changed, 159 insertions(+), 16 deletions(-) rename bitswap/{dup_blocks_test.go => benchmarks_test.go} (84%) create mode 100644 bitswap/testnet/rate_limit_generators.go diff --git a/bitswap/dup_blocks_test.go b/bitswap/benchmarks_test.go similarity index 84% rename from bitswap/dup_blocks_test.go rename to bitswap/benchmarks_test.go index 28f97ca30..b8c90d97a 100644 --- a/bitswap/dup_blocks_test.go +++ b/bitswap/benchmarks_test.go @@ -9,9 +9,10 @@ import ( "testing" "time" - tn 
"github.com/ipfs/go-bitswap/testnet" + "github.com/ipfs/go-bitswap/testutil" bssession "github.com/ipfs/go-bitswap/session" + tn "github.com/ipfs/go-bitswap/testnet" "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" @@ -102,6 +103,13 @@ const mediumSpeed = 200 * time.Millisecond const slowSpeed = 800 * time.Millisecond const superSlowSpeed = 4000 * time.Millisecond const distribution = 20 * time.Millisecond +const fastBandwidth = 1250000.0 +const fastBandwidthDeviation = 300000.0 +const mediumBandwidth = 500000.0 +const mediumBandwidthDeviation = 80000.0 +const slowBandwidth = 100000.0 +const slowBandwidthDeviation = 16500.0 +const stdBlockSize = 8000 func BenchmarkDupsManyNodesRealWorldNetwork(b *testing.B) { benchmarkLog = nil @@ -109,23 +117,26 @@ func BenchmarkDupsManyNodesRealWorldNetwork(b *testing.B) { mediumSpeed-fastSpeed, slowSpeed-fastSpeed, 0.0, 0.0, distribution, nil) fastNetworkDelay := delay.Delay(fastSpeed, fastNetworkDelayGenerator) + fastBandwidthGenerator := tn.VariableRateLimitGenerator(fastBandwidth, fastBandwidthDeviation, nil) averageNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( mediumSpeed-fastSpeed, slowSpeed-fastSpeed, 0.3, 0.3, distribution, nil) averageNetworkDelay := delay.Delay(fastSpeed, averageNetworkDelayGenerator) + averageBandwidthGenerator := tn.VariableRateLimitGenerator(mediumBandwidth, mediumBandwidthDeviation, nil) slowNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( mediumSpeed-fastSpeed, superSlowSpeed-fastSpeed, 0.3, 0.3, distribution, nil) slowNetworkDelay := delay.Delay(fastSpeed, slowNetworkDelayGenerator) + slowBandwidthGenerator := tn.VariableRateLimitGenerator(slowBandwidth, slowBandwidthDeviation, nil) b.Run("200Nodes-AllToAll-BigBatch-FastNetwork", func(b *testing.B) { - subtestDistributeAndFetch(b, 300, 200, fastNetworkDelay, allToAll, batchFetchAll) + subtestDistributeAndFetchRateLimited(b, 300, 200, fastNetworkDelay, 
fastBandwidthGenerator, stdBlockSize, allToAll, batchFetchAll) }) b.Run("200Nodes-AllToAll-BigBatch-AverageVariableSpeedNetwork", func(b *testing.B) { - subtestDistributeAndFetch(b, 300, 200, averageNetworkDelay, allToAll, batchFetchAll) + subtestDistributeAndFetchRateLimited(b, 300, 200, averageNetworkDelay, averageBandwidthGenerator, stdBlockSize, allToAll, batchFetchAll) }) b.Run("200Nodes-AllToAll-BigBatch-SlowVariableSpeedNetwork", func(b *testing.B) { - subtestDistributeAndFetch(b, 300, 200, slowNetworkDelay, allToAll, batchFetchAll) + subtestDistributeAndFetchRateLimited(b, 300, 200, slowNetworkDelay, slowBandwidthGenerator, stdBlockSize, allToAll, batchFetchAll) }) out, _ := json.MarshalIndent(benchmarkLog, "", " ") ioutil.WriteFile("tmp/rw-benchmark.json", out, 0666) @@ -134,6 +145,7 @@ func BenchmarkDupsManyNodesRealWorldNetwork(b *testing.B) { func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, df distFunc, ff fetchFunc) { start := time.Now() net := tn.VirtualNetwork(mockrouting.NewServer(), d) + sg := NewTestSessionGenerator(net) defer sg.Close() @@ -141,6 +153,25 @@ func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, d instances := sg.Instances(numnodes) blocks := bg.Blocks(numblks) + runDistribution(b, instances, blocks, df, ff, start) +} + +func subtestDistributeAndFetchRateLimited(b *testing.B, numnodes, numblks int, d delay.D, rateLimitGenerator tn.RateLimitGenerator, blockSize int64, df distFunc, ff fetchFunc) { + start := time.Now() + net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) + + sg := NewTestSessionGenerator(net) + defer sg.Close() + + instances := sg.Instances(numnodes) + blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) + + runDistribution(b, instances, blocks, df, ff, start) +} + +func runDistribution(b *testing.B, instances []Instance, blocks []blocks.Block, df distFunc, ff fetchFunc, start time.Time) { + + numnodes := len(instances) 
fetcher := instances[numnodes-1] diff --git a/bitswap/testnet/rate_limit_generators.go b/bitswap/testnet/rate_limit_generators.go new file mode 100644 index 000000000..2c4a1cd56 --- /dev/null +++ b/bitswap/testnet/rate_limit_generators.go @@ -0,0 +1,42 @@ +package bitswap + +import ( + "math/rand" +) + +type fixedRateLimitGenerator struct { + rateLimit float64 +} + +// FixedRateLimitGenerator returns a rate limit generatoe that always generates +// the specified rate limit in bytes/sec. +func FixedRateLimitGenerator(rateLimit float64) RateLimitGenerator { + return &fixedRateLimitGenerator{rateLimit} +} + +func (rateLimitGenerator *fixedRateLimitGenerator) NextRateLimit() float64 { + return rateLimitGenerator.rateLimit +} + +type variableRateLimitGenerator struct { + rateLimit float64 + std float64 + rng *rand.Rand +} + +// VariableRateLimitGenerator makes rate limites that following a normal distribution. +func VariableRateLimitGenerator(rateLimit float64, std float64, rng *rand.Rand) RateLimitGenerator { + if rng == nil { + rng = sharedRNG + } + + return &variableRateLimitGenerator{ + std: std, + rng: rng, + rateLimit: rateLimit, + } +} + +func (rateLimitGenerator *variableRateLimitGenerator) NextRateLimit() float64 { + return rateLimitGenerator.rng.NormFloat64()*rateLimitGenerator.std + rateLimitGenerator.rateLimit +} diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index d5a77494b..010c74c55 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -18,6 +18,7 @@ import ( ifconnmgr "github.com/libp2p/go-libp2p-interface-connmgr" peer "github.com/libp2p/go-libp2p-peer" routing "github.com/libp2p/go-libp2p-routing" + mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" testutil "github.com/libp2p/go-testutil" ) @@ -25,21 +26,47 @@ var log = logging.Logger("bstestnet") func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { return &network{ - latencies: make(map[peer.ID]map[peer.ID]time.Duration), - clients: 
make(map[peer.ID]*receiverQueue), - delay: d, - routingserver: rs, - conns: make(map[string]struct{}), + latencies: make(map[peer.ID]map[peer.ID]time.Duration), + clients: make(map[peer.ID]*receiverQueue), + delay: d, + routingserver: rs, + isRateLimited: false, + rateLimitGenerator: nil, + conns: make(map[string]struct{}), + } +} + +type rateLimiter interface { + Limit(dataSize int) time.Duration +} + +type RateLimitGenerator interface { + NextRateLimit() float64 +} + +func RateLimitedVirtualNetwork(rs mockrouting.Server, d delay.D, rateLimitGenerator RateLimitGenerator) Network { + return &network{ + latencies: make(map[peer.ID]map[peer.ID]time.Duration), + rateLimiters: make(map[peer.ID]map[peer.ID]rateLimiter), + clients: make(map[peer.ID]*receiverQueue), + delay: d, + routingserver: rs, + isRateLimited: true, + rateLimitGenerator: rateLimitGenerator, + conns: make(map[string]struct{}), } } type network struct { - mu sync.Mutex - latencies map[peer.ID]map[peer.ID]time.Duration - clients map[peer.ID]*receiverQueue - routingserver mockrouting.Server - delay delay.D - conns map[string]struct{} + mu sync.Mutex + latencies map[peer.ID]map[peer.ID]time.Duration + rateLimiters map[peer.ID]map[peer.ID]rateLimiter + clients map[peer.ID]*receiverQueue + routingserver mockrouting.Server + delay delay.D + isRateLimited bool + rateLimitGenerator RateLimitGenerator + conns map[string]struct{} } type message struct { @@ -102,6 +129,26 @@ func (n *network) SendMessage( latencies[to] = latency } + var bandwidthDelay time.Duration + if n.isRateLimited { + rateLimiters, ok := n.rateLimiters[from] + if !ok { + rateLimiters = make(map[peer.ID]rateLimiter) + n.rateLimiters[from] = rateLimiters + } + + rl, ok := rateLimiters[to] + if !ok { + rl = mocknet.NewRatelimiter(n.rateLimitGenerator.NextRateLimit()) + rateLimiters[to] = rl + } + + size := mes.ToProtoV1().Size() + bandwidthDelay = rl.Limit(size) + } else { + bandwidthDelay = 0 + } + receiver, ok := n.clients[to] if !ok { return 
errors.New("cannot locate peer on network") @@ -113,7 +160,7 @@ func (n *network) SendMessage( msg := &message{ from: from, msg: mes, - shouldSend: time.Now().Add(latency), + shouldSend: time.Now().Add(latency).Add(bandwidthDelay), } receiver.enqueue(msg) diff --git a/bitswap/testutil/testutil.go b/bitswap/testutil/testutil.go index 6e3f2aa45..b25c1d355 100644 --- a/bitswap/testutil/testutil.go +++ b/bitswap/testutil/testutil.go @@ -1,6 +1,10 @@ package testutil import ( + "bytes" + + random "github.com/jbenet/go-random" + bsmsg "github.com/ipfs/go-bitswap/message" "github.com/ipfs/go-bitswap/wantlist" "github.com/ipfs/go-block-format" @@ -11,6 +15,25 @@ import ( var blockGenerator = blocksutil.NewBlockGenerator() var prioritySeq int +var seedSeq int64 + +func randomBytes(n int64, seed int64) []byte { + data := new(bytes.Buffer) + random.WritePseudoRandomBytes(n, data, seed) + return data.Bytes() +} + +// GenerateBlocksOfSize generates a series of blocks of the given byte size +func GenerateBlocksOfSize(n int, size int64) []blocks.Block { + generatedBlocks := make([]blocks.Block, 0, n) + for i := 0; i < n; i++ { + seedSeq++ + b := blocks.NewBlock(randomBytes(size, seedSeq)) + generatedBlocks = append(generatedBlocks, b) + + } + return generatedBlocks +} // GenerateCids produces n content identifiers. 
func GenerateCids(n int) []cid.Cid { From 34fb4cdf85a311b6a712163420cc8357b9db2c42 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 19 Dec 2018 11:42:20 -0800 Subject: [PATCH 0681/1035] fix(deps): update libp2p for cleanup Updated Libp2p and used it's newly exposed RateLimiter public interface This commit was moved from ipfs/go-bitswap@48f53bbcb3286fe0001ec74a6a35266a6d8c4ca3 --- bitswap/testnet/virtual.go | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 010c74c55..e3af99d09 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -36,10 +36,6 @@ func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { } } -type rateLimiter interface { - Limit(dataSize int) time.Duration -} - type RateLimitGenerator interface { NextRateLimit() float64 } @@ -47,7 +43,7 @@ type RateLimitGenerator interface { func RateLimitedVirtualNetwork(rs mockrouting.Server, d delay.D, rateLimitGenerator RateLimitGenerator) Network { return &network{ latencies: make(map[peer.ID]map[peer.ID]time.Duration), - rateLimiters: make(map[peer.ID]map[peer.ID]rateLimiter), + rateLimiters: make(map[peer.ID]map[peer.ID]*mocknet.RateLimiter), clients: make(map[peer.ID]*receiverQueue), delay: d, routingserver: rs, @@ -60,7 +56,7 @@ func RateLimitedVirtualNetwork(rs mockrouting.Server, d delay.D, rateLimitGenera type network struct { mu sync.Mutex latencies map[peer.ID]map[peer.ID]time.Duration - rateLimiters map[peer.ID]map[peer.ID]rateLimiter + rateLimiters map[peer.ID]map[peer.ID]*mocknet.RateLimiter clients map[peer.ID]*receiverQueue routingserver mockrouting.Server delay delay.D @@ -133,18 +129,18 @@ func (n *network) SendMessage( if n.isRateLimited { rateLimiters, ok := n.rateLimiters[from] if !ok { - rateLimiters = make(map[peer.ID]rateLimiter) + rateLimiters = make(map[peer.ID]*mocknet.RateLimiter) n.rateLimiters[from] = rateLimiters } - rl, ok := rateLimiters[to] + 
rateLimiter, ok := rateLimiters[to] if !ok { - rl = mocknet.NewRatelimiter(n.rateLimitGenerator.NextRateLimit()) - rateLimiters[to] = rl + rateLimiter = mocknet.NewRateLimiter(n.rateLimitGenerator.NextRateLimit()) + rateLimiters[to] = rateLimiter } size := mes.ToProtoV1().Size() - bandwidthDelay = rl.Limit(size) + bandwidthDelay = rateLimiter.Limit(size) } else { bandwidthDelay = 0 } From f783dca6dd839e1f130b43a05a4a4890fbfaeacd Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Mon, 3 Dec 2018 18:03:07 -0800 Subject: [PATCH 0682/1035] feat(sessions): optimize peers Order optimized peers by most recent to receive a block This commit was moved from ipfs/go-bitswap@4951001bee8ed53439b17980997c6f20b4dd83ab --- .../sessionpeermanager/sessionpeermanager.go | 124 ++++++++++++++---- .../sessionpeermanager_test.go | 64 +++++++++ 2 files changed, 166 insertions(+), 22 deletions(-) diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index c4a9378e1..59d36b2f3 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -3,18 +3,28 @@ package sessionpeermanager import ( "context" "fmt" + "math/rand" cid "github.com/ipfs/go-cid" ifconnmgr "github.com/libp2p/go-libp2p-interface-connmgr" peer "github.com/libp2p/go-libp2p-peer" ) +const ( + maxOptimizedPeers = 25 + reservePeers = 2 +) + // PeerNetwork is an interface for finding providers and managing connections type PeerNetwork interface { ConnectionManager() ifconnmgr.ConnManager FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID } +type peerMessage interface { + handle(spm *SessionPeerManager) +} + // SessionPeerManager tracks and manages peers for a session, and provides // the best ones to the session type SessionPeerManager struct { @@ -22,22 +32,21 @@ type SessionPeerManager struct { network PeerNetwork tag string - newPeers chan peer.ID - peerReqs chan chan []peer.ID + peerMessages chan 
peerMessage // do not touch outside of run loop - activePeers map[peer.ID]struct{} - activePeersArr []peer.ID + activePeers map[peer.ID]bool + unoptimizedPeersArr []peer.ID + optimizedPeersArr []peer.ID } // New creates a new SessionPeerManager func New(ctx context.Context, id uint64, network PeerNetwork) *SessionPeerManager { spm := &SessionPeerManager{ - ctx: ctx, - network: network, - newPeers: make(chan peer.ID, 16), - peerReqs: make(chan chan []peer.ID), - activePeers: make(map[peer.ID]struct{}), + ctx: ctx, + network: network, + peerMessages: make(chan peerMessage, 16), + activePeers: make(map[peer.ID]bool), } spm.tag = fmt.Sprint("bs-ses-", id) @@ -53,7 +62,7 @@ func (spm *SessionPeerManager) RecordPeerResponse(p peer.ID, k cid.Cid) { // at the moment, we're just adding peers here // in the future, we'll actually use this to record metrics select { - case spm.newPeers <- p: + case spm.peerMessages <- &peerResponseMessage{p}: case <-spm.ctx.Done(): } } @@ -70,7 +79,7 @@ func (spm *SessionPeerManager) GetOptimizedPeers() []peer.ID { // ordered by optimization, or only a subset resp := make(chan []peer.ID) select { - case spm.peerReqs <- resp: + case spm.peerMessages <- &peerReqMessage{resp}: case <-spm.ctx.Done(): return nil } @@ -93,7 +102,7 @@ func (spm *SessionPeerManager) FindMorePeers(ctx context.Context, c cid.Cid) { // - ensure two 'findprovs' calls for the same block don't run concurrently // - share peers between sessions based on interest set for p := range spm.network.FindProvidersAsync(ctx, k, 10) { - spm.newPeers <- p + spm.peerMessages <- &peerFoundMessage{p} } }(c) } @@ -101,29 +110,100 @@ func (spm *SessionPeerManager) FindMorePeers(ctx context.Context, c cid.Cid) { func (spm *SessionPeerManager) run(ctx context.Context) { for { select { - case p := <-spm.newPeers: - spm.addActivePeer(p) - case resp := <-spm.peerReqs: - resp <- spm.activePeersArr + case pm := <-spm.peerMessages: + pm.handle(spm) case <-ctx.Done(): spm.handleShutdown() return } 
} } -func (spm *SessionPeerManager) addActivePeer(p peer.ID) { + +func (spm *SessionPeerManager) tagPeer(p peer.ID) { + cmgr := spm.network.ConnectionManager() + cmgr.TagPeer(p, spm.tag, 10) +} + +func (spm *SessionPeerManager) insertOptimizedPeer(p peer.ID) { + if len(spm.optimizedPeersArr) >= (maxOptimizedPeers - reservePeers) { + tailPeer := spm.optimizedPeersArr[len(spm.optimizedPeersArr)-1] + spm.optimizedPeersArr = spm.optimizedPeersArr[:len(spm.optimizedPeersArr)-1] + spm.unoptimizedPeersArr = append(spm.unoptimizedPeersArr, tailPeer) + } + + spm.optimizedPeersArr = append([]peer.ID{p}, spm.optimizedPeersArr...) +} + +type peerFoundMessage struct { + p peer.ID +} + +func (pfm *peerFoundMessage) handle(spm *SessionPeerManager) { + p := pfm.p if _, ok := spm.activePeers[p]; !ok { - spm.activePeers[p] = struct{}{} - spm.activePeersArr = append(spm.activePeersArr, p) + spm.activePeers[p] = false + spm.unoptimizedPeersArr = append(spm.unoptimizedPeersArr, p) + spm.tagPeer(p) + } +} + +type peerResponseMessage struct { + p peer.ID +} + +func (prm *peerResponseMessage) handle(spm *SessionPeerManager) { + + p := prm.p + isOptimized, ok := spm.activePeers[p] + if !ok { + spm.activePeers[p] = true + spm.tagPeer(p) + } else { + if isOptimized { + if spm.optimizedPeersArr[0] == p { + return + } + for i := 0; i < len(spm.optimizedPeersArr); i++ { + if spm.optimizedPeersArr[i] == p { + spm.optimizedPeersArr = append(spm.optimizedPeersArr[:i], spm.optimizedPeersArr[i+1:]...) 
+ break + } + } + } else { + spm.activePeers[p] = true + for i := 0; i < len(spm.unoptimizedPeersArr); i++ { + if spm.unoptimizedPeersArr[i] == p { + spm.unoptimizedPeersArr[i] = spm.unoptimizedPeersArr[len(spm.unoptimizedPeersArr)-1] + spm.unoptimizedPeersArr = spm.unoptimizedPeersArr[:len(spm.unoptimizedPeersArr)-1] + break + } + } + } + } + spm.insertOptimizedPeer(p) +} + +type peerReqMessage struct { + resp chan<- []peer.ID +} + +func (prm *peerReqMessage) handle(spm *SessionPeerManager) { + randomOrder := rand.Perm(len(spm.unoptimizedPeersArr)) + maxPeers := len(spm.unoptimizedPeersArr) + len(spm.optimizedPeersArr) + if maxPeers > maxOptimizedPeers { + maxPeers = maxOptimizedPeers + } - cmgr := spm.network.ConnectionManager() - cmgr.TagPeer(p, spm.tag, 10) + extraPeers := make([]peer.ID, maxPeers-len(spm.optimizedPeersArr)) + for i := range extraPeers { + extraPeers[i] = spm.unoptimizedPeersArr[randomOrder[i]] } + prm.resp <- append(spm.optimizedPeersArr, extraPeers...) } func (spm *SessionPeerManager) handleShutdown() { cmgr := spm.network.ConnectionManager() - for _, p := range spm.activePeersArr { + for p := range spm.activePeers { cmgr.UntagPeer(p, spm.tag) } } diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index f84b3d67b..ba23c87d5 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -3,6 +3,7 @@ package sessionpeermanager import ( "context" "sync" + "math/rand" "testing" "time" @@ -120,6 +121,69 @@ func TestRecordingReceivedBlocks(t *testing.T) { } } +func TestOrderingPeers(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + peers := testutil.GeneratePeers(100) + fcm := &fakeConnManager{} + fpn := &fakePeerNetwork{peers, fcm} + c := testutil.GenerateCids(1) + id := testutil.GenerateSessionID() + sessionPeerManager := New(ctx, id, fpn) + + // add all 
peers to session + sessionPeerManager.FindMorePeers(ctx, c[0]) + + // record broadcast + sessionPeerManager.RecordPeerRequests(nil, c) + + // record receives + peer1 := peers[rand.Intn(100)] + peer2 := peers[rand.Intn(100)] + peer3 := peers[rand.Intn(100)] + time.Sleep(1 * time.Millisecond) + sessionPeerManager.RecordPeerResponse(peer1, c[0]) + time.Sleep(1 * time.Millisecond) + sessionPeerManager.RecordPeerResponse(peer2, c[0]) + time.Sleep(1 * time.Millisecond) + sessionPeerManager.RecordPeerResponse(peer3, c[0]) + + sessionPeers := sessionPeerManager.GetOptimizedPeers() + if len(sessionPeers) != maxOptimizedPeers { + t.Fatal("Should not return more than the max of optimized peers") + } + + // should prioritize peers which have received blocks + if (sessionPeers[0] != peer3) || (sessionPeers[1] != peer2) || (sessionPeers[2] != peer1) { + t.Fatal("Did not prioritize peers that received blocks") + } + + // Receive a second time from same node + sessionPeerManager.RecordPeerResponse(peer3, c[0]) + + // call again + nextSessionPeers := sessionPeerManager.GetOptimizedPeers() + if len(nextSessionPeers) != maxOptimizedPeers { + t.Fatal("Should not return more than the max of optimized peers") + } + + // should not duplicate + if (nextSessionPeers[0] != peer3) || (nextSessionPeers[1] != peer2) || (nextSessionPeers[2] != peer1) { + t.Fatal("Did dedup peers which received multiple blocks") + } + + // should randomize other peers + totalSame := 0 + for i := 3; i < maxOptimizedPeers; i++ { + if sessionPeers[i] == nextSessionPeers[i] { + totalSame++ + } + } + if totalSame >= maxOptimizedPeers-3 { + t.Fatal("should not return the same random peers each time") + } +} func TestUntaggingPeers(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) From 6091fa585839535da64437aab560db18d17f89b7 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 5 Dec 2018 12:02:39 -0800 Subject: [PATCH 0683/1035] feat(sessions): reduce duplicates 
Reduce duplicates through splits of requests This commit was moved from ipfs/go-bitswap@2ea8ba8288078b75baf55d63a2f50ff2d7f8ba71 --- bitswap/bitswap.go | 2 +- bitswap/session/session.go | 142 +++++++++++++++--- bitswap/session/session_test.go | 16 +- bitswap/sessionmanager/sessionmanager.go | 12 ++ bitswap/sessionmanager/sessionmanager_test.go | 10 +- .../sessionpeermanager/sessionpeermanager.go | 2 +- 6 files changed, 152 insertions(+), 32 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 29afee24e..1bc4e7460 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -391,7 +391,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg defer wg.Done() bs.updateReceiveCounters(b) - + bs.sm.UpdateReceiveCounters(b) log.Debugf("got block %s from %s", b, p) // skip received blocks that are not in the wantlist diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 97a9a1c9d..91b8dc500 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -2,6 +2,7 @@ package session import ( "context" + "math/rand" "time" lru "github.com/hashicorp/golang-lru" @@ -14,7 +15,15 @@ import ( peer "github.com/libp2p/go-libp2p-peer" ) -const activeWantsLimit = 16 +const ( + minReceivedToSplit = 2 + maxSplit = 32 + maxAcceptableDupes = 0.4 + minDuplesToTryLessSplits = 0.2 + initialSplit = 2 + broadcastLiveWantsLimit = 4 + targetedLiveWantsLimit = 32 +) // WantManager is an interface that can be used to request blocks // from given peers. @@ -38,8 +47,9 @@ type interestReq struct { } type blkRecv struct { - from peer.ID - blk blocks.Block + from peer.ID + blk blocks.Block + counterMessage bool } // Session holds state for an individual bitswap transfer operation. 
@@ -60,14 +70,17 @@ type Session struct { tickDelayReqs chan time.Duration // do not touch outside run loop - tofetch *cidQueue - interest *lru.Cache - liveWants map[cid.Cid]time.Time - tick *time.Timer - baseTickDelay time.Duration - latTotal time.Duration - fetchcnt int - + tofetch *cidQueue + interest *lru.Cache + pastWants *cidQueue + liveWants map[cid.Cid]time.Time + tick *time.Timer + baseTickDelay time.Duration + latTotal time.Duration + fetchcnt int + receivedCount int + split int + duplicateReceivedCount int // identifiers notif notifications.PubSub uuid logging.Loggable @@ -82,12 +95,14 @@ func New(ctx context.Context, id uint64, wm WantManager, pm PeerManager) *Sessio newReqs: make(chan []cid.Cid), cancelKeys: make(chan []cid.Cid), tofetch: newCidQueue(), + pastWants: newCidQueue(), interestReqs: make(chan interestReq), latencyReqs: make(chan chan time.Duration), tickDelayReqs: make(chan time.Duration), ctx: ctx, wm: wm, pm: pm, + split: initialSplit, incoming: make(chan blkRecv), notif: notifications.New(), uuid: loggables.Uuid("GetBlockRequest"), @@ -106,7 +121,7 @@ func New(ctx context.Context, id uint64, wm WantManager, pm PeerManager) *Sessio // ReceiveBlockFrom receives an incoming block from the given peer. func (s *Session) ReceiveBlockFrom(from peer.ID, blk blocks.Block) { select { - case s.incoming <- blkRecv{from: from, blk: blk}: + case s.incoming <- blkRecv{from: from, blk: blk, counterMessage: false}: case <-s.ctx.Done(): } ks := []cid.Cid{blk.Cid()} @@ -114,6 +129,15 @@ func (s *Session) ReceiveBlockFrom(from peer.ID, blk blocks.Block) { } +// UpdateReceiveCounters updates receive counters for a block, +// which may be a duplicate and adjusts the split factor based on that. +func (s *Session) UpdateReceiveCounters(blk blocks.Block) { + select { + case s.incoming <- blkRecv{from: "", blk: blk, counterMessage: true}: + case <-s.ctx.Done(): + } +} + // InterestedIn returns true if this session is interested in the given Cid. 
func (s *Session) InterestedIn(c cid.Cid) bool { if s.interest.Contains(c) { @@ -205,7 +229,11 @@ func (s *Session) run(ctx context.Context) { for { select { case blk := <-s.incoming: - s.handleIncomingBlock(ctx, blk) + if blk.counterMessage { + s.updateReceiveCounters(ctx, blk.blk) + } else { + s.handleIncomingBlock(ctx, blk) + } case keys := <-s.newReqs: s.handleNewRequest(ctx, keys) case keys := <-s.cancelKeys: @@ -241,8 +269,7 @@ func (s *Session) handleNewRequest(ctx context.Context, keys []cid.Cid) { for _, k := range keys { s.interest.Add(k, nil) } - if len(s.liveWants) < activeWantsLimit { - toadd := activeWantsLimit - len(s.liveWants) + if toadd := s.wantBudget(); toadd > 0 { if toadd > len(keys) { toadd = len(keys) } @@ -264,6 +291,7 @@ func (s *Session) handleCancel(keys []cid.Cid) { } func (s *Session) handleTick(ctx context.Context) { + live := make([]cid.Cid, 0, len(s.liveWants)) now := time.Now() for c := range s.liveWants { @@ -316,6 +344,28 @@ func (s *Session) receiveBlock(ctx context.Context, blk blocks.Block) { if next := s.tofetch.Pop(); next.Defined() { s.wantBlocks(ctx, []cid.Cid{next}) } + + s.pastWants.Push(c) + } +} + +func (s *Session) duplicateRatio() float64 { + return float64(s.duplicateReceivedCount) / float64(s.receivedCount) +} +func (s *Session) updateReceiveCounters(ctx context.Context, blk blocks.Block) { + if s.pastWants.Has(blk.Cid()) { + s.receivedCount++ + s.duplicateReceivedCount++ + if (s.receivedCount > minReceivedToSplit) && (s.duplicateRatio() > maxAcceptableDupes) && (s.split < maxSplit) { + s.split++ + } + } else { + if s.cidIsWanted(blk.Cid()) { + s.receivedCount++ + if (s.split > 1) && (s.duplicateRatio() < minDuplesToTryLessSplits) { + s.split-- + } + } } } @@ -325,9 +375,18 @@ func (s *Session) wantBlocks(ctx context.Context, ks []cid.Cid) { s.liveWants[c] = now } peers := s.pm.GetOptimizedPeers() - // right now we're requesting each block from every peer, but soon, maybe not - s.pm.RecordPeerRequests(peers, ks) - 
s.wm.WantBlocks(ctx, ks, peers, s.id) + if len(peers) > 0 { + splitRequests := split(ks, peers, s.split) + for i, currentKeys := range splitRequests.ks { + currentPeers := splitRequests.peers[i] + // right now we're requesting each block from every peer, but soon, maybe not + s.pm.RecordPeerRequests(currentPeers, currentKeys) + s.wm.WantBlocks(ctx, currentKeys, currentPeers, s.id) + } + } else { + s.pm.RecordPeerRequests(nil, ks) + s.wm.WantBlocks(ctx, ks, nil, s.id) + } } func (s *Session) averageLatency() time.Duration { @@ -342,3 +401,50 @@ func (s *Session) resetTick() { s.tick.Reset(s.baseTickDelay + (3 * avLat)) } } + +type splitRec struct { + ks [][]cid.Cid + peers [][]peer.ID +} + +func split(ks []cid.Cid, peers []peer.ID, split int) *splitRec { + peerSplit := split + if len(peers) < peerSplit { + peerSplit = len(peers) + } + keySplit := split + if len(ks) < keySplit { + keySplit = len(ks) + } + if keySplit > peerSplit { + keySplit = peerSplit + } + out := &splitRec{ + ks: make([][]cid.Cid, keySplit), + peers: make([][]peer.ID, peerSplit), + } + for i, c := range ks { + pos := i % keySplit + out.ks[pos] = append(out.ks[pos], c) + } + peerOrder := rand.Perm(len(peers)) + for i, po := range peerOrder { + pos := i % peerSplit + out.peers[pos] = append(out.peers[pos], peers[po]) + } + return out +} + +func (s *Session) wantBudget() int { + live := len(s.liveWants) + var budget int + if len(s.pm.GetOptimizedPeers()) > 0 { + budget = targetedLiveWantsLimit - live + } else { + budget = broadcastLiveWantsLimit - live + } + if budget < 0 { + budget = 0 + } + return budget +} diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index 8ae87cfd7..8cb25cc3c 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -65,7 +65,7 @@ func TestSessionGetBlocks(t *testing.T) { id := testutil.GenerateSessionID() session := New(ctx, id, fwm, fpm) blockGenerator := blocksutil.NewBlockGenerator() - blks := 
blockGenerator.Blocks(activeWantsLimit * 2) + blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) var cids []cid.Cid for _, block := range blks { cids = append(cids, block.Cid()) @@ -79,7 +79,7 @@ func TestSessionGetBlocks(t *testing.T) { // check initial want request receivedWantReq := <-fwm.wantReqs - if len(receivedWantReq.cids) != activeWantsLimit { + if len(receivedWantReq.cids) != broadcastLiveWantsLimit { t.Fatal("did not enqueue correct initial number of wants") } if receivedWantReq.peers != nil { @@ -87,7 +87,7 @@ func TestSessionGetBlocks(t *testing.T) { } // now receive the first set of blocks - peers := testutil.GeneratePeers(activeWantsLimit) + peers := testutil.GeneratePeers(broadcastLiveWantsLimit) var newCancelReqs []wantReq var newBlockReqs []wantReq var receivedBlocks []blocks.Block @@ -103,7 +103,7 @@ func TestSessionGetBlocks(t *testing.T) { // verify new peers were recorded fpm.lk.Lock() - if len(fpm.peers) != activeWantsLimit { + if len(fpm.peers) != broadcastLiveWantsLimit { t.Fatal("received blocks not recorded by the peer manager") } for _, p := range fpm.peers { @@ -116,7 +116,7 @@ func TestSessionGetBlocks(t *testing.T) { // look at new interactions with want manager // should have cancelled each received block - if len(newCancelReqs) != activeWantsLimit { + if len(newCancelReqs) != broadcastLiveWantsLimit { t.Fatal("did not cancel each block once it was received") } // new session reqs should be targeted @@ -129,7 +129,7 @@ func TestSessionGetBlocks(t *testing.T) { } // full new round of cids should be requested - if totalEnqueued != activeWantsLimit { + if totalEnqueued != broadcastLiveWantsLimit { t.Fatal("new blocks were not requested") } @@ -164,7 +164,7 @@ func TestSessionFindMorePeers(t *testing.T) { session := New(ctx, id, fwm, fpm) session.SetBaseTickDelay(200 * time.Microsecond) blockGenerator := blocksutil.NewBlockGenerator() - blks := blockGenerator.Blocks(activeWantsLimit * 2) + blks := 
blockGenerator.Blocks(broadcastLiveWantsLimit * 2) var cids []cid.Cid for _, block := range blks { cids = append(cids, block.Cid()) @@ -190,7 +190,7 @@ func TestSessionFindMorePeers(t *testing.T) { // verify a broadcast was made receivedWantReq := <-wantReqs - if len(receivedWantReq.cids) != activeWantsLimit { + if len(receivedWantReq.cids) != broadcastLiveWantsLimit { t.Fatal("did not rebroadcast whole live list") } if receivedWantReq.peers != nil { diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index 7e3fe2a5d..54b11348d 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -17,6 +17,7 @@ type Session interface { exchange.Fetcher InterestedIn(cid.Cid) bool ReceiveBlockFrom(peer.ID, blocks.Block) + UpdateReceiveCounters(blocks.Block) } type sesTrk struct { @@ -112,3 +113,14 @@ func (sm *SessionManager) ReceiveBlockFrom(from peer.ID, blk blocks.Block) { } } } + +// UpdateReceiveCounters records the fact that a block was received, allowing +// sessions to track duplicates +func (sm *SessionManager) UpdateReceiveCounters(blk blocks.Block) { + sm.sessLk.Lock() + defer sm.sessLk.Unlock() + + for _, s := range sm.sessions { + s.session.UpdateReceiveCounters(blk) + } +} diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index b030c0132..c32e7be3f 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -13,10 +13,11 @@ import ( ) type fakeSession struct { - interested bool - receivedBlock bool - id uint64 - pm *fakePeerManager + interested bool + receivedBlock bool + updateReceiveCounters bool + id uint64 + pm *fakePeerManager } func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { @@ -27,6 +28,7 @@ func (*fakeSession) GetBlocks(context.Context, []cid.Cid) (<-chan blocks.Block, } func (fs *fakeSession) InterestedIn(cid.Cid) bool { return 
fs.interested } func (fs *fakeSession) ReceiveBlockFrom(peer.ID, blocks.Block) { fs.receivedBlock = true } +func (fs *fakeSession) UpdateReceiveCounters(blocks.Block) { fs.updateReceiveCounters = true } type fakePeerManager struct { id uint64 diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index 59d36b2f3..00a4d598b 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -11,7 +11,7 @@ import ( ) const ( - maxOptimizedPeers = 25 + maxOptimizedPeers = 32 reservePeers = 2 ) From cfbbd5f0357efb0aa39f8105a21b8d3a65ed29bb Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Mon, 10 Dec 2018 17:39:02 -0800 Subject: [PATCH 0684/1035] feat(sessions): use all of wantBudget As soon as peers appear, consume all of the want budget This commit was moved from ipfs/go-bitswap@7f9589bca199dba71c5f04c4a5ead4da27d0b2aa --- bitswap/session/session.go | 12 ++++++++++-- bitswap/session/session_test.go | 17 ++++++++++------- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 91b8dc500..1e7e0324a 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -341,8 +341,16 @@ func (s *Session) receiveBlock(ctx context.Context, blk blocks.Block) { s.fetchcnt++ s.notif.Publish(blk) - if next := s.tofetch.Pop(); next.Defined() { - s.wantBlocks(ctx, []cid.Cid{next}) + toAdd := s.wantBudget() + if toAdd > s.tofetch.Len() { + toAdd = s.tofetch.Len() + } + if toAdd > 0 { + var keys []cid.Cid + for i := 0; i < toAdd; i++ { + keys = append(keys, s.tofetch.Pop()) + } + s.wantBlocks(ctx, keys) } s.pastWants.Push(c) diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index 8cb25cc3c..86ad1d71f 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -97,8 +97,11 @@ func TestSessionGetBlocks(t *testing.T) { receivedBlocks = 
append(receivedBlocks, receivedBlock) cancelBlock := <-cancelReqs newCancelReqs = append(newCancelReqs, cancelBlock) - wantBlock := <-wantReqs - newBlockReqs = append(newBlockReqs, wantBlock) + select { + case wantBlock := <-wantReqs: + newBlockReqs = append(newBlockReqs, wantBlock) + default: + } } // verify new peers were recorded @@ -120,22 +123,22 @@ func TestSessionGetBlocks(t *testing.T) { t.Fatal("did not cancel each block once it was received") } // new session reqs should be targeted - totalEnqueued := 0 + var newCidsRequested []cid.Cid for _, w := range newBlockReqs { if len(w.peers) == 0 { t.Fatal("should not have broadcast again after initial broadcast") } - totalEnqueued += len(w.cids) + newCidsRequested = append(newCidsRequested, w.cids...) } // full new round of cids should be requested - if totalEnqueued != broadcastLiveWantsLimit { + if len(newCidsRequested) != broadcastLiveWantsLimit { t.Fatal("new blocks were not requested") } // receive remaining blocks for i, p := range peers { - session.ReceiveBlockFrom(p, blks[testutil.IndexOf(blks, newBlockReqs[i].cids[0])]) + session.ReceiveBlockFrom(p, blks[testutil.IndexOf(blks, newCidsRequested[i])]) receivedBlock := <-getBlocksCh receivedBlocks = append(receivedBlocks, receivedBlock) cancelBlock := <-cancelReqs @@ -190,7 +193,7 @@ func TestSessionFindMorePeers(t *testing.T) { // verify a broadcast was made receivedWantReq := <-wantReqs - if len(receivedWantReq.cids) != broadcastLiveWantsLimit { + if len(receivedWantReq.cids) < broadcastLiveWantsLimit { t.Fatal("did not rebroadcast whole live list") } if receivedWantReq.peers != nil { From c82e321ca193852fbfed456e609d068754c33042 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Fri, 14 Dec 2018 16:35:14 -0800 Subject: [PATCH 0685/1035] refactor(sessions): extract request splitting Move the job of splitting requests to its own package This commit was moved from ipfs/go-bitswap@a0fd23cda00e02b405a67181d8df46af68953383 --- bitswap/bitswap.go | 11 +- 
bitswap/session/session.go | 110 ++++-------- bitswap/session/session_test.go | 17 +- bitswap/sessionmanager/sessionmanager.go | 28 +-- bitswap/sessionmanager/sessionmanager_test.go | 27 ++- .../sessionrequestsplitter.go | 163 ++++++++++++++++++ .../sessionrequestsplitter_test.go | 96 +++++++++++ 7 files changed, 356 insertions(+), 96 deletions(-) create mode 100644 bitswap/sessionrequestsplitter/sessionrequestsplitter.go create mode 100644 bitswap/sessionrequestsplitter/sessionrequestsplitter_test.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 1bc4e7460..c4b8e8879 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -9,6 +9,8 @@ import ( "sync/atomic" "time" + bssrs "github.com/ipfs/go-bitswap/sessionrequestsplitter" + decision "github.com/ipfs/go-bitswap/decision" bsgetter "github.com/ipfs/go-bitswap/getter" bsmsg "github.com/ipfs/go-bitswap/message" @@ -103,12 +105,15 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, } wm := bswm.New(ctx) - sessionFactory := func(ctx context.Context, id uint64, pm bssession.PeerManager) bssm.Session { - return bssession.New(ctx, id, wm, pm) + sessionFactory := func(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter) bssm.Session { + return bssession.New(ctx, id, wm, pm, srs) } sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.PeerManager { return bsspm.New(ctx, id, network) } + sessionRequestSplitterFactory := func(ctx context.Context) bssession.RequestSplitter { + return bssrs.New(ctx) + } bs := &Bitswap{ blockstore: bstore, @@ -121,7 +126,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, provideKeys: make(chan cid.Cid, provideKeysBufferSize), wm: wm, pm: bspm.New(ctx, peerQueueFactory), - sm: bssm.New(ctx, sessionFactory, sessionPeerManagerFactory), + sm: bssm.New(ctx, sessionFactory, sessionPeerManagerFactory, sessionRequestSplitterFactory), counters: new(counters), dupMetric: dupHist, allMetric: allHist, 
diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 1e7e0324a..282a44ef1 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -2,7 +2,6 @@ package session import ( "context" - "math/rand" "time" lru "github.com/hashicorp/golang-lru" @@ -13,16 +12,13 @@ import ( logging "github.com/ipfs/go-log" loggables "github.com/libp2p/go-libp2p-loggables" peer "github.com/libp2p/go-libp2p-peer" + + bssrs "github.com/ipfs/go-bitswap/sessionrequestsplitter" ) const ( - minReceivedToSplit = 2 - maxSplit = 32 - maxAcceptableDupes = 0.4 - minDuplesToTryLessSplits = 0.2 - initialSplit = 2 - broadcastLiveWantsLimit = 4 - targetedLiveWantsLimit = 32 + broadcastLiveWantsLimit = 4 + targetedLiveWantsLimit = 32 ) // WantManager is an interface that can be used to request blocks @@ -41,6 +37,14 @@ type PeerManager interface { RecordPeerResponse(peer.ID, cid.Cid) } +// RequestSplitter provides an interface for splitting +// a request for Cids up among peers. +type RequestSplitter interface { + SplitRequest([]peer.ID, []cid.Cid) []*bssrs.PartialRequest + RecordDuplicateBlock() + RecordUniqueBlock() +} + type interestReq struct { c cid.Cid resp chan bool @@ -60,6 +64,7 @@ type Session struct { ctx context.Context wm WantManager pm PeerManager + srs RequestSplitter // channels incoming chan blkRecv @@ -70,17 +75,14 @@ type Session struct { tickDelayReqs chan time.Duration // do not touch outside run loop - tofetch *cidQueue - interest *lru.Cache - pastWants *cidQueue - liveWants map[cid.Cid]time.Time - tick *time.Timer - baseTickDelay time.Duration - latTotal time.Duration - fetchcnt int - receivedCount int - split int - duplicateReceivedCount int + tofetch *cidQueue + interest *lru.Cache + pastWants *cidQueue + liveWants map[cid.Cid]time.Time + tick *time.Timer + baseTickDelay time.Duration + latTotal time.Duration + fetchcnt int // identifiers notif notifications.PubSub uuid logging.Loggable @@ -89,7 +91,7 @@ type Session struct { // New creates 
a new bitswap session whose lifetime is bounded by the // given context. -func New(ctx context.Context, id uint64, wm WantManager, pm PeerManager) *Session { +func New(ctx context.Context, id uint64, wm WantManager, pm PeerManager, srs RequestSplitter) *Session { s := &Session{ liveWants: make(map[cid.Cid]time.Time), newReqs: make(chan []cid.Cid), @@ -102,7 +104,7 @@ func New(ctx context.Context, id uint64, wm WantManager, pm PeerManager) *Sessio ctx: ctx, wm: wm, pm: pm, - split: initialSplit, + srs: srs, incoming: make(chan blkRecv), notif: notifications.New(), uuid: loggables.Uuid("GetBlockRequest"), @@ -230,7 +232,7 @@ func (s *Session) run(ctx context.Context) { select { case blk := <-s.incoming: if blk.counterMessage { - s.updateReceiveCounters(ctx, blk.blk) + s.updateReceiveCounters(ctx, blk) } else { s.handleIncomingBlock(ctx, blk) } @@ -357,22 +359,13 @@ func (s *Session) receiveBlock(ctx context.Context, blk blocks.Block) { } } -func (s *Session) duplicateRatio() float64 { - return float64(s.duplicateReceivedCount) / float64(s.receivedCount) -} -func (s *Session) updateReceiveCounters(ctx context.Context, blk blocks.Block) { - if s.pastWants.Has(blk.Cid()) { - s.receivedCount++ - s.duplicateReceivedCount++ - if (s.receivedCount > minReceivedToSplit) && (s.duplicateRatio() > maxAcceptableDupes) && (s.split < maxSplit) { - s.split++ - } +func (s *Session) updateReceiveCounters(ctx context.Context, blk blkRecv) { + ks := blk.blk.Cid() + if s.pastWants.Has(ks) { + s.srs.RecordDuplicateBlock() } else { - if s.cidIsWanted(blk.Cid()) { - s.receivedCount++ - if (s.split > 1) && (s.duplicateRatio() < minDuplesToTryLessSplits) { - s.split-- - } + if s.cidIsWanted(ks) { + s.srs.RecordUniqueBlock() } } } @@ -384,12 +377,10 @@ func (s *Session) wantBlocks(ctx context.Context, ks []cid.Cid) { } peers := s.pm.GetOptimizedPeers() if len(peers) > 0 { - splitRequests := split(ks, peers, s.split) - for i, currentKeys := range splitRequests.ks { - currentPeers := 
splitRequests.peers[i] - // right now we're requesting each block from every peer, but soon, maybe not - s.pm.RecordPeerRequests(currentPeers, currentKeys) - s.wm.WantBlocks(ctx, currentKeys, currentPeers, s.id) + splitRequests := s.srs.SplitRequest(peers, ks) + for _, splitRequest := range splitRequests { + s.pm.RecordPeerRequests(splitRequest.Peers, splitRequest.Keys) + s.wm.WantBlocks(ctx, splitRequest.Keys, splitRequest.Peers, s.id) } } else { s.pm.RecordPeerRequests(nil, ks) @@ -410,39 +401,6 @@ func (s *Session) resetTick() { } } -type splitRec struct { - ks [][]cid.Cid - peers [][]peer.ID -} - -func split(ks []cid.Cid, peers []peer.ID, split int) *splitRec { - peerSplit := split - if len(peers) < peerSplit { - peerSplit = len(peers) - } - keySplit := split - if len(ks) < keySplit { - keySplit = len(ks) - } - if keySplit > peerSplit { - keySplit = peerSplit - } - out := &splitRec{ - ks: make([][]cid.Cid, keySplit), - peers: make([][]peer.ID, peerSplit), - } - for i, c := range ks { - pos := i % keySplit - out.ks[pos] = append(out.ks[pos], c) - } - peerOrder := rand.Perm(len(peers)) - for i, po := range peerOrder { - pos := i % peerSplit - out.peers[pos] = append(out.peers[pos], peers[po]) - } - return out -} - func (s *Session) wantBudget() int { live := len(s.liveWants) var budget int diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index 86ad1d71f..a75894a52 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -8,6 +8,7 @@ import ( "github.com/ipfs/go-block-format" + bssrs "github.com/ipfs/go-bitswap/sessionrequestsplitter" "github.com/ipfs/go-bitswap/testutil" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" @@ -55,6 +56,16 @@ func (fpm *fakePeerManager) RecordPeerResponse(p peer.ID, c cid.Cid) { fpm.lk.Unlock() } +type fakeRequestSplitter struct { +} + +func (frs *fakeRequestSplitter) SplitRequest(peers []peer.ID, keys []cid.Cid) []*bssrs.PartialRequest { + return 
[]*bssrs.PartialRequest{&bssrs.PartialRequest{Peers: peers, Keys: keys}} +} + +func (frs *fakeRequestSplitter) RecordDuplicateBlock() {} +func (frs *fakeRequestSplitter) RecordUniqueBlock() {} + func TestSessionGetBlocks(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel() @@ -62,8 +73,9 @@ func TestSessionGetBlocks(t *testing.T) { cancelReqs := make(chan wantReq, 1) fwm := &fakeWantManager{wantReqs, cancelReqs} fpm := &fakePeerManager{} + frs := &fakeRequestSplitter{} id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm) + session := New(ctx, id, fwm, fpm, frs) blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) var cids []cid.Cid @@ -163,8 +175,9 @@ func TestSessionFindMorePeers(t *testing.T) { cancelReqs := make(chan wantReq, 1) fwm := &fakeWantManager{wantReqs, cancelReqs} fpm := &fakePeerManager{findMorePeersRequested: make(chan struct{})} + frs := &fakeRequestSplitter{} id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm) + session := New(ctx, id, fwm, fpm, frs) session.SetBaseTickDelay(200 * time.Microsecond) blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index 54b11348d..ac1bb700a 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -23,10 +23,14 @@ type Session interface { type sesTrk struct { session Session pm bssession.PeerManager + srs bssession.RequestSplitter } // SessionFactory generates a new session for the SessionManager to track. 
-type SessionFactory func(ctx context.Context, id uint64, pm bssession.PeerManager) Session +type SessionFactory func(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter) Session + +// RequestSplitterFactory generates a new request splitter for a session. +type RequestSplitterFactory func(ctx context.Context) bssession.RequestSplitter // PeerManagerFactory generates a new peer manager for a session. type PeerManagerFactory func(ctx context.Context, id uint64) bssession.PeerManager @@ -34,9 +38,11 @@ type PeerManagerFactory func(ctx context.Context, id uint64) bssession.PeerManag // SessionManager is responsible for creating, managing, and dispatching to // sessions. type SessionManager struct { - ctx context.Context - sessionFactory SessionFactory - peerManagerFactory PeerManagerFactory + ctx context.Context + sessionFactory SessionFactory + peerManagerFactory PeerManagerFactory + requestSplitterFactory RequestSplitterFactory + // Sessions sessLk sync.Mutex sessions []sesTrk @@ -47,11 +53,12 @@ type SessionManager struct { } // New creates a new SessionManager. 
-func New(ctx context.Context, sessionFactory SessionFactory, peerManagerFactory PeerManagerFactory) *SessionManager { +func New(ctx context.Context, sessionFactory SessionFactory, peerManagerFactory PeerManagerFactory, requestSplitterFactory RequestSplitterFactory) *SessionManager { return &SessionManager{ - ctx: ctx, - sessionFactory: sessionFactory, - peerManagerFactory: peerManagerFactory, + ctx: ctx, + sessionFactory: sessionFactory, + peerManagerFactory: peerManagerFactory, + requestSplitterFactory: requestSplitterFactory, } } @@ -62,8 +69,9 @@ func (sm *SessionManager) NewSession(ctx context.Context) exchange.Fetcher { sessionctx, cancel := context.WithCancel(ctx) pm := sm.peerManagerFactory(sessionctx, id) - session := sm.sessionFactory(sessionctx, id, pm) - tracked := sesTrk{session, pm} + srs := sm.requestSplitterFactory(sessionctx) + session := sm.sessionFactory(sessionctx, id, pm, srs) + tracked := sesTrk{session, pm, srs} sm.sessLk.Lock() sm.sessions = append(sm.sessions, tracked) sm.sessLk.Unlock() diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index c32e7be3f..1310ac978 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + bssrs "github.com/ipfs/go-bitswap/sessionrequestsplitter" + bssession "github.com/ipfs/go-bitswap/session" blocks "github.com/ipfs/go-block-format" @@ -18,6 +20,7 @@ type fakeSession struct { updateReceiveCounters bool id uint64 pm *fakePeerManager + srs *fakeRequestSplitter } func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { @@ -39,14 +42,24 @@ func (*fakePeerManager) GetOptimizedPeers() []peer.ID { return nil } func (*fakePeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} func (*fakePeerManager) RecordPeerResponse(peer.ID, cid.Cid) {} +type fakeRequestSplitter struct { +} + +func (frs *fakeRequestSplitter) SplitRequest(peers []peer.ID, 
keys []cid.Cid) []*bssrs.PartialRequest { + return nil +} +func (frs *fakeRequestSplitter) RecordDuplicateBlock() {} +func (frs *fakeRequestSplitter) RecordUniqueBlock() {} + var nextInterestedIn bool -func sessionFactory(ctx context.Context, id uint64, pm bssession.PeerManager) Session { +func sessionFactory(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter) Session { return &fakeSession{ interested: nextInterestedIn, receivedBlock: false, id: id, pm: pm.(*fakePeerManager), + srs: srs.(*fakeRequestSplitter), } } @@ -54,11 +67,15 @@ func peerManagerFactory(ctx context.Context, id uint64) bssession.PeerManager { return &fakePeerManager{id} } +func requestSplitterFactory(ctx context.Context) bssession.RequestSplitter { + return &fakeRequestSplitter{} +} + func TestAddingSessions(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - sm := New(ctx, sessionFactory, peerManagerFactory) + sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory) p := peer.ID(123) block := blocks.NewBlock([]byte("block")) @@ -94,7 +111,7 @@ func TestReceivingBlocksWhenNotInterested(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - sm := New(ctx, sessionFactory, peerManagerFactory) + sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory) p := peer.ID(123) block := blocks.NewBlock([]byte("block")) @@ -117,7 +134,7 @@ func TestReceivingBlocksWhenNotInterested(t *testing.T) { func TestRemovingPeersWhenManagerContextCancelled(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) - sm := New(ctx, sessionFactory, peerManagerFactory) + sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory) p := peer.ID(123) block := blocks.NewBlock([]byte("block")) @@ -142,7 +159,7 @@ func TestRemovingPeersWhenSessionContextCancelled(t *testing.T) { ctx := context.Background() ctx, cancel := 
context.WithCancel(ctx) defer cancel() - sm := New(ctx, sessionFactory, peerManagerFactory) + sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory) p := peer.ID(123) block := blocks.NewBlock([]byte("block")) diff --git a/bitswap/sessionrequestsplitter/sessionrequestsplitter.go b/bitswap/sessionrequestsplitter/sessionrequestsplitter.go new file mode 100644 index 000000000..32dcf1fc8 --- /dev/null +++ b/bitswap/sessionrequestsplitter/sessionrequestsplitter.go @@ -0,0 +1,163 @@ +package sessionrequestsplitter + +import ( + "context" + + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-peer" +) + +const ( + minReceivedToAdjustSplit = 2 + maxSplit = 16 + maxAcceptableDupes = 0.4 + minDuplesToTryLessSplits = 0.2 + initialSplit = 2 +) + +// PartialRequest is represents one slice of an over request split among peers +type PartialRequest struct { + Peers []peer.ID + Keys []cid.Cid +} + +type srsMessage interface { + handle(srs *SessionRequestSplitter) +} + +// SessionRequestSplitter track how many duplicate and unique blocks come in and +// uses that to determine how much to split up each set of wants among peers. +type SessionRequestSplitter struct { + ctx context.Context + messages chan srsMessage + + // data, do not touch outside run loop + receivedCount int + split int + duplicateReceivedCount int +} + +// New returns a new SessionRequestSplitter. +func New(ctx context.Context) *SessionRequestSplitter { + srs := &SessionRequestSplitter{ + ctx: ctx, + messages: make(chan srsMessage, 10), + split: initialSplit, + } + go srs.run() + return srs +} + +// SplitRequest splits a request for the given cids one or more times among the +// given peers. 
+func (srs *SessionRequestSplitter) SplitRequest(peers []peer.ID, ks []cid.Cid) []*PartialRequest { + resp := make(chan []*PartialRequest) + + select { + case srs.messages <- &splitRequestMessage{peers, ks, resp}: + case <-srs.ctx.Done(): + return nil + } + select { + case splitRequests := <-resp: + return splitRequests + case <-srs.ctx.Done(): + return nil + } + +} + +// RecordDuplicateBlock records the fact that the session received a duplicate +// block and adjusts split factor as neccesary. +func (srs *SessionRequestSplitter) RecordDuplicateBlock() { + select { + case srs.messages <- &recordDuplicateMessage{}: + case <-srs.ctx.Done(): + } +} + +// RecordUniqueBlock records the fact that the session received unique block +// and adjusts the split factor as neccesary. +func (srs *SessionRequestSplitter) RecordUniqueBlock() { + select { + case srs.messages <- &recordUniqueMessage{}: + case <-srs.ctx.Done(): + } +} + +func (srs *SessionRequestSplitter) run() { + for { + select { + case message := <-srs.messages: + message.handle(srs) + case <-srs.ctx.Done(): + return + } + } +} + +func (srs *SessionRequestSplitter) duplicateRatio() float64 { + return float64(srs.duplicateReceivedCount) / float64(srs.receivedCount) +} + +type splitRequestMessage struct { + peers []peer.ID + ks []cid.Cid + resp chan []*PartialRequest +} + +func (s *splitRequestMessage) handle(srs *SessionRequestSplitter) { + split := srs.split + peers := s.peers + ks := s.ks + if len(peers) < split { + split = len(peers) + } + peerSplits := splitPeers(peers, split) + if len(ks) < split { + split = len(ks) + } + keySplits := splitKeys(ks, split) + splitRequests := make([]*PartialRequest, len(keySplits)) + for i := range splitRequests { + splitRequests[i] = &PartialRequest{peerSplits[i], keySplits[i]} + } + s.resp <- splitRequests +} + +type recordDuplicateMessage struct{} + +func (r *recordDuplicateMessage) handle(srs *SessionRequestSplitter) { + srs.receivedCount++ + srs.duplicateReceivedCount++ + if 
(srs.receivedCount > minReceivedToAdjustSplit) && (srs.duplicateRatio() > maxAcceptableDupes) && (srs.split < maxSplit) { + srs.split++ + } +} + +type recordUniqueMessage struct{} + +func (r *recordUniqueMessage) handle(srs *SessionRequestSplitter) { + srs.receivedCount++ + if (srs.split > 1) && (srs.duplicateRatio() < minDuplesToTryLessSplits) { + srs.split-- + } + +} +func splitKeys(ks []cid.Cid, split int) [][]cid.Cid { + splits := make([][]cid.Cid, split) + for i, c := range ks { + pos := i % split + splits[pos] = append(splits[pos], c) + } + return splits +} + +func splitPeers(peers []peer.ID, split int) [][]peer.ID { + splits := make([][]peer.ID, split) + for i, p := range peers { + pos := i % split + splits[pos] = append(splits[pos], p) + } + return splits +} diff --git a/bitswap/sessionrequestsplitter/sessionrequestsplitter_test.go b/bitswap/sessionrequestsplitter/sessionrequestsplitter_test.go new file mode 100644 index 000000000..35c5fe2a4 --- /dev/null +++ b/bitswap/sessionrequestsplitter/sessionrequestsplitter_test.go @@ -0,0 +1,96 @@ +package sessionrequestsplitter + +import ( + "context" + "testing" + + "github.com/ipfs/go-bitswap/testutil" +) + +func TestSplittingRequests(t *testing.T) { + ctx := context.Background() + peers := testutil.GeneratePeers(10) + keys := testutil.GenerateCids(6) + + srs := New(ctx) + + partialRequests := srs.SplitRequest(peers, keys) + if len(partialRequests) != 2 { + t.Fatal("Did not generate right number of partial requests") + } + for _, partialRequest := range partialRequests { + if len(partialRequest.Peers) != 5 && len(partialRequest.Keys) != 3 { + t.Fatal("Did not split request into even partial requests") + } + } +} + +func TestSplittingRequestsTooFewKeys(t *testing.T) { + ctx := context.Background() + peers := testutil.GeneratePeers(10) + keys := testutil.GenerateCids(1) + + srs := New(ctx) + + partialRequests := srs.SplitRequest(peers, keys) + if len(partialRequests) != 1 { + t.Fatal("Should only generate as many 
requests as keys") + } + for _, partialRequest := range partialRequests { + if len(partialRequest.Peers) != 5 && len(partialRequest.Keys) != 1 { + t.Fatal("Should still split peers up between keys") + } + } +} + +func TestSplittingRequestsTooFewPeers(t *testing.T) { + ctx := context.Background() + peers := testutil.GeneratePeers(1) + keys := testutil.GenerateCids(6) + + srs := New(ctx) + + partialRequests := srs.SplitRequest(peers, keys) + if len(partialRequests) != 1 { + t.Fatal("Should only generate as many requests as peers") + } + for _, partialRequest := range partialRequests { + if len(partialRequest.Peers) != 1 && len(partialRequest.Keys) != 6 { + t.Fatal("Should not split keys if there are not enough peers") + } + } +} + +func TestSplittingRequestsIncreasingSplitDueToDupes(t *testing.T) { + ctx := context.Background() + peers := testutil.GeneratePeers(maxSplit) + keys := testutil.GenerateCids(maxSplit) + + srs := New(ctx) + + for i := 0; i < maxSplit+minReceivedToAdjustSplit; i++ { + srs.RecordDuplicateBlock() + } + + partialRequests := srs.SplitRequest(peers, keys) + if len(partialRequests) != maxSplit { + t.Fatal("Did not adjust split up as duplicates came in") + } +} + +func TestSplittingRequestsDecreasingSplitDueToNoDupes(t *testing.T) { + ctx := context.Background() + peers := testutil.GeneratePeers(maxSplit) + keys := testutil.GenerateCids(maxSplit) + + srs := New(ctx) + + for i := 0; i < 5+minReceivedToAdjustSplit; i++ { + srs.RecordUniqueBlock() + } + + partialRequests := srs.SplitRequest(peers, keys) + if len(partialRequests) != 1 { + t.Fatal("Did not adjust split down as unique blocks came in") + } +} From de2056f67315b38e47b9b6bc418c0f5dc8bed6ad Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 20 Dec 2018 14:05:29 -0800 Subject: [PATCH 0686/1035] refactor(sessions): minor cleanup Encapsulate functions for readability, and move code for understanding This commit was moved from ipfs/go-bitswap@b1a82dcba9e76cbff9a6b0ddda8976a8a8405208 --- 
bitswap/session/session.go | 5 +-- bitswap/session/session_test.go | 2 +- .../sessionpeermanager/sessionpeermanager.go | 37 +++++++++++-------- 3 files changed, 23 insertions(+), 21 deletions(-) diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 282a44ef1..bae52bd06 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -333,6 +333,7 @@ func (s *Session) cidIsWanted(c cid.Cid) bool { func (s *Session) receiveBlock(ctx context.Context, blk blocks.Block) { c := blk.Cid() if s.cidIsWanted(c) { + s.srs.RecordUniqueBlock() tval, ok := s.liveWants[c] if ok { s.latTotal += time.Since(tval) @@ -363,10 +364,6 @@ func (s *Session) updateReceiveCounters(ctx context.Context, blk blkRecv) { ks := blk.blk.Cid() if s.pastWants.Has(ks) { s.srs.RecordDuplicateBlock() - } else { - if s.cidIsWanted(ks) { - s.srs.RecordUniqueBlock() - } } } diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index a75894a52..d578f7a73 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -174,7 +174,7 @@ func TestSessionFindMorePeers(t *testing.T) { wantReqs := make(chan wantReq, 1) cancelReqs := make(chan wantReq, 1) fwm := &fakeWantManager{wantReqs, cancelReqs} - fpm := &fakePeerManager{findMorePeersRequested: make(chan struct{})} + fpm := &fakePeerManager{findMorePeersRequested: make(chan struct{}, 1)} frs := &fakeRequestSplitter{} id := testutil.GenerateSessionID() session := New(ctx, id, fwm, fpm, frs) diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index 00a4d598b..3b951c42e 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -134,6 +134,25 @@ func (spm *SessionPeerManager) insertOptimizedPeer(p peer.ID) { spm.optimizedPeersArr = append([]peer.ID{p}, spm.optimizedPeersArr...) 
} +func (spm *SessionPeerManager) removeOptimizedPeer(p peer.ID) { + for i := 0; i < len(spm.optimizedPeersArr); i++ { + if spm.optimizedPeersArr[i] == p { + spm.optimizedPeersArr = append(spm.optimizedPeersArr[:i], spm.optimizedPeersArr[i+1:]...) + return + } + } +} + +func (spm *SessionPeerManager) removeUnoptimizedPeer(p peer.ID) { + for i := 0; i < len(spm.unoptimizedPeersArr); i++ { + if spm.unoptimizedPeersArr[i] == p { + spm.unoptimizedPeersArr[i] = spm.unoptimizedPeersArr[len(spm.unoptimizedPeersArr)-1] + spm.unoptimizedPeersArr = spm.unoptimizedPeersArr[:len(spm.unoptimizedPeersArr)-1] + return + } + } +} + type peerFoundMessage struct { p peer.ID } @@ -160,24 +179,10 @@ func (prm *peerResponseMessage) handle(spm *SessionPeerManager) { spm.tagPeer(p) } else { if isOptimized { - if spm.optimizedPeersArr[0] == p { - return - } - for i := 0; i < len(spm.optimizedPeersArr); i++ { - if spm.optimizedPeersArr[i] == p { - spm.optimizedPeersArr = append(spm.optimizedPeersArr[:i], spm.optimizedPeersArr[i+1:]...) 
- break - } - } + spm.removeOptimizedPeer(p) } else { spm.activePeers[p] = true - for i := 0; i < len(spm.unoptimizedPeersArr); i++ { - if spm.unoptimizedPeersArr[i] == p { - spm.unoptimizedPeersArr[i] = spm.unoptimizedPeersArr[len(spm.unoptimizedPeersArr)-1] - spm.unoptimizedPeersArr = spm.unoptimizedPeersArr[:len(spm.unoptimizedPeersArr)-1] - break - } - } + spm.removeUnoptimizedPeer(p) } } spm.insertOptimizedPeer(p) From add3f89530fdbcb359c8f8880d05b952bb0de277 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 10 Jan 2019 11:52:41 -0800 Subject: [PATCH 0687/1035] fix(sessions): explicitly connect found peers when providers are found in a session, explicitly connect them so they get added to the peer manager fix #53 This commit was moved from ipfs/go-bitswap@4ccbbc8d783870eab1aa1a87e755ec295bc4f86e --- bitswap/bitswap_with_sessions_test.go | 40 +++++++++++++++++++ bitswap/session/session.go | 2 +- .../sessionpeermanager/sessionpeermanager.go | 18 ++++++++- .../sessionpeermanager_test.go | 6 ++- 4 files changed, 63 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/bitswap_with_sessions_test.go index 5034aaeec..f0b97ba82 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -152,6 +152,46 @@ func TestSessionSplitFetch(t *testing.T) { } } +func TestFetchNotConnected(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + vnet := getVirtualNetwork() + sesgen := NewTestSessionGenerator(vnet) + defer sesgen.Close() + bgen := blocksutil.NewBlockGenerator() + + other := sesgen.Next() + + blks := bgen.Blocks(10) + for _, block := range blks { + if err := other.Exchange.HasBlock(block); err != nil { + t.Fatal(err) + } + } + + var cids []cid.Cid + for _, blk := range blks { + cids = append(cids, blk.Cid()) + } + + thisNode := sesgen.Next() + ses := thisNode.Exchange.NewSession(ctx).(*bssession.Session) + 
ses.SetBaseTickDelay(time.Millisecond * 10) + + ch, err := ses.GetBlocks(ctx, cids) + if err != nil { + t.Fatal(err) + } + + var got []blocks.Block + for b := range ch { + got = append(got, b) + } + if err := assertBlockLists(got, blks); err != nil { + t.Fatal(err) + } +} func TestInterestCacheOverflow(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/bitswap/session/session.go b/bitswap/session/session.go index bae52bd06..c17b45a57 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -222,7 +222,7 @@ func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { } } -const provSearchDelay = time.Second * 10 +const provSearchDelay = time.Second // Session run loop -- everything function below here should not be called // of this loop diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index 3b951c42e..ebd3cb5f6 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -4,12 +4,17 @@ import ( "context" "fmt" "math/rand" + "sync" + + logging "github.com/ipfs/go-log" cid "github.com/ipfs/go-cid" ifconnmgr "github.com/libp2p/go-libp2p-interface-connmgr" peer "github.com/libp2p/go-libp2p-peer" ) +var log = logging.Logger("bitswap") + const ( maxOptimizedPeers = 32 reservePeers = 2 @@ -18,6 +23,7 @@ const ( // PeerNetwork is an interface for finding providers and managing connections type PeerNetwork interface { ConnectionManager() ifconnmgr.ConnManager + ConnectTo(context.Context, peer.ID) error FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID } @@ -101,9 +107,19 @@ func (spm *SessionPeerManager) FindMorePeers(ctx context.Context, c cid.Cid) { // - manage timeouts // - ensure two 'findprovs' calls for the same block don't run concurrently // - share peers between sessions based on interest set + wg := &sync.WaitGroup{} for p := range 
spm.network.FindProvidersAsync(ctx, k, 10) { - spm.peerMessages <- &peerFoundMessage{p} + wg.Add(1) + go func(p peer.ID) { + defer wg.Done() + err := spm.network.ConnectTo(ctx, p) + if err != nil { + log.Debugf("failed to connect to provider %s: %s", p, err) + } + spm.peerMessages <- &peerFoundMessage{p} + }(p) } + wg.Wait() }(c) } diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index ba23c87d5..b4e723b10 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -2,8 +2,8 @@ package sessionpeermanager import ( "context" - "sync" "math/rand" + "sync" "testing" "time" @@ -24,6 +24,10 @@ func (fpn *fakePeerNetwork) ConnectionManager() ifconnmgr.ConnManager { return fpn.connManager } +func (fpn *fakePeerNetwork) ConnectTo(context.Context, peer.ID) error { + return nil +} + func (fpn *fakePeerNetwork) FindProvidersAsync(ctx context.Context, c cid.Cid, num int) <-chan peer.ID { peerCh := make(chan peer.ID) go func() { From b3325229279c7bdebd941538a07d46c66f2532c1 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Fri, 11 Jan 2019 15:13:54 -0800 Subject: [PATCH 0688/1035] fix(session): make provSearchDelay configurable This commit was moved from ipfs/go-bitswap@48875c4da4317d10fc0ad093e8c39e7ddb12900b --- bitswap/bitswap_with_sessions_test.go | 1 + bitswap/session/session.go | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/bitswap_with_sessions_test.go index f0b97ba82..0be7bc97c 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -156,6 +156,7 @@ func TestFetchNotConnected(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() + bssession.SetProviderSearchDelay(10 * time.Millisecond) vnet := getVirtualNetwork() sesgen := NewTestSessionGenerator(vnet) defer 
sesgen.Close() diff --git a/bitswap/session/session.go b/bitswap/session/session.go index c17b45a57..b57f472e6 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -222,7 +222,12 @@ func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { } } -const provSearchDelay = time.Second +var provSearchDelay = time.Second + +// SetProviderSearchDelay overwrites the global provider search delay +func SetProviderSearchDelay(newProvSearchDelay time.Duration) { + provSearchDelay = newProvSearchDelay +} // Session run loop -- everything function below here should not be called // of this loop From 29af725e7b14838c35fec1bf1ab17ce66d8a84f2 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Fri, 11 Jan 2019 15:15:32 -0800 Subject: [PATCH 0689/1035] fix(sessionpeermanager): remove waitGroup Remove sync.waitGroup in SessionPeerManager till it's needed This commit was moved from ipfs/go-bitswap@6f7a77e0658c25b573bfdd226ee9056d58727ef1 --- bitswap/sessionpeermanager/sessionpeermanager.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index ebd3cb5f6..2e7338324 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "math/rand" - "sync" logging "github.com/ipfs/go-log" @@ -107,11 +106,8 @@ func (spm *SessionPeerManager) FindMorePeers(ctx context.Context, c cid.Cid) { // - manage timeouts // - ensure two 'findprovs' calls for the same block don't run concurrently // - share peers between sessions based on interest set - wg := &sync.WaitGroup{} for p := range spm.network.FindProvidersAsync(ctx, k, 10) { - wg.Add(1) go func(p peer.ID) { - defer wg.Done() err := spm.network.ConnectTo(ctx, p) if err != nil { log.Debugf("failed to connect to provider %s: %s", p, err) @@ -119,7 +115,6 @@ func (spm *SessionPeerManager) FindMorePeers(ctx context.Context, c 
cid.Cid) { spm.peerMessages <- &peerFoundMessage{p} }(p) } - wg.Wait() }(c) } From d9820dbd7ebb383e9aa56f348f22a6586cbc3b78 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 22 Jan 2019 08:39:53 -0800 Subject: [PATCH 0690/1035] contexts: make sure to abort when a context is canceled Also, buffer single-use channels we may walk away from. This was showing up (rarely) in a go-ipfs test. This commit was moved from ipfs/go-bitswap@0cbfff776a4960d3720ac05bb854ce2a9bdcba20 --- bitswap/peermanager/peermanager.go | 15 ++++- .../sessionpeermanager/sessionpeermanager.go | 9 ++- .../sessionrequestsplitter.go | 2 +- bitswap/wantmanager/wantmanager.go | 60 +++++++++++++++---- bitswap/workers.go | 8 ++- 5 files changed, 74 insertions(+), 20 deletions(-) diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index 30145cc5c..fed1b3f76 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -59,9 +59,18 @@ func New(ctx context.Context, createPeerQueue PeerQueueFactory) *PeerManager { // ConnectedPeers returns a list of peers this PeerManager is managing. 
func (pm *PeerManager) ConnectedPeers() []peer.ID { - resp := make(chan []peer.ID) - pm.peerMessages <- &getPeersMessage{resp} - return <-resp + resp := make(chan []peer.ID, 1) + select { + case pm.peerMessages <- &getPeersMessage{resp}: + case <-pm.ctx.Done(): + return nil + } + select { + case peers := <-resp: + return peers + case <-pm.ctx.Done(): + return nil + } } // Connected is called to add a new peer to the pool, and send it an initial set diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index 2e7338324..225f19017 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -82,7 +82,7 @@ func (spm *SessionPeerManager) RecordPeerRequests(p []peer.ID, ks []cid.Cid) { func (spm *SessionPeerManager) GetOptimizedPeers() []peer.ID { // right now this just returns all peers, but soon we might return peers // ordered by optimization, or only a subset - resp := make(chan []peer.ID) + resp := make(chan []peer.ID, 1) select { case spm.peerMessages <- &peerReqMessage{resp}: case <-spm.ctx.Done(): @@ -108,11 +108,16 @@ func (spm *SessionPeerManager) FindMorePeers(ctx context.Context, c cid.Cid) { // - share peers between sessions based on interest set for p := range spm.network.FindProvidersAsync(ctx, k, 10) { go func(p peer.ID) { + // TODO: Also use context from spm. 
err := spm.network.ConnectTo(ctx, p) if err != nil { log.Debugf("failed to connect to provider %s: %s", p, err) } - spm.peerMessages <- &peerFoundMessage{p} + select { + case spm.peerMessages <- &peerFoundMessage{p}: + case <-ctx.Done(): + case <-spm.ctx.Done(): + } }(p) } }(c) diff --git a/bitswap/sessionrequestsplitter/sessionrequestsplitter.go b/bitswap/sessionrequestsplitter/sessionrequestsplitter.go index 32dcf1fc8..1305b73b2 100644 --- a/bitswap/sessionrequestsplitter/sessionrequestsplitter.go +++ b/bitswap/sessionrequestsplitter/sessionrequestsplitter.go @@ -51,7 +51,7 @@ func New(ctx context.Context) *SessionRequestSplitter { // SplitRequest splits a request for the given cids one or more times among the // given peers. func (srs *SessionRequestSplitter) SplitRequest(peers []peer.ID, ks []cid.Cid) []*PartialRequest { - resp := make(chan []*PartialRequest) + resp := make(chan []*PartialRequest, 1) select { case srs.messages <- &splitRequestMessage{peers, ks, resp}: diff --git a/bitswap/wantmanager/wantmanager.go b/bitswap/wantmanager/wantmanager.go index bf14ea711..3e5a6c9ab 100644 --- a/bitswap/wantmanager/wantmanager.go +++ b/bitswap/wantmanager/wantmanager.go @@ -83,30 +83,66 @@ func (wm *WantManager) CancelWants(ctx context.Context, ks []cid.Cid, peers []pe // IsWanted returns whether a CID is currently wanted. func (wm *WantManager) IsWanted(c cid.Cid) bool { - resp := make(chan bool) - wm.wantMessages <- &isWantedMessage{c, resp} - return <-resp + resp := make(chan bool, 1) + select { + case wm.wantMessages <- &isWantedMessage{c, resp}: + case <-wm.ctx.Done(): + return false + } + select { + case wanted := <-resp: + return wanted + case <-wm.ctx.Done(): + return false + } } // CurrentWants returns the list of current wants. 
func (wm *WantManager) CurrentWants() []*wantlist.Entry { - resp := make(chan []*wantlist.Entry) - wm.wantMessages <- ¤tWantsMessage{resp} - return <-resp + resp := make(chan []*wantlist.Entry, 1) + select { + case wm.wantMessages <- ¤tWantsMessage{resp}: + case <-wm.ctx.Done(): + return nil + } + select { + case wantlist := <-resp: + return wantlist + case <-wm.ctx.Done(): + return nil + } } // CurrentBroadcastWants returns the current list of wants that are broadcasts. func (wm *WantManager) CurrentBroadcastWants() []*wantlist.Entry { - resp := make(chan []*wantlist.Entry) - wm.wantMessages <- ¤tBroadcastWantsMessage{resp} - return <-resp + resp := make(chan []*wantlist.Entry, 1) + select { + case wm.wantMessages <- ¤tBroadcastWantsMessage{resp}: + case <-wm.ctx.Done(): + return nil + } + select { + case wl := <-resp: + return wl + case <-wm.ctx.Done(): + return nil + } } // WantCount returns the total count of wants. func (wm *WantManager) WantCount() int { - resp := make(chan int) - wm.wantMessages <- &wantCountMessage{resp} - return <-resp + resp := make(chan int, 1) + select { + case wm.wantMessages <- &wantCountMessage{resp}: + case <-wm.ctx.Done(): + return 0 + } + select { + case count := <-resp: + return count + case <-wm.ctx.Done(): + return 0 + } } // Startup starts processing for the WantManager. diff --git a/bitswap/workers.go b/bitswap/workers.go index 32f9da813..688a1d99d 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -217,11 +217,15 @@ func (bs *Bitswap) rebroadcastWorker(parent context.Context) { // TODO: come up with a better strategy for determining when to search // for new providers for blocks. 
i := rand.Intn(len(entries)) - bs.findKeys <- &blockRequest{ + select { + case bs.findKeys <- &blockRequest{ Cid: entries[i].Cid, Ctx: ctx, + }: + case <-ctx.Done(): + return } - case <-parent.Done(): + case <-ctx.Done(): return } } From a6edb415820c86f114e84b81e627f607dade0c16 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 24 Jan 2019 15:40:43 -0800 Subject: [PATCH 0691/1035] fix(tests): stabilize session tests Improve stability of tests for Session and SessionPeerManager fix #61 This commit was moved from ipfs/go-bitswap@03e10a06768f3bcdc89aeb9ea45bfb0d354b08ee --- bitswap/session/session_test.go | 90 ++++++++++++++----- .../sessionpeermanager_test.go | 82 ++++++++++++++--- 2 files changed, 137 insertions(+), 35 deletions(-) diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index d578f7a73..9f6aef549 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -26,11 +26,17 @@ type fakeWantManager struct { } func (fwm *fakeWantManager) WantBlocks(ctx context.Context, cids []cid.Cid, peers []peer.ID, ses uint64) { - fwm.wantReqs <- wantReq{cids, peers} + select { + case fwm.wantReqs <- wantReq{cids, peers}: + case <-ctx.Done(): + } } func (fwm *fakeWantManager) CancelWants(ctx context.Context, cids []cid.Cid, peers []peer.ID, ses uint64) { - fwm.cancelReqs <- wantReq{cids, peers} + select { + case fwm.cancelReqs <- wantReq{cids, peers}: + case <-ctx.Done(): + } } type fakePeerManager struct { @@ -39,8 +45,11 @@ type fakePeerManager struct { findMorePeersRequested chan struct{} } -func (fpm *fakePeerManager) FindMorePeers(context.Context, cid.Cid) { - fpm.findMorePeersRequested <- struct{}{} +func (fpm *fakePeerManager) FindMorePeers(ctx context.Context, k cid.Cid) { + select { + case fpm.findMorePeersRequested <- struct{}{}: + case <-ctx.Done(): + } } func (fpm *fakePeerManager) GetOptimizedPeers() []peer.ID { @@ -105,10 +114,20 @@ func TestSessionGetBlocks(t *testing.T) { var receivedBlocks 
[]blocks.Block for i, p := range peers { session.ReceiveBlockFrom(p, blks[testutil.IndexOf(blks, receivedWantReq.cids[i])]) - receivedBlock := <-getBlocksCh - receivedBlocks = append(receivedBlocks, receivedBlock) - cancelBlock := <-cancelReqs - newCancelReqs = append(newCancelReqs, cancelBlock) + select { + case cancelBlock := <-cancelReqs: + newCancelReqs = append(newCancelReqs, cancelBlock) + case <-ctx.Done(): + t.Fatal("did not cancel block want") + } + + select { + case receivedBlock := <-getBlocksCh: + receivedBlocks = append(receivedBlocks, receivedBlock) + case <-ctx.Done(): + t.Fatal("Did not receive block!") + } + select { case wantBlock := <-wantReqs: newBlockReqs = append(newBlockReqs, wantBlock) @@ -169,7 +188,7 @@ func TestSessionGetBlocks(t *testing.T) { func TestSessionFindMorePeers(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 900*time.Millisecond) defer cancel() wantReqs := make(chan wantReq, 1) cancelReqs := make(chan wantReq, 1) @@ -191,26 +210,51 @@ func TestSessionFindMorePeers(t *testing.T) { } // clear the initial block of wants - <-wantReqs + select { + case <-wantReqs: + case <-ctx.Done(): + t.Fatal("Did not make first want request ") + } // receive a block to trigger a tick reset - time.Sleep(200 * time.Microsecond) + time.Sleep(20 * time.Millisecond) // need to make sure some latency registers + // or there will be no tick set -- time precision on Windows in go is in the + // millisecond range p := testutil.GeneratePeers(1)[0] session.ReceiveBlockFrom(p, blks[0]) - <-getBlocksCh - <-wantReqs - <-cancelReqs - - // wait for a request to get more peers to occur - <-fpm.findMorePeersRequested + select { + case <-cancelReqs: + case <-ctx.Done(): + t.Fatal("Did not cancel block") + } + select { + case <-getBlocksCh: + case <-ctx.Done(): + t.Fatal("Did not get block") + } + select { + case <-wantReqs: + case <-ctx.Done(): + t.Fatal("Did 
not make second want request ") + } // verify a broadcast was made - receivedWantReq := <-wantReqs - if len(receivedWantReq.cids) < broadcastLiveWantsLimit { - t.Fatal("did not rebroadcast whole live list") + select { + case receivedWantReq := <-wantReqs: + if len(receivedWantReq.cids) < broadcastLiveWantsLimit { + t.Fatal("did not rebroadcast whole live list") + } + if receivedWantReq.peers != nil { + t.Fatal("did not make a broadcast") + } + case <-ctx.Done(): + t.Fatal("Never rebroadcast want list") } - if receivedWantReq.peers != nil { - t.Fatal("did not make a broadcast") + + // wait for a request to get more peers to occur + select { + case <-fpm.findMorePeersRequested: + case <-ctx.Done(): + t.Fatal("Did not find more peers") } - <-ctx.Done() } diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index b4e723b10..2ec38f0a4 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -2,6 +2,7 @@ package sessionpeermanager import ( "context" + "errors" "math/rand" "sync" "testing" @@ -18,27 +19,40 @@ import ( type fakePeerNetwork struct { peers []peer.ID connManager ifconnmgr.ConnManager + completed chan struct{} + connect chan struct{} } func (fpn *fakePeerNetwork) ConnectionManager() ifconnmgr.ConnManager { return fpn.connManager } -func (fpn *fakePeerNetwork) ConnectTo(context.Context, peer.ID) error { - return nil +func (fpn *fakePeerNetwork) ConnectTo(ctx context.Context, p peer.ID) error { + select { + case fpn.connect <- struct{}{}: + return nil + case <-ctx.Done(): + return errors.New("Timeout Occurred") + } } func (fpn *fakePeerNetwork) FindProvidersAsync(ctx context.Context, c cid.Cid, num int) <-chan peer.ID { peerCh := make(chan peer.ID) go func() { - defer close(peerCh) for _, p := range fpn.peers { select { case peerCh <- p: case <-ctx.Done(): + close(peerCh) return } } + close(peerCh) + + select { + case 
fpn.completed <- struct{}{}: + case <-ctx.Done(): + } }() return peerCh } @@ -55,7 +69,6 @@ func (fcm *fakeConnManager) TagPeer(p peer.ID, tag string, n int) { func (fcm *fakeConnManager) UntagPeer(p peer.ID, tag string) { defer fcm.wait.Done() - for i := 0; i < len(fcm.taggedPeers); i++ { if fcm.taggedPeers[i] == p { fcm.taggedPeers[i] = fcm.taggedPeers[len(fcm.taggedPeers)-1] @@ -63,7 +76,6 @@ func (fcm *fakeConnManager) UntagPeer(p peer.ID, tag string) { return } } - } func (*fakeConnManager) GetTagInfo(p peer.ID) *ifconnmgr.TagInfo { return nil } @@ -74,9 +86,12 @@ func TestFindingMorePeers(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() + completed := make(chan struct{}) + connect := make(chan struct{}) + peers := testutil.GeneratePeers(5) fcm := &fakeConnManager{} - fpn := &fakePeerNetwork{peers, fcm} + fpn := &fakePeerNetwork{peers, fcm, completed, connect} c := testutil.GenerateCids(1)[0] id := testutil.GenerateSessionID() @@ -85,7 +100,20 @@ func TestFindingMorePeers(t *testing.T) { findCtx, findCancel := context.WithTimeout(ctx, 10*time.Millisecond) defer findCancel() sessionPeerManager.FindMorePeers(ctx, c) - <-findCtx.Done() + select { + case <-completed: + case <-findCtx.Done(): + t.Fatal("Did not finish finding providers") + } + for range peers { + select { + case <-connect: + case <-findCtx.Done(): + t.Fatal("Did not connect to peer") + } + } + time.Sleep(2 * time.Millisecond) + sessionPeers := sessionPeerManager.GetOptimizedPeers() if len(sessionPeers) != len(peers) { t.Fatal("incorrect number of peers found") @@ -106,7 +134,7 @@ func TestRecordingReceivedBlocks(t *testing.T) { defer cancel() p := testutil.GeneratePeers(1)[0] fcm := &fakeConnManager{} - fpn := &fakePeerNetwork{nil, fcm} + fpn := &fakePeerNetwork{nil, fcm, nil, nil} c := testutil.GenerateCids(1)[0] id := testutil.GenerateSessionID() @@ -127,17 +155,32 @@ func TestRecordingReceivedBlocks(t *testing.T) { func TestOrderingPeers(t 
*testing.T) { ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithTimeout(ctx, 30*time.Millisecond) defer cancel() peers := testutil.GeneratePeers(100) + completed := make(chan struct{}) + connect := make(chan struct{}) fcm := &fakeConnManager{} - fpn := &fakePeerNetwork{peers, fcm} + fpn := &fakePeerNetwork{peers, fcm, completed, connect} c := testutil.GenerateCids(1) id := testutil.GenerateSessionID() sessionPeerManager := New(ctx, id, fpn) // add all peers to session sessionPeerManager.FindMorePeers(ctx, c[0]) + select { + case <-completed: + case <-ctx.Done(): + t.Fatal("Did not finish finding providers") + } + for range peers { + select { + case <-connect: + case <-ctx.Done(): + t.Fatal("Did not connect to peer") + } + } + time.Sleep(2 * time.Millisecond) // record broadcast sessionPeerManager.RecordPeerRequests(nil, c) @@ -193,15 +236,30 @@ func TestUntaggingPeers(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) defer cancel() peers := testutil.GeneratePeers(5) + completed := make(chan struct{}) + connect := make(chan struct{}) fcm := &fakeConnManager{} - fpn := &fakePeerNetwork{peers, fcm} + fpn := &fakePeerNetwork{peers, fcm, completed, connect} c := testutil.GenerateCids(1)[0] id := testutil.GenerateSessionID() sessionPeerManager := New(ctx, id, fpn) sessionPeerManager.FindMorePeers(ctx, c) - time.Sleep(5 * time.Millisecond) + select { + case <-completed: + case <-ctx.Done(): + t.Fatal("Did not finish finding providers") + } + for range peers { + select { + case <-connect: + case <-ctx.Done(): + t.Fatal("Did not connect to peer") + } + } + time.Sleep(2 * time.Millisecond) + if len(fcm.taggedPeers) != len(peers) { t.Fatal("Peers were not tagged!") } From e81b87ce5aab6f8df8f4a1f83dd4d9926ad8737a Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 22 Jan 2019 17:55:05 -0800 Subject: [PATCH 0692/1035] feat(bitswap): Add a ProvideQueryManager Add a manger for querying providers on 
blocks, in charge of managing requests, deduping, and rate limiting This commit was moved from ipfs/go-bitswap@5db627fe21da6f7355756ed402c676f5507ee9e3 --- .../providerquerymanager.go | 343 ++++++++++++++++++ .../providerquerymanager_test.go | 274 ++++++++++++++ 2 files changed, 617 insertions(+) create mode 100644 bitswap/providerquerymanager/providerquerymanager.go create mode 100644 bitswap/providerquerymanager/providerquerymanager_test.go diff --git a/bitswap/providerquerymanager/providerquerymanager.go b/bitswap/providerquerymanager/providerquerymanager.go new file mode 100644 index 000000000..49075a20d --- /dev/null +++ b/bitswap/providerquerymanager/providerquerymanager.go @@ -0,0 +1,343 @@ +package providerquerymanager + +import ( + "context" + "sync" + + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log" + peer "github.com/libp2p/go-libp2p-peer" +) + +var log = logging.Logger("bitswap") + +const ( + maxProviders = 10 + maxInProcessRequests = 6 +) + +type inProgressRequestStatus struct { + providersSoFar []peer.ID + listeners map[uint64]chan peer.ID +} + +// ProviderQueryNetwork is an interface for finding providers and connecting to +// peers. +type ProviderQueryNetwork interface { + ConnectTo(context.Context, peer.ID) error + FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID +} + +type providerQueryMessage interface { + handle(pqm *ProviderQueryManager) +} + +type receivedProviderMessage struct { + k cid.Cid + p peer.ID +} + +type finishedProviderQueryMessage struct { + k cid.Cid +} + +type newProvideQueryMessage struct { + ses uint64 + k cid.Cid + inProgressRequestChan chan<- inProgressRequest +} + +type cancelRequestMessage struct { + ses uint64 + k cid.Cid +} + +// ProviderQueryManager manages requests to find more providers for blocks +// for bitswap sessions. 
It's main goals are to: +// - rate limit requests -- don't have too many find provider calls running +// simultaneously +// - connect to found peers and filter them if it can't connect +// - ensure two findprovider calls for the same block don't run concurrently +// TODO: +// - manage timeouts +type ProviderQueryManager struct { + ctx context.Context + network ProviderQueryNetwork + providerQueryMessages chan providerQueryMessage + + // do not touch outside the run loop + providerRequestsProcessing chan cid.Cid + incomingFindProviderRequests chan cid.Cid + inProgressRequestStatuses map[cid.Cid]*inProgressRequestStatus +} + +// New initializes a new ProviderQueryManager for a given context and a given +// network provider. +func New(ctx context.Context, network ProviderQueryNetwork) *ProviderQueryManager { + return &ProviderQueryManager{ + ctx: ctx, + network: network, + providerQueryMessages: make(chan providerQueryMessage, 16), + providerRequestsProcessing: make(chan cid.Cid), + incomingFindProviderRequests: make(chan cid.Cid), + inProgressRequestStatuses: make(map[cid.Cid]*inProgressRequestStatus), + } +} + +// Startup starts processing for the ProviderQueryManager. +func (pqm *ProviderQueryManager) Startup() { + go pqm.run() +} + +type inProgressRequest struct { + providersSoFar []peer.ID + incoming <-chan peer.ID +} + +// FindProvidersAsync finds providers for the given block. 
+func (pqm *ProviderQueryManager) FindProvidersAsync(sessionCtx context.Context, k cid.Cid, ses uint64) <-chan peer.ID { + inProgressRequestChan := make(chan inProgressRequest) + + select { + case pqm.providerQueryMessages <- &newProvideQueryMessage{ + ses: ses, + k: k, + inProgressRequestChan: inProgressRequestChan, + }: + case <-pqm.ctx.Done(): + return nil + case <-sessionCtx.Done(): + return nil + } + + var receivedInProgressRequest inProgressRequest + select { + case <-sessionCtx.Done(): + return nil + case receivedInProgressRequest = <-inProgressRequestChan: + } + + return pqm.receiveProviders(sessionCtx, k, ses, receivedInProgressRequest) +} + +func (pqm *ProviderQueryManager) receiveProviders(sessionCtx context.Context, k cid.Cid, ses uint64, receivedInProgressRequest inProgressRequest) <-chan peer.ID { + // maintains an unbuffered queue for incoming providers for given request for a given session + // essentially, as a provider comes in, for a given CID, we want to immediately broadcast to all + // sessions that queried that CID, without worrying about whether the client code is actually + // reading from the returned channel -- so that the broadcast never blocks + // based on: https://medium.com/capital-one-tech/building-an-unbounded-channel-in-go-789e175cd2cd + returnedProviders := make(chan peer.ID) + receivedProviders := append([]peer.ID(nil), receivedInProgressRequest.providersSoFar[0:]...) 
+ incomingProviders := receivedInProgressRequest.incoming + + go func() { + defer close(returnedProviders) + outgoingProviders := func() chan<- peer.ID { + if len(receivedProviders) == 0 { + return nil + } + return returnedProviders + } + nextProvider := func() peer.ID { + if len(receivedProviders) == 0 { + return "" + } + return receivedProviders[0] + } + for len(receivedProviders) > 0 || incomingProviders != nil { + select { + case <-sessionCtx.Done(): + pqm.providerQueryMessages <- &cancelRequestMessage{ + ses: ses, + k: k, + } + // clear out any remaining providers + for range incomingProviders { + } + return + case provider, ok := <-incomingProviders: + if !ok { + incomingProviders = nil + } else { + receivedProviders = append(receivedProviders, provider) + } + case outgoingProviders() <- nextProvider(): + receivedProviders = receivedProviders[1:] + } + } + }() + return returnedProviders +} + +func (pqm *ProviderQueryManager) findProviderWorker() { + // findProviderWorker just cycles through incoming provider queries one + // at a time. 
We have six of these workers running at once + // to let requests go in parallel but keep them rate limited + for { + select { + case k, ok := <-pqm.providerRequestsProcessing: + if !ok { + return + } + + providers := pqm.network.FindProvidersAsync(pqm.ctx, k, maxProviders) + wg := &sync.WaitGroup{} + for p := range providers { + wg.Add(1) + go func(p peer.ID) { + defer wg.Done() + err := pqm.network.ConnectTo(pqm.ctx, p) + if err != nil { + log.Debugf("failed to connect to provider %s: %s", p, err) + return + } + select { + case pqm.providerQueryMessages <- &receivedProviderMessage{ + k: k, + p: p, + }: + case <-pqm.ctx.Done(): + return + } + }(p) + } + wg.Wait() + select { + case pqm.providerQueryMessages <- &finishedProviderQueryMessage{ + k: k, + }: + case <-pqm.ctx.Done(): + } + case <-pqm.ctx.Done(): + return + } + } +} + +func (pqm *ProviderQueryManager) providerRequestBufferWorker() { + // the provider request buffer worker just maintains an unbounded + // buffer for incoming provider queries and dispatches to the find + // provider workers as they become available + // based on: https://medium.com/capital-one-tech/building-an-unbounded-channel-in-go-789e175cd2cd + var providerQueryRequestBuffer []cid.Cid + nextProviderQuery := func() cid.Cid { + if len(providerQueryRequestBuffer) == 0 { + return cid.Cid{} + } + return providerQueryRequestBuffer[0] + } + outgoingRequests := func() chan<- cid.Cid { + if len(providerQueryRequestBuffer) == 0 { + return nil + } + return pqm.providerRequestsProcessing + } + + for { + select { + case incomingRequest, ok := <-pqm.incomingFindProviderRequests: + if !ok { + return + } + providerQueryRequestBuffer = append(providerQueryRequestBuffer, incomingRequest) + case outgoingRequests() <- nextProviderQuery(): + providerQueryRequestBuffer = providerQueryRequestBuffer[1:] + case <-pqm.ctx.Done(): + return + } + } +} + +func (pqm *ProviderQueryManager) cleanupInProcessRequests() { + for _, requestStatus := range 
pqm.inProgressRequestStatuses { + for _, listener := range requestStatus.listeners { + close(listener) + } + } +} + +func (pqm *ProviderQueryManager) run() { + defer close(pqm.incomingFindProviderRequests) + defer close(pqm.providerRequestsProcessing) + defer pqm.cleanupInProcessRequests() + + go pqm.providerRequestBufferWorker() + for i := 0; i < maxInProcessRequests; i++ { + go pqm.findProviderWorker() + } + + for { + select { + case nextMessage := <-pqm.providerQueryMessages: + nextMessage.handle(pqm) + case <-pqm.ctx.Done(): + return + } + } +} + +func (rpm *receivedProviderMessage) handle(pqm *ProviderQueryManager) { + requestStatus, ok := pqm.inProgressRequestStatuses[rpm.k] + if !ok { + log.Errorf("Received provider (%s) for cid (%s) not requested", rpm.p.String(), rpm.k.String()) + return + } + requestStatus.providersSoFar = append(requestStatus.providersSoFar, rpm.p) + for _, listener := range requestStatus.listeners { + select { + case listener <- rpm.p: + case <-pqm.ctx.Done(): + return + } + } +} + +func (fpqm *finishedProviderQueryMessage) handle(pqm *ProviderQueryManager) { + requestStatus, ok := pqm.inProgressRequestStatuses[fpqm.k] + if !ok { + log.Errorf("Ended request for cid (%s) not in progress", fpqm.k.String()) + return + } + for _, listener := range requestStatus.listeners { + close(listener) + } + delete(pqm.inProgressRequestStatuses, fpqm.k) +} + +func (npqm *newProvideQueryMessage) handle(pqm *ProviderQueryManager) { + requestStatus, ok := pqm.inProgressRequestStatuses[npqm.k] + if !ok { + requestStatus = &inProgressRequestStatus{ + listeners: make(map[uint64]chan peer.ID), + } + pqm.inProgressRequestStatuses[npqm.k] = requestStatus + select { + case pqm.incomingFindProviderRequests <- npqm.k: + case <-pqm.ctx.Done(): + return + } + } + requestStatus.listeners[npqm.ses] = make(chan peer.ID) + select { + case npqm.inProgressRequestChan <- inProgressRequest{ + providersSoFar: requestStatus.providersSoFar, + incoming: 
requestStatus.listeners[npqm.ses], + }: + case <-pqm.ctx.Done(): + } +} + +func (crm *cancelRequestMessage) handle(pqm *ProviderQueryManager) { + requestStatus, ok := pqm.inProgressRequestStatuses[crm.k] + if !ok { + log.Errorf("Attempt to cancel request for session (%d) for cid (%s) not in progress", crm.ses, crm.k.String()) + return + } + listener, ok := requestStatus.listeners[crm.ses] + if !ok { + log.Errorf("Attempt to cancel request for session (%d) for cid (%s) this is not a listener", crm.ses, crm.k.String()) + return + } + close(listener) + delete(requestStatus.listeners, crm.ses) +} diff --git a/bitswap/providerquerymanager/providerquerymanager_test.go b/bitswap/providerquerymanager/providerquerymanager_test.go new file mode 100644 index 000000000..68893198e --- /dev/null +++ b/bitswap/providerquerymanager/providerquerymanager_test.go @@ -0,0 +1,274 @@ +package providerquerymanager + +import ( + "context" + "errors" + "reflect" + "testing" + "time" + + "github.com/ipfs/go-bitswap/testutil" + + cid "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-peer" +) + +type fakeProviderNetwork struct { + peersFound []peer.ID + connectError error + delay time.Duration + connectDelay time.Duration + queriesMade int +} + +func (fpn *fakeProviderNetwork) ConnectTo(context.Context, peer.ID) error { + time.Sleep(fpn.connectDelay) + return fpn.connectError +} + +func (fpn *fakeProviderNetwork) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { + fpn.queriesMade++ + incomingPeers := make(chan peer.ID) + go func() { + defer close(incomingPeers) + for _, p := range fpn.peersFound { + time.Sleep(fpn.delay) + select { + case incomingPeers <- p: + case <-ctx.Done(): + return + } + } + }() + return incomingPeers +} + +func TestNormalSimultaneousFetch(t *testing.T) { + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := 
New(ctx, fpn) + providerQueryManager.Startup() + keys := testutil.GenerateCids(2) + sessionID1 := testutil.GenerateSessionID() + sessionID2 := testutil.GenerateSessionID() + + sessionCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) + defer cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0], sessionID1) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[1], sessionID2) + + var firstPeersReceived []peer.ID + for p := range firstRequestChan { + firstPeersReceived = append(firstPeersReceived, p) + } + + var secondPeersReceived []peer.ID + for p := range secondRequestChan { + secondPeersReceived = append(secondPeersReceived, p) + } + + if len(firstPeersReceived) != len(peers) || len(secondPeersReceived) != len(peers) { + t.Fatal("Did not collect all peers for request that was completed") + } + + if fpn.queriesMade != 2 { + t.Fatal("Did not dedup provider requests running simultaneously") + } +} + +func TestDedupingProviderRequests(t *testing.T) { + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + key := testutil.GenerateCids(1)[0] + sessionID1 := testutil.GenerateSessionID() + sessionID2 := testutil.GenerateSessionID() + + sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) + defer cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, sessionID1) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, sessionID2) + + var firstPeersReceived []peer.ID + for p := range firstRequestChan { + firstPeersReceived = append(firstPeersReceived, p) + } + + var secondPeersReceived []peer.ID + for p := range secondRequestChan { + secondPeersReceived = append(secondPeersReceived, p) + } + + if len(firstPeersReceived) != len(peers) || 
len(secondPeersReceived) != len(peers) { + t.Fatal("Did not collect all peers for request that was completed") + } + + if !reflect.DeepEqual(firstPeersReceived, secondPeersReceived) { + t.Fatal("Did not receive the same response to both find provider requests") + } + + if fpn.queriesMade != 1 { + t.Fatal("Did not dedup provider requests running simultaneously") + } +} + +func TestCancelOneRequestDoesNotTerminateAnother(t *testing.T) { + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + + key := testutil.GenerateCids(1)[0] + sessionID1 := testutil.GenerateSessionID() + sessionID2 := testutil.GenerateSessionID() + + // first session will cancel before done + firstSessionCtx, firstCancel := context.WithTimeout(ctx, 3*time.Millisecond) + defer firstCancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(firstSessionCtx, key, sessionID1) + secondSessionCtx, secondCancel := context.WithTimeout(ctx, 20*time.Millisecond) + defer secondCancel() + secondRequestChan := providerQueryManager.FindProvidersAsync(secondSessionCtx, key, sessionID2) + + var firstPeersReceived []peer.ID + for p := range firstRequestChan { + firstPeersReceived = append(firstPeersReceived, p) + } + + var secondPeersReceived []peer.ID + for p := range secondRequestChan { + secondPeersReceived = append(secondPeersReceived, p) + } + + if len(secondPeersReceived) != len(peers) { + t.Fatal("Did not collect all peers for request that was completed") + } + + if len(firstPeersReceived) >= len(peers) { + t.Fatal("Collected all peers on cancelled peer, should have been cancelled immediately") + } + + if fpn.queriesMade != 1 { + t.Fatal("Did not dedup provider requests running simultaneously") + } +} + +func TestCancelManagerExitsGracefully(t *testing.T) { + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ 
+ peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + managerCtx, managerCancel := context.WithTimeout(ctx, 5*time.Millisecond) + defer managerCancel() + providerQueryManager := New(managerCtx, fpn) + providerQueryManager.Startup() + + key := testutil.GenerateCids(1)[0] + sessionID1 := testutil.GenerateSessionID() + sessionID2 := testutil.GenerateSessionID() + + sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) + defer cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, sessionID1) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, sessionID2) + + var firstPeersReceived []peer.ID + for p := range firstRequestChan { + firstPeersReceived = append(firstPeersReceived, p) + } + + var secondPeersReceived []peer.ID + for p := range secondRequestChan { + secondPeersReceived = append(secondPeersReceived, p) + } + + if len(firstPeersReceived) <= 0 || + len(firstPeersReceived) >= len(peers) || + len(secondPeersReceived) <= 0 || + len(secondPeersReceived) >= len(peers) { + t.Fatal("Did not cancel requests in progress correctly") + } +} + +func TestPeersWithConnectionErrorsNotAddedToPeerList(t *testing.T) { + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + connectError: errors.New("not able to connect"), + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + + key := testutil.GenerateCids(1)[0] + sessionID1 := testutil.GenerateSessionID() + sessionID2 := testutil.GenerateSessionID() + + sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) + defer cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, sessionID1) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, sessionID2) + + var firstPeersReceived []peer.ID + for p := range firstRequestChan { + 
firstPeersReceived = append(firstPeersReceived, p) + } + + var secondPeersReceived []peer.ID + for p := range secondRequestChan { + secondPeersReceived = append(secondPeersReceived, p) + } + + if len(firstPeersReceived) != 0 || len(secondPeersReceived) != 0 { + t.Fatal("Did not filter out peers with connection issues") + } + +} + +func TestRateLimitingRequests(t *testing.T) { + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + + keys := testutil.GenerateCids(maxInProcessRequests + 1) + sessionID := testutil.GenerateSessionID() + sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) + defer cancel() + var requestChannels []<-chan peer.ID + for i := 0; i < maxInProcessRequests+1; i++ { + requestChannels = append(requestChannels, providerQueryManager.FindProvidersAsync(sessionCtx, keys[i], sessionID)) + } + time.Sleep(2 * time.Millisecond) + if fpn.queriesMade != maxInProcessRequests { + t.Fatal("Did not limit parallel requests to rate limit") + } + for i := 0; i < maxInProcessRequests+1; i++ { + for range requestChannels[i] { + } + } + + if fpn.queriesMade != maxInProcessRequests+1 { + t.Fatal("Did not make all seperate requests") + } +} From 3e42226964c2fd3313b333c5c7639f3a7c3aaade Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 22 Jan 2019 18:18:29 -0800 Subject: [PATCH 0693/1035] feat(ProviderQueryManager): manage timeouts Add functionality to timeout find provider requests so they don't run forever This commit was moved from ipfs/go-bitswap@1f2b49efe3f888ace93fd7ccf1b200a134627243 --- .../providerquerymanager.go | 32 ++++++++++++++----- .../providerquerymanager_test.go | 26 +++++++++++++++ 2 files changed, 50 insertions(+), 8 deletions(-) diff --git a/bitswap/providerquerymanager/providerquerymanager.go b/bitswap/providerquerymanager/providerquerymanager.go index 
49075a20d..d2ba9e72b 100644 --- a/bitswap/providerquerymanager/providerquerymanager.go +++ b/bitswap/providerquerymanager/providerquerymanager.go @@ -3,6 +3,7 @@ package providerquerymanager import ( "context" "sync" + "time" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" @@ -14,6 +15,7 @@ var log = logging.Logger("bitswap") const ( maxProviders = 10 maxInProcessRequests = 6 + defaultTimeout = 10 * time.Second ) type inProgressRequestStatus struct { @@ -58,17 +60,19 @@ type cancelRequestMessage struct { // simultaneously // - connect to found peers and filter them if it can't connect // - ensure two findprovider calls for the same block don't run concurrently -// TODO: // - manage timeouts type ProviderQueryManager struct { - ctx context.Context - network ProviderQueryNetwork - providerQueryMessages chan providerQueryMessage - - // do not touch outside the run loop + ctx context.Context + network ProviderQueryNetwork + providerQueryMessages chan providerQueryMessage providerRequestsProcessing chan cid.Cid incomingFindProviderRequests chan cid.Cid - inProgressRequestStatuses map[cid.Cid]*inProgressRequestStatus + + findProviderTimeout time.Duration + timeoutMutex sync.RWMutex + + // do not touch outside the run loop + inProgressRequestStatuses map[cid.Cid]*inProgressRequestStatus } // New initializes a new ProviderQueryManager for a given context and a given @@ -81,6 +85,7 @@ func New(ctx context.Context, network ProviderQueryNetwork) *ProviderQueryManage providerRequestsProcessing: make(chan cid.Cid), incomingFindProviderRequests: make(chan cid.Cid), inProgressRequestStatuses: make(map[cid.Cid]*inProgressRequestStatus), + findProviderTimeout: defaultTimeout, } } @@ -94,6 +99,13 @@ type inProgressRequest struct { incoming <-chan peer.ID } +// SetFindProviderTimeout changes the timeout for finding providers +func (pqm *ProviderQueryManager) SetFindProviderTimeout(findProviderTimeout time.Duration) { + pqm.timeoutMutex.Lock() + pqm.findProviderTimeout = 
findProviderTimeout + pqm.timeoutMutex.Unlock() +} + // FindProvidersAsync finds providers for the given block. func (pqm *ProviderQueryManager) FindProvidersAsync(sessionCtx context.Context, k cid.Cid, ses uint64) <-chan peer.ID { inProgressRequestChan := make(chan inProgressRequest) @@ -180,7 +192,11 @@ func (pqm *ProviderQueryManager) findProviderWorker() { return } - providers := pqm.network.FindProvidersAsync(pqm.ctx, k, maxProviders) + pqm.timeoutMutex.RLock() + findProviderCtx, cancel := context.WithTimeout(pqm.ctx, pqm.findProviderTimeout) + pqm.timeoutMutex.RUnlock() + defer cancel() + providers := pqm.network.FindProvidersAsync(findProviderCtx, k, maxProviders) wg := &sync.WaitGroup{} for p := range providers { wg.Add(1) diff --git a/bitswap/providerquerymanager/providerquerymanager_test.go b/bitswap/providerquerymanager/providerquerymanager_test.go index 68893198e..f2e6f0362 100644 --- a/bitswap/providerquerymanager/providerquerymanager_test.go +++ b/bitswap/providerquerymanager/providerquerymanager_test.go @@ -272,3 +272,29 @@ func TestRateLimitingRequests(t *testing.T) { t.Fatal("Did not make all seperate requests") } } + +func TestFindProviderTimeout(t *testing.T) { + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + providerQueryManager.SetFindProviderTimeout(3 * time.Millisecond) + keys := testutil.GenerateCids(1) + sessionID1 := testutil.GenerateSessionID() + + sessionCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) + defer cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0], sessionID1) + var firstPeersReceived []peer.ID + for p := range firstRequestChan { + firstPeersReceived = append(firstPeersReceived, p) + } + if len(firstPeersReceived) <= 0 || + len(firstPeersReceived) >= len(peers) { + t.Fatal("Find provider request 
should have timed out, did not") + } +} From a6b76cc52431d4eec108f3556128b3e627940134 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 22 Jan 2019 18:46:42 -0800 Subject: [PATCH 0694/1035] feat(ProviderQueryManager): integrate in sessions Integrate the ProviderQueryManager into the SessionPeerManager and bitswap in general re #52, re #49 This commit was moved from ipfs/go-bitswap@843391e63fe3534c85f1c3fc4892b809fd850d72 --- bitswap/bitswap.go | 10 +- .../sessionpeermanager/sessionpeermanager.go | 66 +++++----- .../sessionpeermanager_test.go | 114 ++++++------------ 3 files changed, 74 insertions(+), 116 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index c4b8e8879..ee0c939f3 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -18,6 +18,7 @@ import ( bsnet "github.com/ipfs/go-bitswap/network" notifications "github.com/ipfs/go-bitswap/notifications" bspm "github.com/ipfs/go-bitswap/peermanager" + bspqm "github.com/ipfs/go-bitswap/providerquerymanager" bssession "github.com/ipfs/go-bitswap/session" bssm "github.com/ipfs/go-bitswap/sessionmanager" bsspm "github.com/ipfs/go-bitswap/sessionpeermanager" @@ -105,11 +106,13 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, } wm := bswm.New(ctx) + pqm := bspqm.New(ctx, network) + sessionFactory := func(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter) bssm.Session { return bssession.New(ctx, id, wm, pm, srs) } sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.PeerManager { - return bsspm.New(ctx, id, network) + return bsspm.New(ctx, id, network.ConnectionManager(), pqm) } sessionRequestSplitterFactory := func(ctx context.Context) bssession.RequestSplitter { return bssrs.New(ctx) @@ -125,6 +128,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, newBlocks: make(chan cid.Cid, HasBlockBufferSize), provideKeys: make(chan cid.Cid, provideKeysBufferSize), wm: wm, + pqm: pqm, pm: bspm.New(ctx, 
peerQueueFactory), sm: bssm.New(ctx, sessionFactory, sessionPeerManagerFactory, sessionRequestSplitterFactory), counters: new(counters), @@ -136,6 +140,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, bs.wm.SetDelegate(bs.pm) bs.pm.Startup() bs.wm.Startup() + bs.pqm.Startup() network.SetDelegate(bs) // Start up bitswaps async worker routines @@ -161,6 +166,9 @@ type Bitswap struct { // the wantlist tracks global wants for bitswap wm *bswm.WantManager + // the provider query manager manages requests to find providers + pqm *bspqm.ProviderQueryManager + // the engine is the bit of logic that decides who to send which blocks to engine *decision.Engine diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index 225f19017..091e1c7ef 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -8,7 +8,6 @@ import ( logging "github.com/ipfs/go-log" cid "github.com/ipfs/go-cid" - ifconnmgr "github.com/libp2p/go-libp2p-interface-connmgr" peer "github.com/libp2p/go-libp2p-peer" ) @@ -19,11 +18,15 @@ const ( reservePeers = 2 ) -// PeerNetwork is an interface for finding providers and managing connections -type PeerNetwork interface { - ConnectionManager() ifconnmgr.ConnManager - ConnectTo(context.Context, peer.ID) error - FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID +// PeerTagger is an interface for tagging peers with metadata +type PeerTagger interface { + TagPeer(peer.ID, string, int) + UntagPeer(p peer.ID, tag string) +} + +// PeerProviderFinder is an interface for finding providers +type PeerProviderFinder interface { + FindProvidersAsync(context.Context, cid.Cid, uint64) <-chan peer.ID } type peerMessage interface { @@ -33,9 +36,11 @@ type peerMessage interface { // SessionPeerManager tracks and manages peers for a session, and provides // the best ones to the session type SessionPeerManager struct { - ctx 
context.Context - network PeerNetwork - tag string + ctx context.Context + tagger PeerTagger + providerFinder PeerProviderFinder + tag string + id uint64 peerMessages chan peerMessage @@ -46,12 +51,14 @@ type SessionPeerManager struct { } // New creates a new SessionPeerManager -func New(ctx context.Context, id uint64, network PeerNetwork) *SessionPeerManager { +func New(ctx context.Context, id uint64, tagger PeerTagger, providerFinder PeerProviderFinder) *SessionPeerManager { spm := &SessionPeerManager{ - ctx: ctx, - network: network, - peerMessages: make(chan peerMessage, 16), - activePeers: make(map[peer.ID]bool), + id: id, + ctx: ctx, + tagger: tagger, + providerFinder: providerFinder, + peerMessages: make(chan peerMessage, 16), + activePeers: make(map[peer.ID]bool), } spm.tag = fmt.Sprint("bs-ses-", id) @@ -101,24 +108,13 @@ func (spm *SessionPeerManager) GetOptimizedPeers() []peer.ID { // providers for the given Cid func (spm *SessionPeerManager) FindMorePeers(ctx context.Context, c cid.Cid) { go func(k cid.Cid) { - // TODO: have a task queue setup for this to: - // - rate limit - // - manage timeouts - // - ensure two 'findprovs' calls for the same block don't run concurrently - // - share peers between sessions based on interest set - for p := range spm.network.FindProvidersAsync(ctx, k, 10) { - go func(p peer.ID) { - // TODO: Also use context from spm. 
- err := spm.network.ConnectTo(ctx, p) - if err != nil { - log.Debugf("failed to connect to provider %s: %s", p, err) - } - select { - case spm.peerMessages <- &peerFoundMessage{p}: - case <-ctx.Done(): - case <-spm.ctx.Done(): - } - }(p) + for p := range spm.providerFinder.FindProvidersAsync(ctx, k, spm.id) { + + select { + case spm.peerMessages <- &peerFoundMessage{p}: + case <-ctx.Done(): + case <-spm.ctx.Done(): + } } }(c) } @@ -136,8 +132,7 @@ func (spm *SessionPeerManager) run(ctx context.Context) { } func (spm *SessionPeerManager) tagPeer(p peer.ID) { - cmgr := spm.network.ConnectionManager() - cmgr.TagPeer(p, spm.tag, 10) + spm.tagger.TagPeer(p, spm.tag, 10) } func (spm *SessionPeerManager) insertOptimizedPeer(p peer.ID) { @@ -223,8 +218,7 @@ func (prm *peerReqMessage) handle(spm *SessionPeerManager) { } func (spm *SessionPeerManager) handleShutdown() { - cmgr := spm.network.ConnectionManager() for p := range spm.activePeers { - cmgr.UntagPeer(p, spm.tag) + spm.tagger.UntagPeer(p, spm.tag) } } diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index 2ec38f0a4..68862942c 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -2,7 +2,6 @@ package sessionpeermanager import ( "context" - "errors" "math/rand" "sync" "testing" @@ -11,35 +10,19 @@ import ( "github.com/ipfs/go-bitswap/testutil" cid "github.com/ipfs/go-cid" - ifconnmgr "github.com/libp2p/go-libp2p-interface-connmgr" - inet "github.com/libp2p/go-libp2p-net" peer "github.com/libp2p/go-libp2p-peer" ) -type fakePeerNetwork struct { - peers []peer.ID - connManager ifconnmgr.ConnManager - completed chan struct{} - connect chan struct{} +type fakePeerProviderFinder struct { + peers []peer.ID + completed chan struct{} } -func (fpn *fakePeerNetwork) ConnectionManager() ifconnmgr.ConnManager { - return fpn.connManager -} - -func (fpn *fakePeerNetwork) ConnectTo(ctx 
context.Context, p peer.ID) error { - select { - case fpn.connect <- struct{}{}: - return nil - case <-ctx.Done(): - return errors.New("Timeout Occurred") - } -} - -func (fpn *fakePeerNetwork) FindProvidersAsync(ctx context.Context, c cid.Cid, num int) <-chan peer.ID { +func (fppf *fakePeerProviderFinder) FindProvidersAsync(ctx context.Context, c cid.Cid, ses uint64) <-chan peer.ID { peerCh := make(chan peer.ID) go func() { - for _, p := range fpn.peers { + + for _, p := range fppf.peers { select { case peerCh <- p: case <-ctx.Done(): @@ -50,52 +33,48 @@ func (fpn *fakePeerNetwork) FindProvidersAsync(ctx context.Context, c cid.Cid, n close(peerCh) select { - case fpn.completed <- struct{}{}: + case fppf.completed <- struct{}{}: case <-ctx.Done(): } }() return peerCh } -type fakeConnManager struct { +type fakePeerTagger struct { taggedPeers []peer.ID wait sync.WaitGroup } -func (fcm *fakeConnManager) TagPeer(p peer.ID, tag string, n int) { - fcm.wait.Add(1) - fcm.taggedPeers = append(fcm.taggedPeers, p) +func (fpt *fakePeerTagger) TagPeer(p peer.ID, tag string, n int) { + fpt.wait.Add(1) + fpt.taggedPeers = append(fpt.taggedPeers, p) } -func (fcm *fakeConnManager) UntagPeer(p peer.ID, tag string) { - defer fcm.wait.Done() - for i := 0; i < len(fcm.taggedPeers); i++ { - if fcm.taggedPeers[i] == p { - fcm.taggedPeers[i] = fcm.taggedPeers[len(fcm.taggedPeers)-1] - fcm.taggedPeers = fcm.taggedPeers[:len(fcm.taggedPeers)-1] +func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) { + defer fpt.wait.Done() + + for i := 0; i < len(fpt.taggedPeers); i++ { + if fpt.taggedPeers[i] == p { + fpt.taggedPeers[i] = fpt.taggedPeers[len(fpt.taggedPeers)-1] + fpt.taggedPeers = fpt.taggedPeers[:len(fpt.taggedPeers)-1] return } } } -func (*fakeConnManager) GetTagInfo(p peer.ID) *ifconnmgr.TagInfo { return nil } -func (*fakeConnManager) TrimOpenConns(ctx context.Context) {} -func (*fakeConnManager) Notifee() inet.Notifiee { return nil } - func TestFindingMorePeers(t *testing.T) { 
ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() completed := make(chan struct{}) - connect := make(chan struct{}) peers := testutil.GeneratePeers(5) - fcm := &fakeConnManager{} - fpn := &fakePeerNetwork{peers, fcm, completed, connect} + fpt := &fakePeerTagger{} + fppf := &fakePeerProviderFinder{peers, completed} c := testutil.GenerateCids(1)[0] id := testutil.GenerateSessionID() - sessionPeerManager := New(ctx, id, fpn) + sessionPeerManager := New(ctx, id, fpt, fppf) findCtx, findCancel := context.WithTimeout(ctx, 10*time.Millisecond) defer findCancel() @@ -105,13 +84,6 @@ func TestFindingMorePeers(t *testing.T) { case <-findCtx.Done(): t.Fatal("Did not finish finding providers") } - for range peers { - select { - case <-connect: - case <-findCtx.Done(): - t.Fatal("Did not connect to peer") - } - } time.Sleep(2 * time.Millisecond) sessionPeers := sessionPeerManager.GetOptimizedPeers() @@ -123,7 +95,7 @@ func TestFindingMorePeers(t *testing.T) { t.Fatal("incorrect peer found through finding providers") } } - if len(fcm.taggedPeers) != len(peers) { + if len(fpt.taggedPeers) != len(peers) { t.Fatal("Peers were not tagged!") } } @@ -133,12 +105,12 @@ func TestRecordingReceivedBlocks(t *testing.T) { ctx, cancel := context.WithCancel(ctx) defer cancel() p := testutil.GeneratePeers(1)[0] - fcm := &fakeConnManager{} - fpn := &fakePeerNetwork{nil, fcm, nil, nil} + fpt := &fakePeerTagger{} + fppf := &fakePeerProviderFinder{} c := testutil.GenerateCids(1)[0] id := testutil.GenerateSessionID() - sessionPeerManager := New(ctx, id, fpn) + sessionPeerManager := New(ctx, id, fpt, fppf) sessionPeerManager.RecordPeerResponse(p, c) time.Sleep(10 * time.Millisecond) sessionPeers := sessionPeerManager.GetOptimizedPeers() @@ -148,7 +120,7 @@ func TestRecordingReceivedBlocks(t *testing.T) { if sessionPeers[0] != p { t.Fatal("incorrect peer added on receive") } - if len(fcm.taggedPeers) != 1 { + if len(fpt.taggedPeers) != 1 { t.Fatal("Peers was not 
tagged!") } } @@ -159,12 +131,11 @@ func TestOrderingPeers(t *testing.T) { defer cancel() peers := testutil.GeneratePeers(100) completed := make(chan struct{}) - connect := make(chan struct{}) - fcm := &fakeConnManager{} - fpn := &fakePeerNetwork{peers, fcm, completed, connect} + fpt := &fakePeerTagger{} + fppf := &fakePeerProviderFinder{peers, completed} c := testutil.GenerateCids(1) id := testutil.GenerateSessionID() - sessionPeerManager := New(ctx, id, fpn) + sessionPeerManager := New(ctx, id, fpt, fppf) // add all peers to session sessionPeerManager.FindMorePeers(ctx, c[0]) @@ -173,13 +144,6 @@ func TestOrderingPeers(t *testing.T) { case <-ctx.Done(): t.Fatal("Did not finish finding providers") } - for range peers { - select { - case <-connect: - case <-ctx.Done(): - t.Fatal("Did not connect to peer") - } - } time.Sleep(2 * time.Millisecond) // record broadcast @@ -237,13 +201,12 @@ func TestUntaggingPeers(t *testing.T) { defer cancel() peers := testutil.GeneratePeers(5) completed := make(chan struct{}) - connect := make(chan struct{}) - fcm := &fakeConnManager{} - fpn := &fakePeerNetwork{peers, fcm, completed, connect} + fpt := &fakePeerTagger{} + fppf := &fakePeerProviderFinder{peers, completed} c := testutil.GenerateCids(1)[0] id := testutil.GenerateSessionID() - sessionPeerManager := New(ctx, id, fpn) + sessionPeerManager := New(ctx, id, fpt, fppf) sessionPeerManager.FindMorePeers(ctx, c) select { @@ -251,22 +214,15 @@ func TestUntaggingPeers(t *testing.T) { case <-ctx.Done(): t.Fatal("Did not finish finding providers") } - for range peers { - select { - case <-connect: - case <-ctx.Done(): - t.Fatal("Did not connect to peer") - } - } time.Sleep(2 * time.Millisecond) - if len(fcm.taggedPeers) != len(peers) { + if len(fpt.taggedPeers) != len(peers) { t.Fatal("Peers were not tagged!") } <-ctx.Done() - fcm.wait.Wait() + fpt.wait.Wait() - if len(fcm.taggedPeers) != 0 { + if len(fpt.taggedPeers) != 0 { t.Fatal("Peers were not untagged!") } } From 
d88971be375f73ef68061caaf3b1ed178f1d974a Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 23 Jan 2019 14:01:53 -0800 Subject: [PATCH 0695/1035] fix(ProviderQueryManager): fix test + add logging Add debug logging for the provider query manager and make tests more reliable This commit was moved from ipfs/go-bitswap@1eb28a223413168af69fdf5499a12db0cecec7a7 --- .../providerquerymanager.go | 22 ++++++++- .../providerquerymanager_test.go | 48 +++++++++++++------ 2 files changed, 54 insertions(+), 16 deletions(-) diff --git a/bitswap/providerquerymanager/providerquerymanager.go b/bitswap/providerquerymanager/providerquerymanager.go index d2ba9e72b..21cfcd0d0 100644 --- a/bitswap/providerquerymanager/providerquerymanager.go +++ b/bitswap/providerquerymanager/providerquerymanager.go @@ -2,6 +2,7 @@ package providerquerymanager import ( "context" + "fmt" "sync" "time" @@ -31,6 +32,7 @@ type ProviderQueryNetwork interface { } type providerQueryMessage interface { + debugMessage() string handle(pqm *ProviderQueryManager) } @@ -192,6 +194,7 @@ func (pqm *ProviderQueryManager) findProviderWorker() { return } + log.Debugf("Beginning Find Provider Request for cid: %s", k.String()) pqm.timeoutMutex.RLock() findProviderCtx, cancel := context.WithTimeout(pqm.ctx, pqm.findProviderTimeout) pqm.timeoutMutex.RUnlock() @@ -273,8 +276,6 @@ func (pqm *ProviderQueryManager) cleanupInProcessRequests() { } func (pqm *ProviderQueryManager) run() { - defer close(pqm.incomingFindProviderRequests) - defer close(pqm.providerRequestsProcessing) defer pqm.cleanupInProcessRequests() go pqm.providerRequestBufferWorker() @@ -285,6 +286,7 @@ func (pqm *ProviderQueryManager) run() { for { select { case nextMessage := <-pqm.providerQueryMessages: + log.Debug(nextMessage.debugMessage()) nextMessage.handle(pqm) case <-pqm.ctx.Done(): return @@ -292,6 +294,10 @@ func (pqm *ProviderQueryManager) run() { } } +func (rpm *receivedProviderMessage) debugMessage() string { + return fmt.Sprintf("Received 
provider (%s) for cid (%s)", rpm.p.String(), rpm.k.String()) +} + func (rpm *receivedProviderMessage) handle(pqm *ProviderQueryManager) { requestStatus, ok := pqm.inProgressRequestStatuses[rpm.k] if !ok { @@ -308,6 +314,10 @@ func (rpm *receivedProviderMessage) handle(pqm *ProviderQueryManager) { } } +func (fpqm *finishedProviderQueryMessage) debugMessage() string { + return fmt.Sprintf("Finished Provider Query on cid: %s", fpqm.k.String()) +} + func (fpqm *finishedProviderQueryMessage) handle(pqm *ProviderQueryManager) { requestStatus, ok := pqm.inProgressRequestStatuses[fpqm.k] if !ok { @@ -320,6 +330,10 @@ func (fpqm *finishedProviderQueryMessage) handle(pqm *ProviderQueryManager) { delete(pqm.inProgressRequestStatuses, fpqm.k) } +func (npqm *newProvideQueryMessage) debugMessage() string { + return fmt.Sprintf("New Provider Query on cid: %s from session: %d", npqm.k.String(), npqm.ses) +} + func (npqm *newProvideQueryMessage) handle(pqm *ProviderQueryManager) { requestStatus, ok := pqm.inProgressRequestStatuses[npqm.k] if !ok { @@ -343,6 +357,10 @@ func (npqm *newProvideQueryMessage) handle(pqm *ProviderQueryManager) { } } +func (crm *cancelRequestMessage) debugMessage() string { + return fmt.Sprintf("Cancel provider query on cid: %s from session: %d", crm.k.String(), crm.ses) +} + func (crm *cancelRequestMessage) handle(pqm *ProviderQueryManager) { requestStatus, ok := pqm.inProgressRequestStatuses[crm.k] if !ok { diff --git a/bitswap/providerquerymanager/providerquerymanager_test.go b/bitswap/providerquerymanager/providerquerymanager_test.go index f2e6f0362..f5b6db1ee 100644 --- a/bitswap/providerquerymanager/providerquerymanager_test.go +++ b/bitswap/providerquerymanager/providerquerymanager_test.go @@ -4,6 +4,7 @@ import ( "context" "errors" "reflect" + "sync" "testing" "time" @@ -14,11 +15,12 @@ import ( ) type fakeProviderNetwork struct { - peersFound []peer.ID - connectError error - delay time.Duration - connectDelay time.Duration - queriesMade int + 
peersFound []peer.ID + connectError error + delay time.Duration + connectDelay time.Duration + queriesMadeMutex sync.RWMutex + queriesMade int } func (fpn *fakeProviderNetwork) ConnectTo(context.Context, peer.ID) error { @@ -27,13 +29,20 @@ func (fpn *fakeProviderNetwork) ConnectTo(context.Context, peer.ID) error { } func (fpn *fakeProviderNetwork) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { + fpn.queriesMadeMutex.Lock() fpn.queriesMade++ + fpn.queriesMadeMutex.Unlock() incomingPeers := make(chan peer.ID) go func() { defer close(incomingPeers) for _, p := range fpn.peersFound { time.Sleep(fpn.delay) select { + case <-ctx.Done(): + return + default: + } + select { case incomingPeers <- p: case <-ctx.Done(): return @@ -75,9 +84,12 @@ func TestNormalSimultaneousFetch(t *testing.T) { t.Fatal("Did not collect all peers for request that was completed") } + fpn.queriesMadeMutex.Lock() + defer fpn.queriesMadeMutex.Unlock() if fpn.queriesMade != 2 { t.Fatal("Did not dedup provider requests running simultaneously") } + } func TestDedupingProviderRequests(t *testing.T) { @@ -93,7 +105,7 @@ func TestDedupingProviderRequests(t *testing.T) { sessionID1 := testutil.GenerateSessionID() sessionID2 := testutil.GenerateSessionID() - sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) + sessionCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) defer cancel() firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, sessionID1) secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, sessionID2) @@ -115,7 +127,8 @@ func TestDedupingProviderRequests(t *testing.T) { if !reflect.DeepEqual(firstPeersReceived, secondPeersReceived) { t.Fatal("Did not receive the same response to both find provider requests") } - + fpn.queriesMadeMutex.Lock() + defer fpn.queriesMadeMutex.Unlock() if fpn.queriesMade != 1 { t.Fatal("Did not dedup provider requests running simultaneously") } @@ -139,7 +152,7 @@ 
func TestCancelOneRequestDoesNotTerminateAnother(t *testing.T) { firstSessionCtx, firstCancel := context.WithTimeout(ctx, 3*time.Millisecond) defer firstCancel() firstRequestChan := providerQueryManager.FindProvidersAsync(firstSessionCtx, key, sessionID1) - secondSessionCtx, secondCancel := context.WithTimeout(ctx, 20*time.Millisecond) + secondSessionCtx, secondCancel := context.WithTimeout(ctx, 100*time.Millisecond) defer secondCancel() secondRequestChan := providerQueryManager.FindProvidersAsync(secondSessionCtx, key, sessionID2) @@ -160,7 +173,8 @@ func TestCancelOneRequestDoesNotTerminateAnother(t *testing.T) { if len(firstPeersReceived) >= len(peers) { t.Fatal("Collected all peers on cancelled peer, should have been cancelled immediately") } - + fpn.queriesMadeMutex.Lock() + defer fpn.queriesMadeMutex.Unlock() if fpn.queriesMade != 1 { t.Fatal("Did not dedup provider requests running simultaneously") } @@ -248,26 +262,33 @@ func TestRateLimitingRequests(t *testing.T) { delay: 1 * time.Millisecond, } ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() keys := testutil.GenerateCids(maxInProcessRequests + 1) sessionID := testutil.GenerateSessionID() - sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) + sessionCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) defer cancel() var requestChannels []<-chan peer.ID for i := 0; i < maxInProcessRequests+1; i++ { requestChannels = append(requestChannels, providerQueryManager.FindProvidersAsync(sessionCtx, keys[i], sessionID)) } - time.Sleep(2 * time.Millisecond) + time.Sleep(9 * time.Millisecond) + fpn.queriesMadeMutex.Lock() if fpn.queriesMade != maxInProcessRequests { + t.Logf("Queries made: %d\n", fpn.queriesMade) t.Fatal("Did not limit parallel requests to rate limit") } + fpn.queriesMadeMutex.Unlock() for i := 0; i < maxInProcessRequests+1; i++ { for range requestChannels[i] { } } + 
fpn.queriesMadeMutex.Lock() + defer fpn.queriesMadeMutex.Unlock() if fpn.queriesMade != maxInProcessRequests+1 { t.Fatal("Did not make all seperate requests") } @@ -282,7 +303,7 @@ func TestFindProviderTimeout(t *testing.T) { ctx := context.Background() providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() - providerQueryManager.SetFindProviderTimeout(3 * time.Millisecond) + providerQueryManager.SetFindProviderTimeout(2 * time.Millisecond) keys := testutil.GenerateCids(1) sessionID1 := testutil.GenerateSessionID() @@ -293,8 +314,7 @@ func TestFindProviderTimeout(t *testing.T) { for p := range firstRequestChan { firstPeersReceived = append(firstPeersReceived, p) } - if len(firstPeersReceived) <= 0 || - len(firstPeersReceived) >= len(peers) { + if len(firstPeersReceived) >= len(peers) { t.Fatal("Find provider request should have timed out, did not") } } From e6c2b655350db13424bc5617ec195f7861067c05 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 30 Jan 2019 13:16:51 -0800 Subject: [PATCH 0696/1035] fix(providequerymanager): improve test stability Removed a minor condition check that could fail in some cases just due to timing, but not a code issue This commit was moved from ipfs/go-bitswap@56d9e3fcf95a94dbb255e67c0a2fa8d6ace84dce --- bitswap/providerquerymanager/providerquerymanager_test.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/bitswap/providerquerymanager/providerquerymanager_test.go b/bitswap/providerquerymanager/providerquerymanager_test.go index f5b6db1ee..21d7004ca 100644 --- a/bitswap/providerquerymanager/providerquerymanager_test.go +++ b/bitswap/providerquerymanager/providerquerymanager_test.go @@ -211,9 +211,7 @@ func TestCancelManagerExitsGracefully(t *testing.T) { secondPeersReceived = append(secondPeersReceived, p) } - if len(firstPeersReceived) <= 0 || - len(firstPeersReceived) >= len(peers) || - len(secondPeersReceived) <= 0 || + if len(firstPeersReceived) >= len(peers) || len(secondPeersReceived) >= 
len(peers) { t.Fatal("Did not cancel requests in progress correctly") } From 2c57403128bae67005f916f9546980a50d7acfbc Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Mon, 4 Feb 2019 11:50:52 -0800 Subject: [PATCH 0697/1035] refactor(providerquerymanager): don't use session ids removed session id user completely from providerquerymanager This commit was moved from ipfs/go-bitswap@92717dbb67953ebee5675555a273b375cbae13d4 --- .../providerquerymanager.go | 45 +++++++++---------- .../providerquerymanager_test.go | 36 +++++---------- .../sessionpeermanager/sessionpeermanager.go | 6 +-- .../sessionpeermanager_test.go | 2 +- 4 files changed, 38 insertions(+), 51 deletions(-) diff --git a/bitswap/providerquerymanager/providerquerymanager.go b/bitswap/providerquerymanager/providerquerymanager.go index 21cfcd0d0..8c20b022f 100644 --- a/bitswap/providerquerymanager/providerquerymanager.go +++ b/bitswap/providerquerymanager/providerquerymanager.go @@ -21,7 +21,7 @@ const ( type inProgressRequestStatus struct { providersSoFar []peer.ID - listeners map[uint64]chan peer.ID + listeners map[chan peer.ID]struct{} } // ProviderQueryNetwork is an interface for finding providers and connecting to @@ -46,14 +46,13 @@ type finishedProviderQueryMessage struct { } type newProvideQueryMessage struct { - ses uint64 k cid.Cid inProgressRequestChan chan<- inProgressRequest } type cancelRequestMessage struct { - ses uint64 - k cid.Cid + incomingProviders chan peer.ID + k cid.Cid } // ProviderQueryManager manages requests to find more providers for blocks @@ -98,7 +97,7 @@ func (pqm *ProviderQueryManager) Startup() { type inProgressRequest struct { providersSoFar []peer.ID - incoming <-chan peer.ID + incoming chan peer.ID } // SetFindProviderTimeout changes the timeout for finding providers @@ -109,12 +108,11 @@ func (pqm *ProviderQueryManager) SetFindProviderTimeout(findProviderTimeout time } // FindProvidersAsync finds providers for the given block. 
-func (pqm *ProviderQueryManager) FindProvidersAsync(sessionCtx context.Context, k cid.Cid, ses uint64) <-chan peer.ID { +func (pqm *ProviderQueryManager) FindProvidersAsync(sessionCtx context.Context, k cid.Cid) <-chan peer.ID { inProgressRequestChan := make(chan inProgressRequest) select { case pqm.providerQueryMessages <- &newProvideQueryMessage{ - ses: ses, k: k, inProgressRequestChan: inProgressRequestChan, }: @@ -131,10 +129,10 @@ func (pqm *ProviderQueryManager) FindProvidersAsync(sessionCtx context.Context, case receivedInProgressRequest = <-inProgressRequestChan: } - return pqm.receiveProviders(sessionCtx, k, ses, receivedInProgressRequest) + return pqm.receiveProviders(sessionCtx, k, receivedInProgressRequest) } -func (pqm *ProviderQueryManager) receiveProviders(sessionCtx context.Context, k cid.Cid, ses uint64, receivedInProgressRequest inProgressRequest) <-chan peer.ID { +func (pqm *ProviderQueryManager) receiveProviders(sessionCtx context.Context, k cid.Cid, receivedInProgressRequest inProgressRequest) <-chan peer.ID { // maintains an unbuffered queue for incoming providers for given request for a given session // essentially, as a provider comes in, for a given CID, we want to immediately broadcast to all // sessions that queried that CID, without worrying about whether the client code is actually @@ -162,8 +160,8 @@ func (pqm *ProviderQueryManager) receiveProviders(sessionCtx context.Context, k select { case <-sessionCtx.Done(): pqm.providerQueryMessages <- &cancelRequestMessage{ - ses: ses, - k: k, + incomingProviders: incomingProviders, + k: k, } // clear out any remaining providers for range incomingProviders { @@ -269,7 +267,7 @@ func (pqm *ProviderQueryManager) providerRequestBufferWorker() { func (pqm *ProviderQueryManager) cleanupInProcessRequests() { for _, requestStatus := range pqm.inProgressRequestStatuses { - for _, listener := range requestStatus.listeners { + for listener := range requestStatus.listeners { close(listener) } } @@ -305,7 
+303,7 @@ func (rpm *receivedProviderMessage) handle(pqm *ProviderQueryManager) { return } requestStatus.providersSoFar = append(requestStatus.providersSoFar, rpm.p) - for _, listener := range requestStatus.listeners { + for listener := range requestStatus.listeners { select { case listener <- rpm.p: case <-pqm.ctx.Done(): @@ -324,21 +322,21 @@ func (fpqm *finishedProviderQueryMessage) handle(pqm *ProviderQueryManager) { log.Errorf("Ended request for cid (%s) not in progress", fpqm.k.String()) return } - for _, listener := range requestStatus.listeners { + for listener := range requestStatus.listeners { close(listener) } delete(pqm.inProgressRequestStatuses, fpqm.k) } func (npqm *newProvideQueryMessage) debugMessage() string { - return fmt.Sprintf("New Provider Query on cid: %s from session: %d", npqm.k.String(), npqm.ses) + return fmt.Sprintf("New Provider Query on cid: %s", npqm.k.String()) } func (npqm *newProvideQueryMessage) handle(pqm *ProviderQueryManager) { requestStatus, ok := pqm.inProgressRequestStatuses[npqm.k] if !ok { requestStatus = &inProgressRequestStatus{ - listeners: make(map[uint64]chan peer.ID), + listeners: make(map[chan peer.ID]struct{}), } pqm.inProgressRequestStatuses[npqm.k] = requestStatus select { @@ -347,31 +345,32 @@ func (npqm *newProvideQueryMessage) handle(pqm *ProviderQueryManager) { return } } - requestStatus.listeners[npqm.ses] = make(chan peer.ID) + inProgressChan := make(chan peer.ID) + requestStatus.listeners[inProgressChan] = struct{}{} select { case npqm.inProgressRequestChan <- inProgressRequest{ providersSoFar: requestStatus.providersSoFar, - incoming: requestStatus.listeners[npqm.ses], + incoming: inProgressChan, }: case <-pqm.ctx.Done(): } } func (crm *cancelRequestMessage) debugMessage() string { - return fmt.Sprintf("Cancel provider query on cid: %s from session: %d", crm.k.String(), crm.ses) + return fmt.Sprintf("Cancel provider query on cid: %s", crm.k.String()) } func (crm *cancelRequestMessage) handle(pqm 
*ProviderQueryManager) { requestStatus, ok := pqm.inProgressRequestStatuses[crm.k] if !ok { - log.Errorf("Attempt to cancel request for session (%d) for cid (%s) not in progress", crm.ses, crm.k.String()) + log.Errorf("Attempt to cancel request for cid (%s) not in progress", crm.k.String()) return } - listener, ok := requestStatus.listeners[crm.ses] + listener := crm.incomingProviders if !ok { - log.Errorf("Attempt to cancel request for session (%d) for cid (%s) this is not a listener", crm.ses, crm.k.String()) + log.Errorf("Attempt to cancel request for for cid (%s) this is not a listener", crm.k.String()) return } close(listener) - delete(requestStatus.listeners, crm.ses) + delete(requestStatus.listeners, listener) } diff --git a/bitswap/providerquerymanager/providerquerymanager_test.go b/bitswap/providerquerymanager/providerquerymanager_test.go index 21d7004ca..3abe6b0e8 100644 --- a/bitswap/providerquerymanager/providerquerymanager_test.go +++ b/bitswap/providerquerymanager/providerquerymanager_test.go @@ -62,13 +62,11 @@ func TestNormalSimultaneousFetch(t *testing.T) { providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() keys := testutil.GenerateCids(2) - sessionID1 := testutil.GenerateSessionID() - sessionID2 := testutil.GenerateSessionID() sessionCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) defer cancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0], sessionID1) - secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[1], sessionID2) + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[1]) var firstPeersReceived []peer.ID for p := range firstRequestChan { @@ -102,13 +100,11 @@ func TestDedupingProviderRequests(t *testing.T) { providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() key := testutil.GenerateCids(1)[0] - sessionID1 := 
testutil.GenerateSessionID() - sessionID2 := testutil.GenerateSessionID() sessionCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) defer cancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, sessionID1) - secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, sessionID2) + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) var firstPeersReceived []peer.ID for p := range firstRequestChan { @@ -145,16 +141,14 @@ func TestCancelOneRequestDoesNotTerminateAnother(t *testing.T) { providerQueryManager.Startup() key := testutil.GenerateCids(1)[0] - sessionID1 := testutil.GenerateSessionID() - sessionID2 := testutil.GenerateSessionID() // first session will cancel before done firstSessionCtx, firstCancel := context.WithTimeout(ctx, 3*time.Millisecond) defer firstCancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(firstSessionCtx, key, sessionID1) + firstRequestChan := providerQueryManager.FindProvidersAsync(firstSessionCtx, key) secondSessionCtx, secondCancel := context.WithTimeout(ctx, 100*time.Millisecond) defer secondCancel() - secondRequestChan := providerQueryManager.FindProvidersAsync(secondSessionCtx, key, sessionID2) + secondRequestChan := providerQueryManager.FindProvidersAsync(secondSessionCtx, key) var firstPeersReceived []peer.ID for p := range firstRequestChan { @@ -193,13 +187,11 @@ func TestCancelManagerExitsGracefully(t *testing.T) { providerQueryManager.Startup() key := testutil.GenerateCids(1)[0] - sessionID1 := testutil.GenerateSessionID() - sessionID2 := testutil.GenerateSessionID() sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) defer cancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, sessionID1) - secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, sessionID2) + firstRequestChan 
:= providerQueryManager.FindProvidersAsync(sessionCtx, key) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) var firstPeersReceived []peer.ID for p := range firstRequestChan { @@ -229,13 +221,11 @@ func TestPeersWithConnectionErrorsNotAddedToPeerList(t *testing.T) { providerQueryManager.Startup() key := testutil.GenerateCids(1)[0] - sessionID1 := testutil.GenerateSessionID() - sessionID2 := testutil.GenerateSessionID() sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) defer cancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, sessionID1) - secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, sessionID2) + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) var firstPeersReceived []peer.ID for p := range firstRequestChan { @@ -266,12 +256,11 @@ func TestRateLimitingRequests(t *testing.T) { providerQueryManager.Startup() keys := testutil.GenerateCids(maxInProcessRequests + 1) - sessionID := testutil.GenerateSessionID() sessionCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) defer cancel() var requestChannels []<-chan peer.ID for i := 0; i < maxInProcessRequests+1; i++ { - requestChannels = append(requestChannels, providerQueryManager.FindProvidersAsync(sessionCtx, keys[i], sessionID)) + requestChannels = append(requestChannels, providerQueryManager.FindProvidersAsync(sessionCtx, keys[i])) } time.Sleep(9 * time.Millisecond) fpn.queriesMadeMutex.Lock() @@ -303,11 +292,10 @@ func TestFindProviderTimeout(t *testing.T) { providerQueryManager.Startup() providerQueryManager.SetFindProviderTimeout(2 * time.Millisecond) keys := testutil.GenerateCids(1) - sessionID1 := testutil.GenerateSessionID() sessionCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) defer cancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, 
keys[0], sessionID1) + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) var firstPeersReceived []peer.ID for p := range firstRequestChan { firstPeersReceived = append(firstPeersReceived, p) diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index 091e1c7ef..0b02a2a2b 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -26,7 +26,7 @@ type PeerTagger interface { // PeerProviderFinder is an interface for finding providers type PeerProviderFinder interface { - FindProvidersAsync(context.Context, cid.Cid, uint64) <-chan peer.ID + FindProvidersAsync(context.Context, cid.Cid) <-chan peer.ID } type peerMessage interface { @@ -108,8 +108,8 @@ func (spm *SessionPeerManager) GetOptimizedPeers() []peer.ID { // providers for the given Cid func (spm *SessionPeerManager) FindMorePeers(ctx context.Context, c cid.Cid) { go func(k cid.Cid) { - for p := range spm.providerFinder.FindProvidersAsync(ctx, k, spm.id) { - + for p := range spm.providerFinder.FindProvidersAsync(ctx, k) { + select { case spm.peerMessages <- &peerFoundMessage{p}: case <-ctx.Done(): diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index 68862942c..d6d1440a4 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -18,7 +18,7 @@ type fakePeerProviderFinder struct { completed chan struct{} } -func (fppf *fakePeerProviderFinder) FindProvidersAsync(ctx context.Context, c cid.Cid, ses uint64) <-chan peer.ID { +func (fppf *fakePeerProviderFinder) FindProvidersAsync(ctx context.Context, c cid.Cid) <-chan peer.ID { peerCh := make(chan peer.ID) go func() { From 34a5e4a7313f4d2d26636a28ab34080862121439 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Mon, 4 Feb 2019 12:31:20 -0800 Subject: [PATCH 0698/1035] 
fix(providerquerymanager): minor fixes to capture all cancellations This commit was moved from ipfs/go-bitswap@51e82a6552f657f91cd28b91682e4ff456182336 --- .../providerquerymanager.go | 36 ++++++++++++------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/bitswap/providerquerymanager/providerquerymanager.go b/bitswap/providerquerymanager/providerquerymanager.go index 8c20b022f..26602bc58 100644 --- a/bitswap/providerquerymanager/providerquerymanager.go +++ b/bitswap/providerquerymanager/providerquerymanager.go @@ -124,6 +124,8 @@ func (pqm *ProviderQueryManager) FindProvidersAsync(sessionCtx context.Context, var receivedInProgressRequest inProgressRequest select { + case <-pqm.ctx.Done(): + return nil case <-sessionCtx.Done(): return nil case receivedInProgressRequest = <-inProgressRequestChan: @@ -158,15 +160,25 @@ func (pqm *ProviderQueryManager) receiveProviders(sessionCtx context.Context, k } for len(receivedProviders) > 0 || incomingProviders != nil { select { + case <-pqm.ctx.Done(): + return case <-sessionCtx.Done(): pqm.providerQueryMessages <- &cancelRequestMessage{ incomingProviders: incomingProviders, k: k, } - // clear out any remaining providers - for range incomingProviders { + // clear out any remaining providers, in case and "incoming provider" + // messages get processed before our cancel message + for { + select { + case _, ok := <-incomingProviders: + if !ok { + return + } + case <-pqm.ctx.Done(): + return + } } - return case provider, ok := <-incomingProviders: if !ok { incomingProviders = nil @@ -362,15 +374,15 @@ func (crm *cancelRequestMessage) debugMessage() string { func (crm *cancelRequestMessage) handle(pqm *ProviderQueryManager) { requestStatus, ok := pqm.inProgressRequestStatuses[crm.k] - if !ok { + if ok { + _, ok := requestStatus.listeners[crm.incomingProviders] + if ok { + delete(requestStatus.listeners, crm.incomingProviders) + } else { + log.Errorf("Attempt to cancel request for for cid (%s) this is not a listener", 
crm.k.String()) + } + } else { log.Errorf("Attempt to cancel request for cid (%s) not in progress", crm.k.String()) - return - } - listener := crm.incomingProviders - if !ok { - log.Errorf("Attempt to cancel request for for cid (%s) this is not a listener", crm.k.String()) - return } - close(listener) - delete(requestStatus.listeners, listener) + close(crm.incomingProviders) } From 9dd58ac3ba5921c1e2520e5af671e0a8d5c0c15e Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Mon, 4 Feb 2019 14:58:46 -0800 Subject: [PATCH 0699/1035] feat(providerquerymanager): cancel FindProvidersAsync correctly Make sure if all requestors cancel their request to find providers on a peer, the overall query gets cancelled This commit was moved from ipfs/go-bitswap@b48b3c33ee4ecacff165220fea06520efb21d45d --- .../providerquerymanager.go | 43 +++++++++++++------ 1 file changed, 31 insertions(+), 12 deletions(-) diff --git a/bitswap/providerquerymanager/providerquerymanager.go b/bitswap/providerquerymanager/providerquerymanager.go index 26602bc58..b84463a7f 100644 --- a/bitswap/providerquerymanager/providerquerymanager.go +++ b/bitswap/providerquerymanager/providerquerymanager.go @@ -20,10 +20,17 @@ const ( ) type inProgressRequestStatus struct { + ctx context.Context + cancelFn func() providersSoFar []peer.ID listeners map[chan peer.ID]struct{} } +type findProviderRequest struct { + k cid.Cid + ctx context.Context +} + // ProviderQueryNetwork is an interface for finding providers and connecting to // peers. 
type ProviderQueryNetwork interface { @@ -66,8 +73,8 @@ type ProviderQueryManager struct { ctx context.Context network ProviderQueryNetwork providerQueryMessages chan providerQueryMessage - providerRequestsProcessing chan cid.Cid - incomingFindProviderRequests chan cid.Cid + providerRequestsProcessing chan *findProviderRequest + incomingFindProviderRequests chan *findProviderRequest findProviderTimeout time.Duration timeoutMutex sync.RWMutex @@ -83,8 +90,8 @@ func New(ctx context.Context, network ProviderQueryNetwork) *ProviderQueryManage ctx: ctx, network: network, providerQueryMessages: make(chan providerQueryMessage, 16), - providerRequestsProcessing: make(chan cid.Cid), - incomingFindProviderRequests: make(chan cid.Cid), + providerRequestsProcessing: make(chan *findProviderRequest), + incomingFindProviderRequests: make(chan *findProviderRequest), inProgressRequestStatuses: make(map[cid.Cid]*inProgressRequestStatus), findProviderTimeout: defaultTimeout, } @@ -199,14 +206,14 @@ func (pqm *ProviderQueryManager) findProviderWorker() { // to let requests go in parallel but keep them rate limited for { select { - case k, ok := <-pqm.providerRequestsProcessing: + case fpr, ok := <-pqm.providerRequestsProcessing: if !ok { return } - + k := fpr.k log.Debugf("Beginning Find Provider Request for cid: %s", k.String()) pqm.timeoutMutex.RLock() - findProviderCtx, cancel := context.WithTimeout(pqm.ctx, pqm.findProviderTimeout) + findProviderCtx, cancel := context.WithTimeout(fpr.ctx, pqm.findProviderTimeout) pqm.timeoutMutex.RUnlock() defer cancel() providers := pqm.network.FindProvidersAsync(findProviderCtx, k, maxProviders) @@ -248,14 +255,14 @@ func (pqm *ProviderQueryManager) providerRequestBufferWorker() { // buffer for incoming provider queries and dispatches to the find // provider workers as they become available // based on: https://medium.com/capital-one-tech/building-an-unbounded-channel-in-go-789e175cd2cd - var providerQueryRequestBuffer []cid.Cid - 
nextProviderQuery := func() cid.Cid { + var providerQueryRequestBuffer []*findProviderRequest + nextProviderQuery := func() *findProviderRequest { if len(providerQueryRequestBuffer) == 0 { - return cid.Cid{} + return nil } return providerQueryRequestBuffer[0] } - outgoingRequests := func() chan<- cid.Cid { + outgoingRequests := func() chan<- *findProviderRequest { if len(providerQueryRequestBuffer) == 0 { return nil } @@ -282,6 +289,7 @@ func (pqm *ProviderQueryManager) cleanupInProcessRequests() { for listener := range requestStatus.listeners { close(listener) } + requestStatus.cancelFn() } } @@ -338,6 +346,7 @@ func (fpqm *finishedProviderQueryMessage) handle(pqm *ProviderQueryManager) { close(listener) } delete(pqm.inProgressRequestStatuses, fpqm.k) + requestStatus.cancelFn() } func (npqm *newProvideQueryMessage) debugMessage() string { @@ -347,12 +356,18 @@ func (npqm *newProvideQueryMessage) debugMessage() string { func (npqm *newProvideQueryMessage) handle(pqm *ProviderQueryManager) { requestStatus, ok := pqm.inProgressRequestStatuses[npqm.k] if !ok { + ctx, cancelFn := context.WithCancel(pqm.ctx) requestStatus = &inProgressRequestStatus{ listeners: make(map[chan peer.ID]struct{}), + ctx: ctx, + cancelFn: cancelFn, } pqm.inProgressRequestStatuses[npqm.k] = requestStatus select { - case pqm.incomingFindProviderRequests <- npqm.k: + case pqm.incomingFindProviderRequests <- &findProviderRequest{ + k: npqm.k, + ctx: ctx, + }: case <-pqm.ctx.Done(): return } @@ -378,6 +393,10 @@ func (crm *cancelRequestMessage) handle(pqm *ProviderQueryManager) { _, ok := requestStatus.listeners[crm.incomingProviders] if ok { delete(requestStatus.listeners, crm.incomingProviders) + if len(requestStatus.listeners) == 0 { + delete(pqm.inProgressRequestStatuses, crm.k) + requestStatus.cancelFn() + } } else { log.Errorf("Attempt to cancel request for for cid (%s) this is not a listener", crm.k.String()) } From c3113416b19a9c226fd1dac0ac4b9092dc02e890 Mon Sep 17 00:00:00 2001 From: 
hannahhoward Date: Tue, 5 Feb 2019 10:56:16 -0800 Subject: [PATCH 0700/1035] fix(providerquerymanager): minor channel cleanup Keep channels unblocked in cancelling request -- refactored to function. Also cancel find provider context as soon as it can be. This commit was moved from ipfs/go-bitswap@30f40ecec4f34dd7637f78b0b90dff6e25208be2 --- .../providerquerymanager.go | 65 ++++++++++--------- 1 file changed, 36 insertions(+), 29 deletions(-) diff --git a/bitswap/providerquerymanager/providerquerymanager.go b/bitswap/providerquerymanager/providerquerymanager.go index b84463a7f..38471479e 100644 --- a/bitswap/providerquerymanager/providerquerymanager.go +++ b/bitswap/providerquerymanager/providerquerymanager.go @@ -170,22 +170,8 @@ func (pqm *ProviderQueryManager) receiveProviders(sessionCtx context.Context, k case <-pqm.ctx.Done(): return case <-sessionCtx.Done(): - pqm.providerQueryMessages <- &cancelRequestMessage{ - incomingProviders: incomingProviders, - k: k, - } - // clear out any remaining providers, in case and "incoming provider" - // messages get processed before our cancel message - for { - select { - case _, ok := <-incomingProviders: - if !ok { - return - } - case <-pqm.ctx.Done(): - return - } - } + pqm.cancelProviderRequest(k, incomingProviders) + return case provider, ok := <-incomingProviders: if !ok { incomingProviders = nil @@ -200,6 +186,27 @@ func (pqm *ProviderQueryManager) receiveProviders(sessionCtx context.Context, k return returnedProviders } +func (pqm *ProviderQueryManager) cancelProviderRequest(k cid.Cid, incomingProviders chan peer.ID) { + cancelMessageChannel := pqm.providerQueryMessages + for { + select { + case cancelMessageChannel <- &cancelRequestMessage{ + incomingProviders: incomingProviders, + k: k, + }: + cancelMessageChannel = nil + // clear out any remaining providers, in case and "incoming provider" + // messages get processed before our cancel message + case _, ok := <-incomingProviders: + if !ok { + return + } + case 
<-pqm.ctx.Done(): + return + } + } +} + func (pqm *ProviderQueryManager) findProviderWorker() { // findProviderWorker just cycles through incoming provider queries one // at a time. We have six of these workers running at once @@ -215,7 +222,6 @@ func (pqm *ProviderQueryManager) findProviderWorker() { pqm.timeoutMutex.RLock() findProviderCtx, cancel := context.WithTimeout(fpr.ctx, pqm.findProviderTimeout) pqm.timeoutMutex.RUnlock() - defer cancel() providers := pqm.network.FindProvidersAsync(findProviderCtx, k, maxProviders) wg := &sync.WaitGroup{} for p := range providers { @@ -237,6 +243,7 @@ func (pqm *ProviderQueryManager) findProviderWorker() { } }(p) } + cancel() wg.Wait() select { case pqm.providerQueryMessages <- &finishedProviderQueryMessage{ @@ -389,19 +396,19 @@ func (crm *cancelRequestMessage) debugMessage() string { func (crm *cancelRequestMessage) handle(pqm *ProviderQueryManager) { requestStatus, ok := pqm.inProgressRequestStatuses[crm.k] - if ok { - _, ok := requestStatus.listeners[crm.incomingProviders] - if ok { - delete(requestStatus.listeners, crm.incomingProviders) - if len(requestStatus.listeners) == 0 { - delete(pqm.inProgressRequestStatuses, crm.k) - requestStatus.cancelFn() - } - } else { - log.Errorf("Attempt to cancel request for for cid (%s) this is not a listener", crm.k.String()) - } - } else { + if !ok { log.Errorf("Attempt to cancel request for cid (%s) not in progress", crm.k.String()) + return } + _, ok = requestStatus.listeners[crm.incomingProviders] + if !ok { + log.Errorf("Attempt to cancel request for for cid (%s) this is not a listener", crm.k.String()) + return + } + delete(requestStatus.listeners, crm.incomingProviders) close(crm.incomingProviders) + if len(requestStatus.listeners) == 0 { + delete(pqm.inProgressRequestStatuses, crm.k) + requestStatus.cancelFn() + } } From 312029e842193429eced0aeaa3daa8ec335fd775 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Fri, 25 Jan 2019 18:12:57 -0800 Subject: [PATCH 0701/1035] 
refactor(GetBlocks): Merge session/non-session Make Bitswap GetBlocks just create a temporary session and use that code fix #52 fix #49 This commit was moved from ipfs/go-bitswap@7643ad2d8783b8224ae6027f68332a61a183d522 --- bitswap/bitswap.go | 94 +----------------------------------- bitswap/bitswap_test.go | 4 +- bitswap/workers.go | 103 ---------------------------------------- 3 files changed, 4 insertions(+), 197 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index ee0c939f3..0bd53b3d0 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -16,7 +16,6 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" bsmq "github.com/ipfs/go-bitswap/messagequeue" bsnet "github.com/ipfs/go-bitswap/network" - notifications "github.com/ipfs/go-bitswap/notifications" bspm "github.com/ipfs/go-bitswap/peermanager" bspqm "github.com/ipfs/go-bitswap/providerquerymanager" bssession "github.com/ipfs/go-bitswap/session" @@ -95,9 +94,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, sentHistogram := metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by"+ " this bitswap").Histogram(metricsBuckets) - notif := notifications.New() px := process.WithTeardown(func() error { - notif.Shutdown() return nil }) @@ -120,10 +117,8 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, bs := &Bitswap{ blockstore: bstore, - notifications: notif, engine: decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method network: network, - findKeys: make(chan *blockRequest, sizeBatchRequestChan), process: px, newBlocks: make(chan cid.Cid, HasBlockBufferSize), provideKeys: make(chan cid.Cid, provideKeysBufferSize), @@ -179,12 +174,6 @@ type Bitswap struct { // NB: ensure threadsafety blockstore blockstore.Blockstore - // notifications engine for receiving new blocks and routing them to the - // appropriate user requests - notifications notifications.PubSub - - // findKeys sends keys to a worker to find and connect to 
providers for them - findKeys chan *blockRequest // newBlocks is a channel for newly added blocks to be provided to the // network. blocks pushed down this channel get buffered and fed to the // provideKeys channel later on to avoid too much network activity @@ -248,86 +237,8 @@ func (bs *Bitswap) LedgerForPeer(p peer.ID) *decision.Receipt { // resources, provide a context with a reasonably short deadline (ie. not one // that lasts throughout the lifetime of the server) func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { - if len(keys) == 0 { - out := make(chan blocks.Block) - close(out) - return out, nil - } - - select { - case <-bs.process.Closing(): - return nil, errors.New("bitswap is closed") - default: - } - promise := bs.notifications.Subscribe(ctx, keys...) - - for _, k := range keys { - log.Event(ctx, "Bitswap.GetBlockRequest.Start", k) - } - - mses := bs.sm.GetNextSessionID() - - bs.wm.WantBlocks(ctx, keys, nil, mses) - - remaining := cid.NewSet() - for _, k := range keys { - remaining.Add(k) - } - - out := make(chan blocks.Block) - go func() { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - defer close(out) - defer func() { - // can't just defer this call on its own, arguments are resolved *when* the defer is created - bs.CancelWants(remaining.Keys(), mses) - }() - findProvsDelay := time.NewTimer(findProviderDelay) - defer findProvsDelay.Stop() - - findProvsDelayCh := findProvsDelay.C - req := &blockRequest{ - Cid: keys[0], - Ctx: ctx, - } - - var findProvsReqCh chan<- *blockRequest - - for { - select { - case <-findProvsDelayCh: - // NB: Optimization. Assumes that providers of key[0] are likely to - // be able to provide for all keys. This currently holds true in most - // every situation. Later, this assumption may not hold as true. 
- findProvsReqCh = bs.findKeys - findProvsDelayCh = nil - case findProvsReqCh <- req: - findProvsReqCh = nil - case blk, ok := <-promise: - if !ok { - return - } - - // No need to find providers now. - findProvsDelay.Stop() - findProvsDelayCh = nil - findProvsReqCh = nil - - bs.CancelWants([]cid.Cid{blk.Cid()}, mses) - remaining.Remove(blk.Cid()) - select { - case out <- blk: - case <-ctx.Done(): - return - } - case <-ctx.Done(): - return - } - } - }() - - return out, nil + session := bs.sm.NewSession(ctx) + return session.GetBlocks(ctx, keys) } // CancelWants removes a given key from the wantlist. @@ -366,7 +277,6 @@ func (bs *Bitswap) receiveBlockFrom(blk blocks.Block, from peer.ID) error { // is waiting on a GetBlock for that object, they will receive a reference // to the same node. We should address this soon, but i'm not going to do // it now as it requires more thought and isnt causing immediate problems. - bs.notifications.Publish(blk) bs.sm.ReceiveBlockFrom(from, blk) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index ef2d73b8d..7882147ee 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -533,8 +533,8 @@ func TestWantlistCleanup(t *testing.T) { } time.Sleep(time.Millisecond * 50) - if len(bswap.GetWantlist()) != 11 { - t.Fatal("should have 11 keys in wantlist") + if len(bswap.GetWantlist()) != 5 { + t.Fatal("should have 5 keys in wantlist") } cancel() diff --git a/bitswap/workers.go b/bitswap/workers.go index 688a1d99d..614f95c1d 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -2,9 +2,6 @@ package bitswap import ( "context" - "math/rand" - "sync" - "time" engine "github.com/ipfs/go-bitswap/decision" bsmsg "github.com/ipfs/go-bitswap/message" @@ -12,16 +9,11 @@ import ( logging "github.com/ipfs/go-log" process "github.com/jbenet/goprocess" procctx "github.com/jbenet/goprocess/context" - peer "github.com/libp2p/go-libp2p-peer" ) var TaskWorkerCount = 8 func (bs *Bitswap) startWorkers(px process.Process, ctx 
context.Context) { - // Start up a worker to handle block requests this node is making - px.Go(func(px process.Process) { - bs.providerQueryManager(ctx) - }) // Start up workers to handle requests from other nodes for the data on this node for i := 0; i < TaskWorkerCount; i++ { @@ -31,11 +23,6 @@ func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { }) } - // Start up a worker to manage periodically resending our wantlist out to peers - px.Go(func(px process.Process) { - bs.rebroadcastWorker(ctx) - }) - // Start up a worker to manage sending out provides messages px.Go(func(px process.Process) { bs.provideCollector(ctx) @@ -188,93 +175,3 @@ func (bs *Bitswap) provideCollector(ctx context.Context) { } } } - -func (bs *Bitswap) rebroadcastWorker(parent context.Context) { - ctx, cancel := context.WithCancel(parent) - defer cancel() - - broadcastSignal := time.NewTicker(rebroadcastDelay.Get()) - defer broadcastSignal.Stop() - - tick := time.NewTicker(10 * time.Second) - defer tick.Stop() - - for { - log.Event(ctx, "Bitswap.Rebroadcast.idle") - select { - case <-tick.C: - n := bs.wm.WantCount() - if n > 0 { - log.Debugf("%d keys in bitswap wantlist", n) - } - case <-broadcastSignal.C: // resend unfulfilled wantlist keys - log.Event(ctx, "Bitswap.Rebroadcast.active") - entries := bs.wm.CurrentWants() - if len(entries) == 0 { - continue - } - - // TODO: come up with a better strategy for determining when to search - // for new providers for blocks. 
- i := rand.Intn(len(entries)) - select { - case bs.findKeys <- &blockRequest{ - Cid: entries[i].Cid, - Ctx: ctx, - }: - case <-ctx.Done(): - return - } - case <-ctx.Done(): - return - } - } -} - -func (bs *Bitswap) providerQueryManager(ctx context.Context) { - var activeLk sync.Mutex - kset := cid.NewSet() - - for { - select { - case e := <-bs.findKeys: - select { // make sure its not already cancelled - case <-e.Ctx.Done(): - continue - default: - } - - activeLk.Lock() - if kset.Has(e.Cid) { - activeLk.Unlock() - continue - } - kset.Add(e.Cid) - activeLk.Unlock() - - go func(e *blockRequest) { - child, cancel := context.WithTimeout(e.Ctx, providerRequestTimeout) - defer cancel() - providers := bs.network.FindProvidersAsync(child, e.Cid, maxProvidersPerRequest) - wg := &sync.WaitGroup{} - for p := range providers { - wg.Add(1) - go func(p peer.ID) { - defer wg.Done() - err := bs.network.ConnectTo(child, p) - if err != nil { - log.Debugf("failed to connect to provider %s: %s", p, err) - } - }(p) - } - wg.Wait() - activeLk.Lock() - kset.Remove(e.Cid) - activeLk.Unlock() - }(e) - - case <-ctx.Done(): - return - } - } -} From 0510c1319b61c71445949ad3e353243b0ccb7e36 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 22 Jan 2019 08:48:35 -0800 Subject: [PATCH 0702/1035] providers: don't add every connected node as a provider We now do exactly what the comment is warning about: track peers providing keys. This commit was moved from ipfs/go-bitswap@586a5c00d8db17285f30cd31feaca8105186dd01 --- bitswap/network/ipfs_impl.go | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index da2a4b4c4..ec8037b10 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -151,22 +151,7 @@ func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { // FindProvidersAsync returns a channel of providers for the given key. 
func (bsnet *impl) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { - - // Since routing queries are expensive, give bitswap the peers to which we - // have open connections. Note that this may cause issues if bitswap starts - // precisely tracking which peers provide certain keys. This optimization - // would be misleading. In the long run, this may not be the most - // appropriate place for this optimization, but it won't cause any harm in - // the short term. - connectedPeers := bsnet.host.Network().Peers() - out := make(chan peer.ID, len(connectedPeers)) // just enough buffer for these connectedPeers - for _, id := range connectedPeers { - if id == bsnet.host.ID() { - continue // ignore self as provider - } - out <- id - } - + out := make(chan peer.ID, max) go func() { defer close(out) providers := bsnet.routing.FindProvidersAsync(ctx, k, max) From c54820dc0322d78d7a59eea5824906e72c0f669f Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Mon, 18 Feb 2019 17:10:45 +0100 Subject: [PATCH 0703/1035] gx publish 1.1.23 This commit was moved from ipfs/go-bitswap@294bd92a81f8f0c0eb5d90e9c924ef15127fd8b7 --- bitswap/message/pb/message.pb.go | 201 +++++++++++++++++++------------ 1 file changed, 123 insertions(+), 78 deletions(-) diff --git a/bitswap/message/pb/message.pb.go b/bitswap/message/pb/message.pb.go index 9a6b2821b..34eacb298 100644 --- a/bitswap/message/pb/message.pb.go +++ b/bitswap/message/pb/message.pb.go @@ -3,12 +3,13 @@ package bitswap_message_pb -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" - -import io "io" +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -22,18 +23,16 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type Message struct { - Wantlist Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist" json:"wantlist"` - Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks" json:"blocks,omitempty"` - Payload []Message_Block `protobuf:"bytes,3,rep,name=payload" json:"payload"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` + Wantlist Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist,proto3" json:"wantlist"` + Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks,proto3" json:"blocks,omitempty"` + Payload []Message_Block `protobuf:"bytes,3,rep,name=payload,proto3" json:"payload"` } func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_message_c28309e4affd853b, []int{0} + return fileDescriptor_33c57e4bae7b9afd, []int{0} } func (m *Message) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -50,8 +49,8 @@ func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (dst *Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message.Merge(dst, src) +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) } func (m *Message) XXX_Size() int { return m.Size() @@ -84,17 +83,15 @@ func (m *Message) GetPayload() []Message_Block { } type Message_Wantlist struct { - Entries []Message_Wantlist_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries"` - Full bool `protobuf:"varint,2,opt,name=full,proto3" json:"full,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` + Entries []Message_Wantlist_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries"` + Full bool `protobuf:"varint,2,opt,name=full,proto3" json:"full,omitempty"` } func 
(m *Message_Wantlist) Reset() { *m = Message_Wantlist{} } func (m *Message_Wantlist) String() string { return proto.CompactTextString(m) } func (*Message_Wantlist) ProtoMessage() {} func (*Message_Wantlist) Descriptor() ([]byte, []int) { - return fileDescriptor_message_c28309e4affd853b, []int{0, 0} + return fileDescriptor_33c57e4bae7b9afd, []int{0, 0} } func (m *Message_Wantlist) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -111,8 +108,8 @@ func (m *Message_Wantlist) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return b[:n], nil } } -func (dst *Message_Wantlist) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message_Wantlist.Merge(dst, src) +func (m *Message_Wantlist) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_Wantlist.Merge(m, src) } func (m *Message_Wantlist) XXX_Size() int { return m.Size() @@ -138,18 +135,16 @@ func (m *Message_Wantlist) GetFull() bool { } type Message_Wantlist_Entry struct { - Block []byte `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` - Priority int32 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` - Cancel bool `protobuf:"varint,3,opt,name=cancel,proto3" json:"cancel,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` + Block []byte `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + Priority int32 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` + Cancel bool `protobuf:"varint,3,opt,name=cancel,proto3" json:"cancel,omitempty"` } func (m *Message_Wantlist_Entry) Reset() { *m = Message_Wantlist_Entry{} } func (m *Message_Wantlist_Entry) String() string { return proto.CompactTextString(m) } func (*Message_Wantlist_Entry) ProtoMessage() {} func (*Message_Wantlist_Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_message_c28309e4affd853b, []int{0, 0, 0} + return fileDescriptor_33c57e4bae7b9afd, []int{0, 0, 0} } func (m *Message_Wantlist_Entry) XXX_Unmarshal(b []byte) error { 
return m.Unmarshal(b) @@ -166,8 +161,8 @@ func (m *Message_Wantlist_Entry) XXX_Marshal(b []byte, deterministic bool) ([]by return b[:n], nil } } -func (dst *Message_Wantlist_Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message_Wantlist_Entry.Merge(dst, src) +func (m *Message_Wantlist_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_Wantlist_Entry.Merge(m, src) } func (m *Message_Wantlist_Entry) XXX_Size() int { return m.Size() @@ -200,17 +195,15 @@ func (m *Message_Wantlist_Entry) GetCancel() bool { } type Message_Block struct { - Prefix []byte `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` + Prefix []byte `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` } func (m *Message_Block) Reset() { *m = Message_Block{} } func (m *Message_Block) String() string { return proto.CompactTextString(m) } func (*Message_Block) ProtoMessage() {} func (*Message_Block) Descriptor() ([]byte, []int) { - return fileDescriptor_message_c28309e4affd853b, []int{0, 1} + return fileDescriptor_33c57e4bae7b9afd, []int{0, 1} } func (m *Message_Block) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -227,8 +220,8 @@ func (m *Message_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return b[:n], nil } } -func (dst *Message_Block) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message_Block.Merge(dst, src) +func (m *Message_Block) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_Block.Merge(m, src) } func (m *Message_Block) XXX_Size() int { return m.Size() @@ -259,6 +252,34 @@ func init() { proto.RegisterType((*Message_Wantlist_Entry)(nil), "bitswap.message.pb.Message.Wantlist.Entry") proto.RegisterType((*Message_Block)(nil), "bitswap.message.pb.Message.Block") } + +func 
init() { proto.RegisterFile("message.proto", fileDescriptor_33c57e4bae7b9afd) } + +var fileDescriptor_33c57e4bae7b9afd = []byte{ + // 335 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xcf, 0x4a, 0xf3, 0x40, + 0x14, 0xc5, 0x33, 0x4d, 0xd3, 0x86, 0xdb, 0x7e, 0x9b, 0xe1, 0x43, 0x86, 0x2c, 0x62, 0x14, 0x17, + 0x41, 0x70, 0x0a, 0xed, 0x13, 0x58, 0xd0, 0x85, 0xe0, 0xc2, 0x6c, 0x5c, 0x4f, 0xd2, 0x34, 0x0e, + 0xa6, 0x99, 0x90, 0x4c, 0xa9, 0x7d, 0x0b, 0x5f, 0xc1, 0x07, 0x71, 0xdf, 0x65, 0x97, 0xae, 0x44, + 0xda, 0x17, 0x91, 0xdc, 0x4e, 0xb3, 0x11, 0xc4, 0xdd, 0x3d, 0xc3, 0x39, 0xbf, 0xfb, 0x67, 0xe0, + 0xdf, 0x22, 0xad, 0x6b, 0x91, 0xa5, 0xbc, 0xac, 0x94, 0x56, 0x94, 0xc6, 0x52, 0xd7, 0x2b, 0x51, + 0xf2, 0xf6, 0x39, 0xf6, 0xae, 0x32, 0xa9, 0x9f, 0x96, 0x31, 0x4f, 0xd4, 0x62, 0x94, 0xa9, 0x4c, + 0x8d, 0xd0, 0x1a, 0x2f, 0xe7, 0xa8, 0x50, 0x60, 0x75, 0x40, 0x9c, 0xbf, 0xd9, 0xd0, 0xbf, 0x3f, + 0xa4, 0xe9, 0x2d, 0xb8, 0x2b, 0x51, 0xe8, 0x5c, 0xd6, 0x9a, 0x91, 0x80, 0x84, 0x83, 0xf1, 0x05, + 0xff, 0xd9, 0x81, 0x1b, 0x3b, 0x7f, 0x34, 0xde, 0x69, 0x77, 0xf3, 0x79, 0x6a, 0x45, 0x6d, 0x96, + 0x9e, 0x40, 0x2f, 0xce, 0x55, 0xf2, 0x5c, 0xb3, 0x4e, 0x60, 0x87, 0xc3, 0xc8, 0x28, 0x7a, 0x0d, + 0xfd, 0x52, 0xac, 0x73, 0x25, 0x66, 0xcc, 0x0e, 0xec, 0x70, 0x30, 0x3e, 0xfb, 0x0d, 0x3f, 0x6d, + 0x42, 0x86, 0x7d, 0xcc, 0x79, 0xef, 0x04, 0xdc, 0x63, 0x5f, 0x7a, 0x07, 0xfd, 0xb4, 0xd0, 0x95, + 0x4c, 0x6b, 0x46, 0x90, 0x77, 0xf9, 0x97, 0x71, 0xf9, 0x4d, 0xa1, 0xab, 0xf5, 0x11, 0x6c, 0x00, + 0x94, 0x42, 0x77, 0xbe, 0xcc, 0x73, 0xd6, 0x09, 0x48, 0xe8, 0x46, 0x58, 0x7b, 0x0f, 0xe0, 0xa0, + 0x97, 0xfe, 0x07, 0x07, 0x57, 0xc0, 0xab, 0x0c, 0xa3, 0x83, 0xa0, 0x1e, 0xb8, 0x65, 0x25, 0x55, + 0x25, 0xf5, 0x1a, 0x63, 0x4e, 0xd4, 0xea, 0xe6, 0x04, 0x89, 0x28, 0x92, 0x34, 0x67, 0x36, 0x02, + 0x8d, 0xf2, 0x26, 0xe0, 0xe0, 0x5e, 0x8d, 0xa1, 0xac, 0xd2, 0xb9, 0x7c, 0x31, 0x4c, 0xa3, 0x9a, + 0x39, 0x66, 0x42, 0x0b, 0x04, 0x0e, 0x23, 0xac, 0xa7, 
0x6c, 0xb3, 0xf3, 0xc9, 0x76, 0xe7, 0x93, + 0xaf, 0x9d, 0x4f, 0x5e, 0xf7, 0xbe, 0xb5, 0xdd, 0xfb, 0xd6, 0xc7, 0xde, 0xb7, 0xe2, 0x1e, 0x7e, + 0xe2, 0xe4, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x1d, 0x6e, 0x21, 0x18, 0x02, 0x00, 0x00, +} + func (m *Message) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -424,6 +445,9 @@ func encodeVarintMessage(dAtA []byte, offset int, v uint64) int { return offset + 1 } func (m *Message) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Wantlist.Size() @@ -444,6 +468,9 @@ func (m *Message) Size() (n int) { } func (m *Message_Wantlist) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Entries) > 0 { @@ -459,6 +486,9 @@ func (m *Message_Wantlist) Size() (n int) { } func (m *Message_Wantlist_Entry) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Block) @@ -475,6 +505,9 @@ func (m *Message_Wantlist_Entry) Size() (n int) { } func (m *Message_Block) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Prefix) @@ -516,7 +549,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -544,7 +577,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -553,6 +586,9 @@ func (m *Message) Unmarshal(dAtA []byte) error { return ErrInvalidLengthMessage } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessage + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -574,7 +610,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -583,6 +619,9 @@ func (m *Message) Unmarshal(dAtA []byte) error { return ErrInvalidLengthMessage } postIndex := iNdEx 
+ byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -603,7 +642,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -612,6 +651,9 @@ func (m *Message) Unmarshal(dAtA []byte) error { return ErrInvalidLengthMessage } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessage + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -629,6 +671,9 @@ func (m *Message) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthMessage } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMessage + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -656,7 +701,7 @@ func (m *Message_Wantlist) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -684,7 +729,7 @@ func (m *Message_Wantlist) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -693,6 +738,9 @@ func (m *Message_Wantlist) Unmarshal(dAtA []byte) error { return ErrInvalidLengthMessage } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessage + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -715,7 +763,7 @@ func (m *Message_Wantlist) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -730,6 +778,9 @@ func (m *Message_Wantlist) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthMessage } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMessage + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -757,7 +808,7 @@ func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << 
shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -785,7 +836,7 @@ func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -794,6 +845,9 @@ func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthMessage } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -816,7 +870,7 @@ func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Priority |= (int32(b) & 0x7F) << shift + m.Priority |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -835,7 +889,7 @@ func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -850,6 +904,9 @@ func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthMessage } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMessage + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -877,7 +934,7 @@ func (m *Message_Block) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -905,7 +962,7 @@ func (m *Message_Block) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -914,6 +971,9 @@ func (m *Message_Block) Unmarshal(dAtA []byte) error { return ErrInvalidLengthMessage } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -936,7 +996,7 @@ func (m *Message_Block) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 
0x80 { break } @@ -945,6 +1005,9 @@ func (m *Message_Block) Unmarshal(dAtA []byte) error { return ErrInvalidLengthMessage } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -962,6 +1025,9 @@ func (m *Message_Block) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthMessage } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMessage + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1028,10 +1094,13 @@ func skipMessage(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthMessage } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthMessage + } return iNdEx, nil case 3: for { @@ -1060,6 +1129,9 @@ func skipMessage(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthMessage + } } return iNdEx, nil case 4: @@ -1078,30 +1150,3 @@ var ( ErrInvalidLengthMessage = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowMessage = fmt.Errorf("proto: integer overflow") ) - -func init() { proto.RegisterFile("message.proto", fileDescriptor_message_c28309e4affd853b) } - -var fileDescriptor_message_c28309e4affd853b = []byte{ - // 328 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xcf, 0x4a, 0xf3, 0x40, - 0x14, 0xc5, 0x3b, 0x4d, 0xd3, 0x86, 0xdb, 0x7e, 0xf0, 0x31, 0x88, 0x84, 0x2c, 0x62, 0x14, 0x17, - 0x41, 0x70, 0x0a, 0xed, 0x13, 0x58, 0xd0, 0x85, 0xe0, 0xc2, 0x6c, 0x5c, 0x4f, 0xd2, 0x34, 0x0e, - 0xa6, 0x99, 0x30, 0x33, 0xa5, 0xf6, 0x2d, 0x7c, 0x05, 0x1f, 0xc4, 0x7d, 0x97, 0x3e, 0x81, 0x48, - 0x7d, 0x11, 0xc9, 0xed, 0x34, 0x1b, 0x41, 0xdc, 0xdd, 0x33, 0x9c, 0xf3, 0xbb, 0x7f, 0x06, 0xfe, - 0x2d, 0x73, 0xad, 0x79, 0x91, 0xb3, 0x5a, 0x49, 0x23, 0x29, 0x4d, 0x85, 0xd1, 0x6b, 0x5e, 0xb3, - 0xf6, 0x39, 0x0d, 0x2e, 0x0b, 0x61, 0x1e, 0x57, 0x29, 0xcb, 0xe4, 
0x72, 0x5c, 0xc8, 0x42, 0x8e, - 0xd1, 0x9a, 0xae, 0x16, 0xa8, 0x50, 0x60, 0xb5, 0x47, 0x9c, 0xbd, 0x3a, 0x30, 0xb8, 0xdb, 0xa7, - 0xe9, 0x0d, 0x78, 0x6b, 0x5e, 0x99, 0x52, 0x68, 0xe3, 0x93, 0x88, 0xc4, 0xc3, 0xc9, 0x39, 0xfb, - 0xd9, 0x81, 0x59, 0x3b, 0x7b, 0xb0, 0xde, 0x59, 0x6f, 0xfb, 0x71, 0xd2, 0x49, 0xda, 0x2c, 0x3d, - 0x86, 0x7e, 0x5a, 0xca, 0xec, 0x49, 0xfb, 0xdd, 0xc8, 0x89, 0x47, 0x89, 0x55, 0xf4, 0x0a, 0x06, - 0x35, 0xdf, 0x94, 0x92, 0xcf, 0x7d, 0x27, 0x72, 0xe2, 0xe1, 0xe4, 0xf4, 0x37, 0xfc, 0xac, 0x09, - 0x59, 0xf6, 0x21, 0x17, 0xbc, 0x11, 0xf0, 0x0e, 0x7d, 0xe9, 0x2d, 0x0c, 0xf2, 0xca, 0x28, 0x91, - 0x6b, 0x9f, 0x20, 0xef, 0xe2, 0x2f, 0xe3, 0xb2, 0xeb, 0xca, 0xa8, 0xcd, 0x01, 0x6c, 0x01, 0x94, - 0x42, 0x6f, 0xb1, 0x2a, 0x4b, 0xbf, 0x1b, 0x91, 0xd8, 0x4b, 0xb0, 0x0e, 0xee, 0xc1, 0x45, 0x2f, - 0x3d, 0x02, 0x17, 0x57, 0xc0, 0xab, 0x8c, 0x92, 0xbd, 0xa0, 0x01, 0x78, 0xb5, 0x12, 0x52, 0x09, - 0xb3, 0xc1, 0x98, 0x9b, 0xb4, 0xba, 0x39, 0x41, 0xc6, 0xab, 0x2c, 0x2f, 0x7d, 0x07, 0x81, 0x56, - 0x05, 0x53, 0x70, 0x71, 0xaf, 0xc6, 0x50, 0xab, 0x7c, 0x21, 0x9e, 0x2d, 0xd3, 0xaa, 0x66, 0x8e, - 0x39, 0x37, 0x1c, 0x81, 0xa3, 0x04, 0xeb, 0xd9, 0xff, 0xed, 0x2e, 0x24, 0xef, 0xbb, 0x90, 0x7c, - 0xee, 0x42, 0xf2, 0xf2, 0x15, 0x76, 0xd2, 0x3e, 0x7e, 0xde, 0xf4, 0x3b, 0x00, 0x00, 0xff, 0xff, - 0xd1, 0x6a, 0x3a, 0xa2, 0x10, 0x02, 0x00, 0x00, -} From 2c0832b97fe4ea571ba08832316c6c16688d1761 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Mon, 18 Feb 2019 20:06:46 +0100 Subject: [PATCH 0704/1035] gx publish 1.1.24 This commit was moved from ipfs/go-bitswap@7b911d94c9a4a066351abf953a17144313e9cffe --- bitswap/testutil/testutil.go | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/bitswap/testutil/testutil.go b/bitswap/testutil/testutil.go index b25c1d355..3d7996668 100644 --- a/bitswap/testutil/testutil.go +++ b/bitswap/testutil/testutil.go @@ -1,9 +1,7 @@ package testutil import ( - "bytes" - - random "github.com/jbenet/go-random" + 
"math/rand" bsmsg "github.com/ipfs/go-bitswap/message" "github.com/ipfs/go-bitswap/wantlist" @@ -15,20 +13,15 @@ import ( var blockGenerator = blocksutil.NewBlockGenerator() var prioritySeq int -var seedSeq int64 - -func randomBytes(n int64, seed int64) []byte { - data := new(bytes.Buffer) - random.WritePseudoRandomBytes(n, data, seed) - return data.Bytes() -} // GenerateBlocksOfSize generates a series of blocks of the given byte size func GenerateBlocksOfSize(n int, size int64) []blocks.Block { generatedBlocks := make([]blocks.Block, 0, n) + buf := make([]byte, size) for i := 0; i < n; i++ { - seedSeq++ - b := blocks.NewBlock(randomBytes(size, seedSeq)) + // rand.Read never errors + rand.Read(buf) + b := blocks.NewBlock(buf) generatedBlocks = append(generatedBlocks, b) } From ce3d7e88061174816ea2cb84baf88787489abfb0 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 20 Feb 2019 12:36:32 -0800 Subject: [PATCH 0705/1035] pubsub: fix race on shutdown Calling `wg.Add` after `wg.Wait` has returned is invalid. This change swaps the wait group for a plain rwmutex. (caught with the race detector) This commit was moved from ipfs/go-bitswap@a5edbdee2c3631749c65fa079c30e78232521c3c --- bitswap/notifications/notifications.go | 58 +++++++++------------ bitswap/notifications/notifications_test.go | 2 +- 2 files changed, 27 insertions(+), 33 deletions(-) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 81ba39499..b3283705c 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -20,29 +20,21 @@ type PubSub interface { func New() PubSub { return &impl{ wrapped: *pubsub.New(bufferSize), - cancel: make(chan struct{}), } } type impl struct { + lk sync.RWMutex wrapped pubsub.PubSub - // These two fields make up a shutdown "lock". - // We need them as calling, e.g., `Unsubscribe` after calling `Shutdown` - // blocks forever and fixing this in pubsub would be rather invasive. 
- cancel chan struct{} - wg sync.WaitGroup + closed bool } func (ps *impl) Publish(block blocks.Block) { - ps.wg.Add(1) - defer ps.wg.Done() - - select { - case <-ps.cancel: - // Already shutdown, bail. + ps.lk.RLock() + defer ps.lk.RUnlock() + if ps.closed { return - default: } ps.wrapped.Pub(block, block.Cid().KeyString()) @@ -50,12 +42,13 @@ func (ps *impl) Publish(block blocks.Block) { // Not safe to call more than once. func (ps *impl) Shutdown() { - // Interrupt in-progress subscriptions. - close(ps.cancel) - // Wait for them to finish. - ps.wg.Wait() - // shutdown the pubsub. + ps.lk.Lock() + defer ps.lk.Unlock() + if ps.closed { + return + } ps.wrapped.Shutdown() + ps.closed = true } // Subscribe returns a channel of blocks for the given |keys|. |blockChannel| @@ -71,32 +64,32 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Bl } // prevent shutdown - ps.wg.Add(1) + ps.lk.RLock() + defer ps.lk.RUnlock() - // check if shutdown *after* preventing shutdowns. - select { - case <-ps.cancel: - // abort, allow shutdown to continue. - ps.wg.Done() + if ps.closed { close(blocksCh) return blocksCh - default: } ps.wrapped.AddSubOnceEach(valuesCh, toStrings(keys)...) go func() { defer func() { - ps.wrapped.Unsub(valuesCh) close(blocksCh) - // Unblock shutdown. - ps.wg.Done() + ps.lk.RLock() + defer ps.lk.RUnlock() + if ps.closed { + // Don't touch the pubsub instance if we're + // already closed. + return + } + + ps.wrapped.Unsub(valuesCh) }() for { select { - case <-ps.cancel: - return case <-ctx.Done(): return case val, ok := <-valuesCh: @@ -107,9 +100,10 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Bl if !ok { return } + // We could end up blocking here if the client + // forgets to cancel the context but that's not + // our problem. 
select { - case <-ps.cancel: - return case <-ctx.Done(): return case blocksCh <- block: // continue diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go index 38ab6f9af..4e59ae9b3 100644 --- a/bitswap/notifications/notifications_test.go +++ b/bitswap/notifications/notifications_test.go @@ -114,7 +114,7 @@ func TestShutdownBeforeUnsubscribe(t *testing.T) { if ok { t.Fatal("channel should have been closed") } - default: + case <-time.After(5 * time.Second): t.Fatal("channel should have been closed") } } From d401b2aa4a6dba3683095bf92efd3a46bc967bbb Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 20 Feb 2019 12:43:06 -0800 Subject: [PATCH 0706/1035] tests: bring tests back under race detector goroutine limit This commit was moved from ipfs/go-bitswap@13f4ed3c9d231ff6375b1dfd70dc0177c87719da --- bitswap/bitswap_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 7882147ee..6b0f5c75d 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -140,7 +140,7 @@ func TestLargeSwarm(t *testing.T) { if detectrace.WithRace() { // when running with the race detector, 500 instances launches // well over 8k goroutines. This hits a race detector limit. 
- numInstances = 75 + numInstances = 50 } else if travis.IsRunning() { numInstances = 200 } else { From 7a4ad51f1f03d5faf5cca8bc22538efe3c631ba9 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 20 Feb 2019 13:32:03 -0800 Subject: [PATCH 0707/1035] bitswap: fix stat data race This commit was moved from ipfs/go-bitswap@e6a2a40863ff35b5fce2083616fd3a450125ec8f --- bitswap/bitswap.go | 5 +++-- bitswap/bitswap_with_sessions_test.go | 6 +++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 0bd53b3d0..97e1daa1a 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -6,7 +6,6 @@ import ( "context" "errors" "sync" - "sync/atomic" "time" bssrs "github.com/ipfs/go-bitswap/sessionrequestsplitter" @@ -292,7 +291,9 @@ func (bs *Bitswap) receiveBlockFrom(blk blocks.Block, from peer.ID) error { } func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { - atomic.AddUint64(&bs.counters.messagesRecvd, 1) + bs.counterLk.Lock() + bs.counters.messagesRecvd++ + bs.counterLk.Unlock() // This call records changes to wantlists, blocks received, // and number of bytes transfered. 
diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/bitswap_with_sessions_test.go index 0be7bc97c..d4d0cfee4 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -104,7 +104,11 @@ func TestSessionBetweenPeers(t *testing.T) { } } for _, is := range inst[2:] { - if is.Exchange.counters.messagesRecvd > 2 { + stat, err := is.Exchange.Stat() + if err != nil { + t.Fatal(err) + } + if stat.MessagesReceived > 2 { t.Fatal("uninvolved nodes should only receive two messages", is.Exchange.counters.messagesRecvd) } } From c4b8cf8b20056c808ca013a285c2511ec72dba84 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 20 Feb 2019 13:54:08 -0800 Subject: [PATCH 0708/1035] test: fix race when counting tagged peers This commit was moved from ipfs/go-bitswap@ba11ef59fcdf4f87f7c8fe87e5cc77388d62258f --- .../sessionpeermanager_test.go | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index d6d1440a4..1cad238ad 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -41,18 +41,24 @@ func (fppf *fakePeerProviderFinder) FindProvidersAsync(ctx context.Context, c ci } type fakePeerTagger struct { + lk sync.Mutex taggedPeers []peer.ID wait sync.WaitGroup } func (fpt *fakePeerTagger) TagPeer(p peer.ID, tag string, n int) { fpt.wait.Add(1) + + fpt.lk.Lock() + defer fpt.lk.Unlock() fpt.taggedPeers = append(fpt.taggedPeers, p) } func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) { defer fpt.wait.Done() + fpt.lk.Lock() + defer fpt.lk.Unlock() for i := 0; i < len(fpt.taggedPeers); i++ { if fpt.taggedPeers[i] == p { fpt.taggedPeers[i] = fpt.taggedPeers[len(fpt.taggedPeers)-1] @@ -62,6 +68,12 @@ func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) { } } +func (fpt *fakePeerTagger) count() int { + fpt.lk.Lock() + 
defer fpt.lk.Unlock() + return len(fpt.taggedPeers) +} + func TestFindingMorePeers(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) @@ -195,6 +207,7 @@ func TestOrderingPeers(t *testing.T) { t.Fatal("should not return the same random peers each time") } } + func TestUntaggingPeers(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) @@ -216,13 +229,13 @@ func TestUntaggingPeers(t *testing.T) { } time.Sleep(2 * time.Millisecond) - if len(fpt.taggedPeers) != len(peers) { + if fpt.count() != len(peers) { t.Fatal("Peers were not tagged!") } <-ctx.Done() fpt.wait.Wait() - if len(fpt.taggedPeers) != 0 { + if fpt.count() != 0 { t.Fatal("Peers were not untagged!") } } From 300e01655dcdb77f11f1be2c9ce1bf3e5059a077 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 20 Feb 2019 13:58:00 -0800 Subject: [PATCH 0709/1035] pubsub: add back closed channel Ensures that we don't leave goroutines behind, even if the client forgets to unsubscribe. This commit was moved from ipfs/go-bitswap@52f963033a0d7894c35e1dd168b99e6db1e5a3d9 --- bitswap/notifications/notifications.go | 31 ++++++++++++++++---------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index b3283705c..240379ae0 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -20,6 +20,7 @@ type PubSub interface { func New() PubSub { return &impl{ wrapped: *pubsub.New(bufferSize), + closed: make(chan struct{}), } } @@ -27,28 +28,31 @@ type impl struct { lk sync.RWMutex wrapped pubsub.PubSub - closed bool + closed chan struct{} } func (ps *impl) Publish(block blocks.Block) { ps.lk.RLock() defer ps.lk.RUnlock() - if ps.closed { + select { + case <-ps.closed: return + default: } ps.wrapped.Pub(block, block.Cid().KeyString()) } -// Not safe to call more than once. 
func (ps *impl) Shutdown() { ps.lk.Lock() defer ps.lk.Unlock() - if ps.closed { + select { + case <-ps.closed: return + default: } + close(ps.closed) ps.wrapped.Shutdown() - ps.closed = true } // Subscribe returns a channel of blocks for the given |keys|. |blockChannel| @@ -67,9 +71,11 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Bl ps.lk.RLock() defer ps.lk.RUnlock() - if ps.closed { + select { + case <-ps.closed: close(blocksCh) return blocksCh + default: } ps.wrapped.AddSubOnceEach(valuesCh, toStrings(keys)...) @@ -79,10 +85,12 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Bl ps.lk.RLock() defer ps.lk.RUnlock() - if ps.closed { - // Don't touch the pubsub instance if we're - // already closed. + // Don't touch the pubsub instance if we're + // already closed. + select { + case <-ps.closed: return + default: } ps.wrapped.Unsub(valuesCh) @@ -92,6 +100,7 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Bl select { case <-ctx.Done(): return + case <-ps.closed: case val, ok := <-valuesCh: if !ok { return @@ -100,13 +109,11 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Bl if !ok { return } - // We could end up blocking here if the client - // forgets to cancel the context but that's not - // our problem. 
select { case <-ctx.Done(): return case blocksCh <- block: // continue + case <-ps.closed: } } } From a676f3850b32a58d1a15a26f65a3ada7545d4580 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 19 Feb 2019 14:57:34 -0800 Subject: [PATCH 0710/1035] fix(wantlist): remove races on setup fix race conditions while setting up wantlists by creating peer queues on demand BREAKING CHANGE: PeerManager SendMessage signature changed fix #51 This commit was moved from ipfs/go-bitswap@32d0c188e6c3fc003001db41ae4ae59d9c99bb89 --- bitswap/bitswap.go | 6 +- bitswap/peermanager/peermanager.go | 154 ++++++------------------ bitswap/peermanager/peermanager_test.go | 14 +-- bitswap/wantmanager/wantmanager.go | 48 ++++++-- bitswap/wantmanager/wantmanager_test.go | 32 +++-- 5 files changed, 103 insertions(+), 151 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 97e1daa1a..3abbc1979 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -132,7 +132,6 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, } bs.wm.SetDelegate(bs.pm) - bs.pm.Startup() bs.wm.Startup() bs.pqm.Startup() network.SetDelegate(bs) @@ -361,14 +360,13 @@ func (bs *Bitswap) updateReceiveCounters(b blocks.Block) { // Connected/Disconnected warns bitswap about peer connections. func (bs *Bitswap) PeerConnected(p peer.ID) { - initialWants := bs.wm.CurrentBroadcastWants() - bs.pm.Connected(p, initialWants) + bs.wm.Connected(p) bs.engine.PeerConnected(p) } // Connected/Disconnected warns bitswap about peer connections. 
func (bs *Bitswap) PeerDisconnected(p peer.ID) { - bs.pm.Disconnected(p) + bs.wm.Disconnected(p) bs.engine.PeerDisconnected(p) } diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index fed1b3f76..ca7665cf7 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -2,6 +2,7 @@ package peermanager import ( "context" + "sync" bsmsg "github.com/ipfs/go-bitswap/message" wantlist "github.com/ipfs/go-bitswap/wantlist" @@ -34,150 +35,56 @@ type peerMessage interface { // PeerManager manages a pool of peers and sends messages to peers in the pool. type PeerManager struct { - // sync channel for Run loop - peerMessages chan peerMessage - - // synchronized by Run loop, only touch inside there - peerQueues map[peer.ID]PeerQueue - + peerQueues map[peer.ID]PeerQueue + lk sync.RWMutex createPeerQueue PeerQueueFactory ctx context.Context - cancel func() } // New creates a new PeerManager, given a context and a peerQueueFactory. func New(ctx context.Context, createPeerQueue PeerQueueFactory) *PeerManager { - ctx, cancel := context.WithCancel(ctx) return &PeerManager{ - peerMessages: make(chan peerMessage, 10), peerQueues: make(map[peer.ID]PeerQueue), createPeerQueue: createPeerQueue, ctx: ctx, - cancel: cancel, } } // ConnectedPeers returns a list of peers this PeerManager is managing. func (pm *PeerManager) ConnectedPeers() []peer.ID { - resp := make(chan []peer.ID, 1) - select { - case pm.peerMessages <- &getPeersMessage{resp}: - case <-pm.ctx.Done(): - return nil - } - select { - case peers := <-resp: - return peers - case <-pm.ctx.Done(): - return nil - } -} - -// Connected is called to add a new peer to the pool, and send it an initial set -// of wants. -func (pm *PeerManager) Connected(p peer.ID, initialEntries []*wantlist.Entry) { - select { - case pm.peerMessages <- &connectPeerMessage{p, initialEntries}: - case <-pm.ctx.Done(): - } -} - -// Disconnected is called to remove a peer from the pool. 
-func (pm *PeerManager) Disconnected(p peer.ID) { - select { - case pm.peerMessages <- &disconnectPeerMessage{p}: - case <-pm.ctx.Done(): - } -} - -// SendMessage is called to send a message to all or some peers in the pool; -// if targets is nil, it sends to all. -func (pm *PeerManager) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) { - select { - case pm.peerMessages <- &sendPeerMessage{entries: entries, targets: targets, from: from}: - case <-pm.ctx.Done(): - } -} - -// Startup enables the run loop for the PeerManager - no processing will occur -// if startup is not called. -func (pm *PeerManager) Startup() { - go pm.run() -} - -// Shutdown shutsdown processing for the PeerManager. -func (pm *PeerManager) Shutdown() { - pm.cancel() -} - -func (pm *PeerManager) run() { - for { - select { - case message := <-pm.peerMessages: - message.handle(pm) - case <-pm.ctx.Done(): - return - } - } -} + pm.lk.RLock() + defer pm.lk.RUnlock() -type sendPeerMessage struct { - entries []*bsmsg.Entry - targets []peer.ID - from uint64 -} - -func (s *sendPeerMessage) handle(pm *PeerManager) { - pm.sendMessage(s) -} - -type connectPeerMessage struct { - p peer.ID - initialEntries []*wantlist.Entry -} - -func (c *connectPeerMessage) handle(pm *PeerManager) { - pm.startPeerHandler(c.p, c.initialEntries) -} - -type disconnectPeerMessage struct { - p peer.ID -} - -func (dc *disconnectPeerMessage) handle(pm *PeerManager) { - pm.stopPeerHandler(dc.p) -} - -type getPeersMessage struct { - peerResp chan<- []peer.ID -} - -func (gp *getPeersMessage) handle(pm *PeerManager) { - pm.getPeers(gp.peerResp) -} - -func (pm *PeerManager) getPeers(peerResp chan<- []peer.ID) { peers := make([]peer.ID, 0, len(pm.peerQueues)) for p := range pm.peerQueues { peers = append(peers, p) } - peerResp <- peers + + return peers } -func (pm *PeerManager) startPeerHandler(p peer.ID, initialEntries []*wantlist.Entry) PeerQueue { +// Connected is called to add a new peer to the pool, and send it an 
initial set +// of wants. +func (pm *PeerManager) Connected(p peer.ID, initialEntries []*wantlist.Entry) { + pm.lk.Lock() + defer pm.lk.Unlock() + mq, ok := pm.peerQueues[p] if ok { mq.RefIncrement() - return nil + return } mq = pm.createPeerQueue(p) pm.peerQueues[p] = mq mq.Startup(pm.ctx, initialEntries) - return mq } -func (pm *PeerManager) stopPeerHandler(p peer.ID) { +// Disconnected is called to remove a peer from the pool. +func (pm *PeerManager) Disconnected(p peer.ID) { + pm.lk.Lock() + defer pm.lk.Unlock() + pq, ok := pm.peerQueues[p] if !ok { // TODO: log error? @@ -192,19 +99,28 @@ func (pm *PeerManager) stopPeerHandler(p peer.ID) { delete(pm.peerQueues, p) } -func (pm *PeerManager) sendMessage(ms *sendPeerMessage) { - if len(ms.targets) == 0 { +// SendMessage is called to send a message to all or some peers in the pool; +// if targets is nil, it sends to all. +func (pm *PeerManager) SendMessage(initialEntries []*wantlist.Entry, entries []*bsmsg.Entry, targets []peer.ID, from uint64) { + pm.lk.Lock() + defer pm.lk.Unlock() + + if len(targets) == 0 { for _, p := range pm.peerQueues { - p.AddMessage(ms.entries, ms.from) + p.AddMessage(entries, from) } } else { - for _, t := range ms.targets { + for _, t := range targets { p, ok := pm.peerQueues[t] if !ok { - log.Infof("tried sending wantlist change to non-partner peer: %s", t) - continue + p = pm.createPeerQueue(t) + pm.peerQueues[t] = p + p.Startup(pm.ctx, initialEntries) + // this is a "0 reference" queue because we haven't actually connected to it + // sending the first message will cause it to connect + p.RefDecrement() } - p.AddMessage(ms.entries, ms.from) + p.AddMessage(entries, from) } } } diff --git a/bitswap/peermanager/peermanager_test.go b/bitswap/peermanager/peermanager_test.go index 9617dad38..fa9d79405 100644 --- a/bitswap/peermanager/peermanager_test.go +++ b/bitswap/peermanager/peermanager_test.go @@ -79,7 +79,6 @@ func TestAddingAndRemovingPeers(t *testing.T) { tp := 
testutil.GeneratePeers(5) peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4] peerManager := New(ctx, peerQueueFactory) - peerManager.Startup() peerManager.Connected(peer1, nil) peerManager.Connected(peer2, nil) @@ -118,14 +117,13 @@ func TestAddingAndRemovingPeers(t *testing.T) { func TestSendingMessagesToPeers(t *testing.T) { ctx := context.Background() - messagesSent := make(chan messageSent) + messagesSent := make(chan messageSent, 16) peerQueueFactory := makePeerQueueFactory(messagesSent) tp := testutil.GeneratePeers(5) peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4] peerManager := New(ctx, peerQueueFactory) - peerManager.Startup() peerManager.Connected(peer1, nil) peerManager.Connected(peer2, nil) @@ -134,7 +132,7 @@ func TestSendingMessagesToPeers(t *testing.T) { entries := testutil.GenerateMessageEntries(5, false) ses := testutil.GenerateSessionID() - peerManager.SendMessage(entries, nil, ses) + peerManager.SendMessage(nil, entries, nil, ses) peersReceived := collectAndCheckMessages( ctx, t, messagesSent, entries, ses, 10*time.Millisecond) @@ -155,11 +153,11 @@ func TestSendingMessagesToPeers(t *testing.T) { var peersToSendTo []peer.ID peersToSendTo = append(peersToSendTo, peer1, peer3, peer4) - peerManager.SendMessage(entries, peersToSendTo, ses) + peerManager.SendMessage(nil, entries, peersToSendTo, ses) peersReceived = collectAndCheckMessages( ctx, t, messagesSent, entries, ses, 10*time.Millisecond) - if len(peersReceived) != 2 { + if len(peersReceived) != 3 { t.Fatal("Incorrect number of peers received messages") } @@ -173,7 +171,7 @@ func TestSendingMessagesToPeers(t *testing.T) { t.Fatal("Peers received message but should not have") } - if testutil.ContainsPeer(peersReceived, peer4) { - t.Fatal("Peers targeted received message but was not connected") + if !testutil.ContainsPeer(peersReceived, peer4) { + t.Fatal("Peer should have autoconnected on message send") } } diff --git 
a/bitswap/wantmanager/wantmanager.go b/bitswap/wantmanager/wantmanager.go index 3e5a6c9ab..8b2480599 100644 --- a/bitswap/wantmanager/wantmanager.go +++ b/bitswap/wantmanager/wantmanager.go @@ -20,10 +20,12 @@ const ( maxPriority = math.MaxInt32 ) -// WantSender sends changes out to the network as they get added to the wantlist +// PeerHandler sends changes out to the network as they get added to the wantlist // managed by the WantManager. -type WantSender interface { - SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) +type PeerHandler interface { + Disconnected(p peer.ID) + Connected(p peer.ID, initialEntries []*wantlist.Entry) + SendMessage(initialEntries []*wantlist.Entry, entries []*bsmsg.Entry, targets []peer.ID, from uint64) } type wantMessage interface { @@ -46,7 +48,7 @@ type WantManager struct { ctx context.Context cancel func() - wantSender WantSender + peerHandler PeerHandler wantlistGauge metrics.Gauge } @@ -66,8 +68,8 @@ func New(ctx context.Context) *WantManager { } // SetDelegate specifies who will send want changes out to the internet. -func (wm *WantManager) SetDelegate(wantSender WantSender) { - wm.wantSender = wantSender +func (wm *WantManager) SetDelegate(peerHandler PeerHandler) { + wm.peerHandler = peerHandler } // WantBlocks adds the given cids to the wantlist, tracked by the given session. @@ -145,6 +147,22 @@ func (wm *WantManager) WantCount() int { } } +// Connected is called when a new peer is connected +func (wm *WantManager) Connected(p peer.ID) { + select { + case wm.wantMessages <- &connectedMessage{p}: + case <-wm.ctx.Done(): + } +} + +// Disconnected is called when a peer is disconnected +func (wm *WantManager) Disconnected(p peer.ID) { + select { + case wm.wantMessages <- &disconnectedMessage{p}: + case <-wm.ctx.Done(): + } +} + // Startup starts processing for the WantManager. 
func (wm *WantManager) Startup() { go wm.run() @@ -214,7 +232,7 @@ func (ws *wantSet) handle(wm *WantManager) { } // broadcast those wantlist changes - wm.wantSender.SendMessage(ws.entries, ws.targets, ws.from) + wm.peerHandler.SendMessage(wm.bcwl.Entries(), ws.entries, ws.targets, ws.from) } type isWantedMessage struct { @@ -250,3 +268,19 @@ type wantCountMessage struct { func (wcm *wantCountMessage) handle(wm *WantManager) { wcm.resp <- wm.wl.Len() } + +type connectedMessage struct { + p peer.ID +} + +func (cm *connectedMessage) handle(wm *WantManager) { + wm.peerHandler.Connected(cm.p, wm.bcwl.Entries()) +} + +type disconnectedMessage struct { + p peer.ID +} + +func (dm *disconnectedMessage) handle(wm *WantManager) { + wm.peerHandler.Disconnected(dm.p) +} diff --git a/bitswap/wantmanager/wantmanager_test.go b/bitswap/wantmanager/wantmanager_test.go index 85590bb15..37a1d2766 100644 --- a/bitswap/wantmanager/wantmanager_test.go +++ b/bitswap/wantmanager/wantmanager_test.go @@ -7,35 +7,41 @@ import ( "testing" "github.com/ipfs/go-bitswap/testutil" + wantlist "github.com/ipfs/go-bitswap/wantlist" bsmsg "github.com/ipfs/go-bitswap/message" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-peer" ) -type fakeWantSender struct { - lk sync.RWMutex - lastWantSet wantSet +type fakePeerHandler struct { + lk sync.RWMutex + lastWantSet wantSet + initialEntries []*wantlist.Entry } -func (fws *fakeWantSender) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) { - fws.lk.Lock() - fws.lastWantSet = wantSet{entries, targets, from} - fws.lk.Unlock() +func (fph *fakePeerHandler) SendMessage(initialEntries []*wantlist.Entry, entries []*bsmsg.Entry, targets []peer.ID, from uint64) { + fph.lk.Lock() + fph.lastWantSet = wantSet{entries, targets, from} + fph.initialEntries = initialEntries + fph.lk.Unlock() } -func (fws *fakeWantSender) getLastWantSet() wantSet { - fws.lk.Lock() - defer fws.lk.Unlock() - return fws.lastWantSet +func (fph *fakePeerHandler) 
Connected(p peer.ID, initialEntries []*wantlist.Entry) {} +func (fph *fakePeerHandler) Disconnected(p peer.ID) {} + +func (fph *fakePeerHandler) getLastWantSet() wantSet { + fph.lk.Lock() + defer fph.lk.Unlock() + return fph.lastWantSet } func setupTestFixturesAndInitialWantList() ( - context.Context, *fakeWantSender, *WantManager, []cid.Cid, []cid.Cid, []peer.ID, uint64, uint64) { + context.Context, *fakePeerHandler, *WantManager, []cid.Cid, []cid.Cid, []peer.ID, uint64, uint64) { ctx := context.Background() // setup fixtures - wantSender := &fakeWantSender{} + wantSender := &fakePeerHandler{} wantManager := New(ctx) keys := testutil.GenerateCids(10) otherKeys := testutil.GenerateCids(5) From 1073af855af72ef6e38f443421a49457537988e4 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 19 Feb 2019 15:02:43 -0800 Subject: [PATCH 0711/1035] feat(messagequeue): limit retries Limit retrying sending of a message even when a successful reconnect occurs This commit was moved from ipfs/go-bitswap@fd3edeac3b03a09e44d64faa755e045a4b668ce4 --- bitswap/messagequeue/messagequeue.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go index 294bad193..9e0f2df6b 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -14,6 +14,8 @@ import ( var log = logging.Logger("bitswap") +const maxRetries = 10 + // MessageNetwork is any network that can connect peers and generate a message // sender. type MessageNetwork interface { @@ -162,7 +164,7 @@ func (mq *MessageQueue) doWork(ctx context.Context) { } // send wantlist updates - for { // try to send this message until we fail. + for i := 0; i < maxRetries; i++ { // try to send this message until we fail. 
if mq.attemptSendAndRecovery(ctx, wlm) { return } From b90ad9f1e12435a860890d76ac7348bb180595b2 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 19 Feb 2019 15:20:01 -0800 Subject: [PATCH 0712/1035] feat(messagequeue): Send changes on startup If wantlist changes are present, send them immediately on startup, rather than as a seperate message This commit was moved from ipfs/go-bitswap@26b8a09f93bb20954078e8540f34eaad026c813a --- bitswap/messagequeue/messagequeue.go | 12 ++++++++++-- bitswap/messagequeue/messagequeue_test.go | 12 +++++------- bitswap/peermanager/peermanager.go | 9 +++++---- bitswap/peermanager/peermanager_test.go | 10 +++++++--- 4 files changed, 27 insertions(+), 16 deletions(-) diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go index 9e0f2df6b..ab89f0b53 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -77,7 +77,7 @@ func (mq *MessageQueue) AddMessage(entries []*bsmsg.Entry, ses uint64) { // Startup starts the processing of messages, and creates an initial message // based on the given initial wantlist. -func (mq *MessageQueue) Startup(ctx context.Context, initialEntries []*wantlist.Entry) { +func (mq *MessageQueue) Startup(ctx context.Context, initialEntries []*wantlist.Entry, entries []*bsmsg.Entry, ses uint64) { // new peer, we will want to give them our full wantlist if len(initialEntries) > 0 { @@ -89,8 +89,16 @@ func (mq *MessageQueue) Startup(ctx context.Context, initialEntries []*wantlist. 
fullwantlist.AddEntry(e.Cid, e.Priority) } mq.out = fullwantlist - mq.work <- struct{}{} } + + if len(initialEntries) > 0 || mq.addEntries(entries, ses) { + select { + case <-ctx.Done(): + return + case mq.work <- struct{}{}: + } + } + go mq.runQueue(ctx) } diff --git a/bitswap/messagequeue/messagequeue_test.go b/bitswap/messagequeue/messagequeue_test.go index f3389fe7e..cb5b259b1 100644 --- a/bitswap/messagequeue/messagequeue_test.go +++ b/bitswap/messagequeue/messagequeue_test.go @@ -25,9 +25,9 @@ func (fmn *fakeMessageNetwork) ConnectTo(context.Context, peer.ID) error { func (fmn *fakeMessageNetwork) NewMessageSender(context.Context, peer.ID) (bsnet.MessageSender, error) { if fmn.messageSenderError == nil { return fmn.messageSender, nil - } else { - return nil, fmn.messageSenderError } + return nil, fmn.messageSenderError + } type fakeMessageSender struct { @@ -81,7 +81,7 @@ func TestStartupAndShutdown(t *testing.T) { ses := testutil.GenerateSessionID() wl := testutil.GenerateWantlist(10, ses) - messageQueue.Startup(ctx, wl.Entries()) + messageQueue.Startup(ctx, wl.Entries(), nil, 0) messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) if len(messages) != 1 { @@ -123,9 +123,8 @@ func TestSendingMessagesDeduped(t *testing.T) { ses1 := testutil.GenerateSessionID() ses2 := testutil.GenerateSessionID() entries := testutil.GenerateMessageEntries(10, false) - messageQueue.Startup(ctx, nil) + messageQueue.Startup(ctx, nil, entries, ses1) - messageQueue.AddMessage(entries, ses1) messageQueue.AddMessage(entries, ses2) messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) @@ -148,9 +147,8 @@ func TestSendingMessagesPartialDupe(t *testing.T) { entries := testutil.GenerateMessageEntries(10, false) moreEntries := testutil.GenerateMessageEntries(5, false) secondEntries := append(entries[5:], moreEntries...) 
- messageQueue.Startup(ctx, nil) + messageQueue.Startup(ctx, nil, entries, ses1) - messageQueue.AddMessage(entries, ses1) messageQueue.AddMessage(secondEntries, ses2) messages := collectMessages(ctx, t, messagesSent, 20*time.Millisecond) diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index ca7665cf7..d4eb7e757 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -22,7 +22,7 @@ type PeerQueue interface { RefIncrement() RefDecrement() bool AddMessage(entries []*bsmsg.Entry, ses uint64) - Startup(ctx context.Context, initialEntries []*wantlist.Entry) + Startup(ctx context.Context, initialEntries []*wantlist.Entry, entries []*bsmsg.Entry, ses uint64) Shutdown() } @@ -77,7 +77,7 @@ func (pm *PeerManager) Connected(p peer.ID, initialEntries []*wantlist.Entry) { mq = pm.createPeerQueue(p) pm.peerQueues[p] = mq - mq.Startup(pm.ctx, initialEntries) + mq.Startup(pm.ctx, initialEntries, nil, 0) } // Disconnected is called to remove a peer from the pool. 
@@ -115,12 +115,13 @@ func (pm *PeerManager) SendMessage(initialEntries []*wantlist.Entry, entries []* if !ok { p = pm.createPeerQueue(t) pm.peerQueues[t] = p - p.Startup(pm.ctx, initialEntries) + p.Startup(pm.ctx, initialEntries, entries, from) // this is a "0 reference" queue because we haven't actually connected to it // sending the first message will cause it to connect p.RefDecrement() + } else { + p.AddMessage(entries, from) } - p.AddMessage(entries, from) } } } diff --git a/bitswap/peermanager/peermanager_test.go b/bitswap/peermanager/peermanager_test.go index fa9d79405..3674f7e48 100644 --- a/bitswap/peermanager/peermanager_test.go +++ b/bitswap/peermanager/peermanager_test.go @@ -25,9 +25,13 @@ type fakePeer struct { messagesSent chan messageSent } -func (fp *fakePeer) Startup(ctx context.Context, initialEntries []*wantlist.Entry) {} -func (fp *fakePeer) Shutdown() {} -func (fp *fakePeer) RefIncrement() { fp.refcnt++ } +func (fp *fakePeer) Startup(ctx context.Context, initialEntries []*wantlist.Entry, entries []*bsmsg.Entry, ses uint64) { + if entries != nil { + fp.AddMessage(entries, ses) + } +} +func (fp *fakePeer) Shutdown() {} +func (fp *fakePeer) RefIncrement() { fp.refcnt++ } func (fp *fakePeer) RefDecrement() bool { fp.refcnt-- return fp.refcnt > 0 From cd3d01863232d1d68e8d1a9e12d1dfb029a2b9d6 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 19 Feb 2019 17:48:40 -0800 Subject: [PATCH 0713/1035] feat(peermanager): remove leaky sendmessage Breakup Startup function so that wantlists are not sent with each call to SendMessage This commit was moved from ipfs/go-bitswap@703d46a60e7f4a8e41e76861e1237205a5722143 --- bitswap/messagequeue/messagequeue.go | 30 +++++++++++++---------- bitswap/messagequeue/messagequeue_test.go | 10 +++++--- bitswap/peermanager/peermanager.go | 17 ++++++++----- bitswap/peermanager/peermanager_test.go | 17 ++++++------- bitswap/wantmanager/wantmanager.go | 4 +-- bitswap/wantmanager/wantmanager_test.go | 8 +++--- 6 files 
changed, 46 insertions(+), 40 deletions(-) diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go index ab89f0b53..a2c228e17 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -52,6 +52,11 @@ func New(p peer.ID, network MessageNetwork) *MessageQueue { } } +// RefCount returns the number of open connections for this queue. +func (mq *MessageQueue) RefCount() int { + return mq.refcnt +} + // RefIncrement increments the refcount for a message queue. func (mq *MessageQueue) RefIncrement() { mq.refcnt++ @@ -75,32 +80,31 @@ func (mq *MessageQueue) AddMessage(entries []*bsmsg.Entry, ses uint64) { } } -// Startup starts the processing of messages, and creates an initial message -// based on the given initial wantlist. -func (mq *MessageQueue) Startup(ctx context.Context, initialEntries []*wantlist.Entry, entries []*bsmsg.Entry, ses uint64) { - - // new peer, we will want to give them our full wantlist +// AddWantlist adds a complete session tracked want list to a message queue +func (mq *MessageQueue) AddWantlist(initialEntries []*wantlist.Entry) { if len(initialEntries) > 0 { - fullwantlist := bsmsg.New(true) + if mq.out == nil { + mq.out = bsmsg.New(false) + } + for _, e := range initialEntries { for k := range e.SesTrk { mq.wl.AddEntry(e, k) } - fullwantlist.AddEntry(e.Cid, e.Priority) + mq.out.AddEntry(e.Cid, e.Priority) } - mq.out = fullwantlist - } - if len(initialEntries) > 0 || mq.addEntries(entries, ses) { select { - case <-ctx.Done(): - return case mq.work <- struct{}{}: + default: } } +} +// Startup starts the processing of messages, and creates an initial message +// based on the given initial wantlist. +func (mq *MessageQueue) Startup(ctx context.Context) { go mq.runQueue(ctx) - } // Shutdown stops the processing of messages for a message queue. 
diff --git a/bitswap/messagequeue/messagequeue_test.go b/bitswap/messagequeue/messagequeue_test.go index cb5b259b1..b780678d9 100644 --- a/bitswap/messagequeue/messagequeue_test.go +++ b/bitswap/messagequeue/messagequeue_test.go @@ -81,8 +81,8 @@ func TestStartupAndShutdown(t *testing.T) { ses := testutil.GenerateSessionID() wl := testutil.GenerateWantlist(10, ses) - messageQueue.Startup(ctx, wl.Entries(), nil, 0) - + messageQueue.Startup(ctx) + messageQueue.AddWantlist(wl.Entries()) messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) if len(messages) != 1 { t.Fatal("wrong number of messages were sent for initial wants") @@ -123,8 +123,9 @@ func TestSendingMessagesDeduped(t *testing.T) { ses1 := testutil.GenerateSessionID() ses2 := testutil.GenerateSessionID() entries := testutil.GenerateMessageEntries(10, false) - messageQueue.Startup(ctx, nil, entries, ses1) + messageQueue.Startup(ctx) + messageQueue.AddMessage(entries, ses1) messageQueue.AddMessage(entries, ses2) messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) @@ -147,8 +148,9 @@ func TestSendingMessagesPartialDupe(t *testing.T) { entries := testutil.GenerateMessageEntries(10, false) moreEntries := testutil.GenerateMessageEntries(5, false) secondEntries := append(entries[5:], moreEntries...) 
- messageQueue.Startup(ctx, nil, entries, ses1) + messageQueue.Startup(ctx) + messageQueue.AddMessage(entries, ses1) messageQueue.AddMessage(secondEntries, ses2) messages := collectMessages(ctx, t, messagesSent, 20*time.Millisecond) diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index d4eb7e757..3705d024a 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -21,8 +21,10 @@ var ( type PeerQueue interface { RefIncrement() RefDecrement() bool + RefCount() int AddMessage(entries []*bsmsg.Entry, ses uint64) - Startup(ctx context.Context, initialEntries []*wantlist.Entry, entries []*bsmsg.Entry, ses uint64) + Startup(ctx context.Context) + AddWantlist(initialEntries []*wantlist.Entry) Shutdown() } @@ -71,13 +73,17 @@ func (pm *PeerManager) Connected(p peer.ID, initialEntries []*wantlist.Entry) { mq, ok := pm.peerQueues[p] if ok { + if mq.RefCount() == 0 { + mq.AddWantlist(initialEntries) + } mq.RefIncrement() return } mq = pm.createPeerQueue(p) pm.peerQueues[p] = mq - mq.Startup(pm.ctx, initialEntries, nil, 0) + mq.Startup(pm.ctx) + mq.AddWantlist(initialEntries) } // Disconnected is called to remove a peer from the pool. @@ -101,7 +107,7 @@ func (pm *PeerManager) Disconnected(p peer.ID) { // SendMessage is called to send a message to all or some peers in the pool; // if targets is nil, it sends to all. 
-func (pm *PeerManager) SendMessage(initialEntries []*wantlist.Entry, entries []*bsmsg.Entry, targets []peer.ID, from uint64) { +func (pm *PeerManager) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) { pm.lk.Lock() defer pm.lk.Unlock() @@ -115,13 +121,12 @@ func (pm *PeerManager) SendMessage(initialEntries []*wantlist.Entry, entries []* if !ok { p = pm.createPeerQueue(t) pm.peerQueues[t] = p - p.Startup(pm.ctx, initialEntries, entries, from) + p.Startup(pm.ctx) // this is a "0 reference" queue because we haven't actually connected to it // sending the first message will cause it to connect p.RefDecrement() - } else { - p.AddMessage(entries, from) } + p.AddMessage(entries, from) } } } diff --git a/bitswap/peermanager/peermanager_test.go b/bitswap/peermanager/peermanager_test.go index 3674f7e48..2b7c938ed 100644 --- a/bitswap/peermanager/peermanager_test.go +++ b/bitswap/peermanager/peermanager_test.go @@ -25,13 +25,10 @@ type fakePeer struct { messagesSent chan messageSent } -func (fp *fakePeer) Startup(ctx context.Context, initialEntries []*wantlist.Entry, entries []*bsmsg.Entry, ses uint64) { - if entries != nil { - fp.AddMessage(entries, ses) - } -} -func (fp *fakePeer) Shutdown() {} -func (fp *fakePeer) RefIncrement() { fp.refcnt++ } +func (fp *fakePeer) Startup(ctx context.Context) {} +func (fp *fakePeer) Shutdown() {} +func (fp *fakePeer) RefCount() int { return fp.refcnt } +func (fp *fakePeer) RefIncrement() { fp.refcnt++ } func (fp *fakePeer) RefDecrement() bool { fp.refcnt-- return fp.refcnt > 0 @@ -39,7 +36,7 @@ func (fp *fakePeer) RefDecrement() bool { func (fp *fakePeer) AddMessage(entries []*bsmsg.Entry, ses uint64) { fp.messagesSent <- messageSent{fp.p, entries, ses} } - +func (fp *fakePeer) AddWantlist(initialEntries []*wantlist.Entry) {} func makePeerQueueFactory(messagesSent chan messageSent) PeerQueueFactory { return func(p peer.ID) PeerQueue { return &fakePeer{ @@ -136,7 +133,7 @@ func TestSendingMessagesToPeers(t *testing.T) { 
entries := testutil.GenerateMessageEntries(5, false) ses := testutil.GenerateSessionID() - peerManager.SendMessage(nil, entries, nil, ses) + peerManager.SendMessage(entries, nil, ses) peersReceived := collectAndCheckMessages( ctx, t, messagesSent, entries, ses, 10*time.Millisecond) @@ -157,7 +154,7 @@ func TestSendingMessagesToPeers(t *testing.T) { var peersToSendTo []peer.ID peersToSendTo = append(peersToSendTo, peer1, peer3, peer4) - peerManager.SendMessage(nil, entries, peersToSendTo, ses) + peerManager.SendMessage(entries, peersToSendTo, ses) peersReceived = collectAndCheckMessages( ctx, t, messagesSent, entries, ses, 10*time.Millisecond) diff --git a/bitswap/wantmanager/wantmanager.go b/bitswap/wantmanager/wantmanager.go index 8b2480599..57bd65f89 100644 --- a/bitswap/wantmanager/wantmanager.go +++ b/bitswap/wantmanager/wantmanager.go @@ -25,7 +25,7 @@ const ( type PeerHandler interface { Disconnected(p peer.ID) Connected(p peer.ID, initialEntries []*wantlist.Entry) - SendMessage(initialEntries []*wantlist.Entry, entries []*bsmsg.Entry, targets []peer.ID, from uint64) + SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) } type wantMessage interface { @@ -232,7 +232,7 @@ func (ws *wantSet) handle(wm *WantManager) { } // broadcast those wantlist changes - wm.peerHandler.SendMessage(wm.bcwl.Entries(), ws.entries, ws.targets, ws.from) + wm.peerHandler.SendMessage(ws.entries, ws.targets, ws.from) } type isWantedMessage struct { diff --git a/bitswap/wantmanager/wantmanager_test.go b/bitswap/wantmanager/wantmanager_test.go index 37a1d2766..46d1d0b30 100644 --- a/bitswap/wantmanager/wantmanager_test.go +++ b/bitswap/wantmanager/wantmanager_test.go @@ -15,15 +15,13 @@ import ( ) type fakePeerHandler struct { - lk sync.RWMutex - lastWantSet wantSet - initialEntries []*wantlist.Entry + lk sync.RWMutex + lastWantSet wantSet } -func (fph *fakePeerHandler) SendMessage(initialEntries []*wantlist.Entry, entries []*bsmsg.Entry, targets []peer.ID, from uint64) 
{ +func (fph *fakePeerHandler) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) { fph.lk.Lock() fph.lastWantSet = wantSet{entries, targets, from} - fph.initialEntries = initialEntries fph.lk.Unlock() } From 6235c313fa44d533a0197e3bcc4d9516acbc6097 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 19 Feb 2019 18:05:45 -0800 Subject: [PATCH 0714/1035] feat(peermanager): limit use of mutex Constrain use of mutex to actual operations on the peerQueues map via utility functions This commit was moved from ipfs/go-bitswap@9b54f91271066ed7fe26a7d3ce4c649ca0769d0c --- bitswap/peermanager/peermanager.go | 66 ++++++++++++++++++++---------- 1 file changed, 44 insertions(+), 22 deletions(-) diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index 3705d024a..c993148c1 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -37,8 +37,10 @@ type peerMessage interface { // PeerManager manages a pool of peers and sends messages to peers in the pool. type PeerManager struct { - peerQueues map[peer.ID]PeerQueue - lk sync.RWMutex + // peerQueues -- interact through internal utility functions get/set/remove/iterate + peerQueues map[peer.ID]PeerQueue + peerQueuesLk sync.RWMutex + createPeerQueue PeerQueueFactory ctx context.Context } @@ -54,24 +56,19 @@ func New(ctx context.Context, createPeerQueue PeerQueueFactory) *PeerManager { // ConnectedPeers returns a list of peers this PeerManager is managing. func (pm *PeerManager) ConnectedPeers() []peer.ID { - pm.lk.RLock() - defer pm.lk.RUnlock() peers := make([]peer.ID, 0, len(pm.peerQueues)) - for p := range pm.peerQueues { + pm.iterate(func(p peer.ID, _ PeerQueue) { peers = append(peers, p) - } - + }) return peers } // Connected is called to add a new peer to the pool, and send it an initial set // of wants. 
func (pm *PeerManager) Connected(p peer.ID, initialEntries []*wantlist.Entry) { - pm.lk.Lock() - defer pm.lk.Unlock() + mq, ok := pm.get(p) - mq, ok := pm.peerQueues[p] if ok { if mq.RefCount() == 0 { mq.AddWantlist(initialEntries) @@ -81,17 +78,17 @@ func (pm *PeerManager) Connected(p peer.ID, initialEntries []*wantlist.Entry) { } mq = pm.createPeerQueue(p) - pm.peerQueues[p] = mq + + pm.set(p, mq) + mq.Startup(pm.ctx) mq.AddWantlist(initialEntries) } // Disconnected is called to remove a peer from the pool. func (pm *PeerManager) Disconnected(p peer.ID) { - pm.lk.Lock() - defer pm.lk.Unlock() + pq, ok := pm.get(p) - pq, ok := pm.peerQueues[p] if !ok { // TODO: log error? return @@ -102,25 +99,23 @@ func (pm *PeerManager) Disconnected(p peer.ID) { } pq.Shutdown() - delete(pm.peerQueues, p) + + pm.remove(p) } // SendMessage is called to send a message to all or some peers in the pool; // if targets is nil, it sends to all. func (pm *PeerManager) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) { - pm.lk.Lock() - defer pm.lk.Unlock() - if len(targets) == 0 { - for _, p := range pm.peerQueues { + pm.iterate(func(_ peer.ID, p PeerQueue) { p.AddMessage(entries, from) - } + }) } else { for _, t := range targets { - p, ok := pm.peerQueues[t] + p, ok := pm.get(t) if !ok { p = pm.createPeerQueue(t) - pm.peerQueues[t] = p + pm.set(t, p) p.Startup(pm.ctx) // this is a "0 reference" queue because we haven't actually connected to it // sending the first message will cause it to connect @@ -130,3 +125,30 @@ func (pm *PeerManager) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, fr } } } + +func (pm *PeerManager) get(p peer.ID) (PeerQueue, bool) { + pm.peerQueuesLk.RLock() + pq, ok := pm.peerQueues[p] + pm.peerQueuesLk.RUnlock() + return pq, ok +} + +func (pm *PeerManager) set(p peer.ID, pq PeerQueue) { + pm.peerQueuesLk.Lock() + pm.peerQueues[p] = pq + pm.peerQueuesLk.Unlock() +} + +func (pm *PeerManager) remove(p peer.ID) { + pm.peerQueuesLk.Lock() + 
delete(pm.peerQueues, p) + pm.peerQueuesLk.Unlock() +} + +func (pm *PeerManager) iterate(iterateFn func(peer.ID, PeerQueue)) { + pm.peerQueuesLk.RLock() + for p, pq := range pm.peerQueues { + iterateFn(p, pq) + } + pm.peerQueuesLk.RUnlock() +} From b09df1194585892f76b6d393704de123ca5fbfb5 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 20 Feb 2019 11:04:22 -0800 Subject: [PATCH 0715/1035] fix(peermanager): fix get/set race repace get/set with getOrCreate to keep operations atomic This commit was moved from ipfs/go-bitswap@d8454fe8aae0b9e5ad4b28bb37a39d7c902ca4d2 --- bitswap/messagequeue/messagequeue.go | 2 +- bitswap/peermanager/peermanager.go | 38 +++++++++---------------- bitswap/peermanager/peermanager_test.go | 2 +- 3 files changed, 15 insertions(+), 27 deletions(-) diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go index a2c228e17..38c943b5e 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -48,7 +48,7 @@ func New(p peer.ID, network MessageNetwork) *MessageQueue { wl: wantlist.NewThreadSafe(), network: network, p: p, - refcnt: 1, + refcnt: 0, } } diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index c993148c1..773f29c08 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -67,22 +67,12 @@ func (pm *PeerManager) ConnectedPeers() []peer.ID { // Connected is called to add a new peer to the pool, and send it an initial set // of wants. 
func (pm *PeerManager) Connected(p peer.ID, initialEntries []*wantlist.Entry) { - mq, ok := pm.get(p) + mq := pm.getOrCreate(p) - if ok { - if mq.RefCount() == 0 { - mq.AddWantlist(initialEntries) - } - mq.RefIncrement() - return + if mq.RefCount() == 0 { + mq.AddWantlist(initialEntries) } - - mq = pm.createPeerQueue(p) - - pm.set(p, mq) - - mq.Startup(pm.ctx) - mq.AddWantlist(initialEntries) + mq.RefIncrement() } // Disconnected is called to remove a peer from the pool. @@ -112,15 +102,7 @@ func (pm *PeerManager) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, fr }) } else { for _, t := range targets { - p, ok := pm.get(t) - if !ok { - p = pm.createPeerQueue(t) - pm.set(t, p) - p.Startup(pm.ctx) - // this is a "0 reference" queue because we haven't actually connected to it - // sending the first message will cause it to connect - p.RefDecrement() - } + p := pm.getOrCreate(t) p.AddMessage(entries, from) } } @@ -133,10 +115,16 @@ func (pm *PeerManager) get(p peer.ID) (PeerQueue, bool) { return pq, ok } -func (pm *PeerManager) set(p peer.ID, pq PeerQueue) { +func (pm *PeerManager) getOrCreate(p peer.ID) PeerQueue { pm.peerQueuesLk.Lock() - pm.peerQueues[p] = pq + pq, ok := pm.peerQueues[p] + if !ok { + pq = pm.createPeerQueue(p) + pq.Startup(pm.ctx) + pm.peerQueues[p] = pq + } pm.peerQueuesLk.Unlock() + return pq } func (pm *PeerManager) remove(p peer.ID) { diff --git a/bitswap/peermanager/peermanager_test.go b/bitswap/peermanager/peermanager_test.go index 2b7c938ed..00dd04473 100644 --- a/bitswap/peermanager/peermanager_test.go +++ b/bitswap/peermanager/peermanager_test.go @@ -41,7 +41,7 @@ func makePeerQueueFactory(messagesSent chan messageSent) PeerQueueFactory { return func(p peer.ID) PeerQueue { return &fakePeer{ p: p, - refcnt: 1, + refcnt: 0, messagesSent: messagesSent, } } From f2dae491a366978f81f6aa522e815d5bf8bc5938 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 20 Feb 2019 11:16:06 -0800 Subject: [PATCH 0716/1035] fix(peermanager): fix 
disconnect race Keep all of disconnection in a mutex This commit was moved from ipfs/go-bitswap@97bc28b91c00ea3f53aa0132f2cfbd01c8cfa2ce --- bitswap/peermanager/peermanager.go | 26 ++++++-------------------- 1 file changed, 6 insertions(+), 20 deletions(-) diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index 773f29c08..95361394b 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -77,20 +77,19 @@ func (pm *PeerManager) Connected(p peer.ID, initialEntries []*wantlist.Entry) { // Disconnected is called to remove a peer from the pool. func (pm *PeerManager) Disconnected(p peer.ID) { - pq, ok := pm.get(p) + pm.peerQueuesLk.Lock() + pq, ok := pm.peerQueues[p] - if !ok { - // TODO: log error? + if !ok || pq.RefDecrement() { + pm.peerQueuesLk.Unlock() return } - if pq.RefDecrement() { - return - } + delete(pm.peerQueues, p) + pm.peerQueuesLk.Unlock() pq.Shutdown() - pm.remove(p) } // SendMessage is called to send a message to all or some peers in the pool; @@ -108,13 +107,6 @@ func (pm *PeerManager) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, fr } } -func (pm *PeerManager) get(p peer.ID) (PeerQueue, bool) { - pm.peerQueuesLk.RLock() - pq, ok := pm.peerQueues[p] - pm.peerQueuesLk.RUnlock() - return pq, ok -} - func (pm *PeerManager) getOrCreate(p peer.ID) PeerQueue { pm.peerQueuesLk.Lock() pq, ok := pm.peerQueues[p] @@ -127,12 +119,6 @@ func (pm *PeerManager) getOrCreate(p peer.ID) PeerQueue { return pq } -func (pm *PeerManager) remove(p peer.ID) { - pm.peerQueuesLk.Lock() - delete(pm.peerQueues, p) - pm.peerQueuesLk.Unlock() -} - func (pm *PeerManager) iterate(iterateFn func(peer.ID, PeerQueue)) { pm.peerQueuesLk.RLock() for p, pq := range pm.peerQueues { From cdf38a0d9bc5a50598116a94e2892d8786eb16f5 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 20 Feb 2019 14:18:22 -0800 Subject: [PATCH 0717/1035] fix(peermanager): race fix fix remaining issues for race detector in peer 
manager This commit was moved from ipfs/go-bitswap@434e0f416c7352b5545a2486816e1bd7c5c4c239 --- bitswap/peermanager/peermanager.go | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index 95361394b..7a32e4831 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -56,11 +56,12 @@ func New(ctx context.Context, createPeerQueue PeerQueueFactory) *PeerManager { // ConnectedPeers returns a list of peers this PeerManager is managing. func (pm *PeerManager) ConnectedPeers() []peer.ID { - + pm.peerQueuesLk.RLock() + defer pm.peerQueuesLk.RUnlock() peers := make([]peer.ID, 0, len(pm.peerQueues)) - pm.iterate(func(p peer.ID, _ PeerQueue) { + for p := range pm.peerQueues { peers = append(peers, p) - }) + } return peers } @@ -96,9 +97,11 @@ func (pm *PeerManager) Disconnected(p peer.ID) { // if targets is nil, it sends to all. func (pm *PeerManager) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) { if len(targets) == 0 { - pm.iterate(func(_ peer.ID, p PeerQueue) { + pm.peerQueuesLk.RLock() + for _, p := range pm.peerQueues { p.AddMessage(entries, from) - }) + } + pm.peerQueuesLk.RUnlock() } else { for _, t := range targets { p := pm.getOrCreate(t) @@ -118,11 +121,3 @@ func (pm *PeerManager) getOrCreate(p peer.ID) PeerQueue { pm.peerQueuesLk.Unlock() return pq } - -func (pm *PeerManager) iterate(iterateFn func(peer.ID, PeerQueue)) { - pm.peerQueuesLk.RLock() - for p, pq := range pm.peerQueues { - iterateFn(p, pq) - } - pm.peerQueuesLk.RUnlock() -} From 14d4b5e6a337219b0bb82879f13ca60f8322e65a Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 20 Feb 2019 14:45:40 -0800 Subject: [PATCH 0718/1035] feat(peermanager): move refcnt Move refcnt tracking from the messagequeue to the peermanager, where it's relevant This commit was moved from ipfs/go-bitswap@d4191c4d21ab78eb00c6da7a9e0f3177fcac0070 --- 
bitswap/messagequeue/messagequeue.go | 20 --------- bitswap/peermanager/peermanager.go | 56 +++++++++++++++---------- bitswap/peermanager/peermanager_test.go | 9 +--- 3 files changed, 36 insertions(+), 49 deletions(-) diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go index 38c943b5e..6d2cd1ced 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -34,8 +34,6 @@ type MessageQueue struct { sender bsnet.MessageSender - refcnt int - work chan struct{} done chan struct{} } @@ -48,27 +46,9 @@ func New(p peer.ID, network MessageNetwork) *MessageQueue { wl: wantlist.NewThreadSafe(), network: network, p: p, - refcnt: 0, } } -// RefCount returns the number of open connections for this queue. -func (mq *MessageQueue) RefCount() int { - return mq.refcnt -} - -// RefIncrement increments the refcount for a message queue. -func (mq *MessageQueue) RefIncrement() { - mq.refcnt++ -} - -// RefDecrement decrements the refcount for a message queue and returns true -// if the refcount is now 0. -func (mq *MessageQueue) RefDecrement() bool { - mq.refcnt-- - return mq.refcnt > 0 -} - // AddMessage adds new entries to an outgoing message for a given session. func (mq *MessageQueue) AddMessage(entries []*bsmsg.Entry, ses uint64) { if !mq.addEntries(entries, ses) { diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index 7a32e4831..48c8de43b 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -19,9 +19,6 @@ var ( // PeerQueue provides a queer of messages to be sent for a single peer. 
type PeerQueue interface { - RefIncrement() - RefDecrement() bool - RefCount() int AddMessage(entries []*bsmsg.Entry, ses uint64) Startup(ctx context.Context) AddWantlist(initialEntries []*wantlist.Entry) @@ -35,10 +32,15 @@ type peerMessage interface { handle(pm *PeerManager) } +type peerQueueInstance struct { + refcnt int + pq PeerQueue +} + // PeerManager manages a pool of peers and sends messages to peers in the pool. type PeerManager struct { // peerQueues -- interact through internal utility functions get/set/remove/iterate - peerQueues map[peer.ID]PeerQueue + peerQueues map[peer.ID]*peerQueueInstance peerQueuesLk sync.RWMutex createPeerQueue PeerQueueFactory @@ -48,7 +50,7 @@ type PeerManager struct { // New creates a new PeerManager, given a context and a peerQueueFactory. func New(ctx context.Context, createPeerQueue PeerQueueFactory) *PeerManager { return &PeerManager{ - peerQueues: make(map[peer.ID]PeerQueue), + peerQueues: make(map[peer.ID]*peerQueueInstance), createPeerQueue: createPeerQueue, ctx: ctx, } @@ -68,12 +70,17 @@ func (pm *PeerManager) ConnectedPeers() []peer.ID { // Connected is called to add a new peer to the pool, and send it an initial set // of wants. func (pm *PeerManager) Connected(p peer.ID, initialEntries []*wantlist.Entry) { - mq := pm.getOrCreate(p) + pm.peerQueuesLk.Lock() + + pq := pm.getOrCreate(p) - if mq.RefCount() == 0 { - mq.AddWantlist(initialEntries) + if pq.refcnt == 0 { + pq.pq.AddWantlist(initialEntries) } - mq.RefIncrement() + + pq.refcnt++ + + pm.peerQueuesLk.Unlock() } // Disconnected is called to remove a peer from the pool. 
@@ -81,7 +88,13 @@ func (pm *PeerManager) Disconnected(p peer.ID) { pm.peerQueuesLk.Lock() pq, ok := pm.peerQueues[p] - if !ok || pq.RefDecrement() { + if !ok { + pm.peerQueuesLk.Unlock() + return + } + + pq.refcnt-- + if pq.refcnt > 0 { pm.peerQueuesLk.Unlock() return } @@ -89,7 +102,7 @@ func (pm *PeerManager) Disconnected(p peer.ID) { delete(pm.peerQueues, p) pm.peerQueuesLk.Unlock() - pq.Shutdown() + pq.pq.Shutdown() } @@ -99,25 +112,26 @@ func (pm *PeerManager) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, fr if len(targets) == 0 { pm.peerQueuesLk.RLock() for _, p := range pm.peerQueues { - p.AddMessage(entries, from) + p.pq.AddMessage(entries, from) } pm.peerQueuesLk.RUnlock() } else { for _, t := range targets { - p := pm.getOrCreate(t) - p.AddMessage(entries, from) + pm.peerQueuesLk.Lock() + pqi := pm.getOrCreate(t) + pm.peerQueuesLk.Unlock() + pqi.pq.AddMessage(entries, from) } } } -func (pm *PeerManager) getOrCreate(p peer.ID) PeerQueue { - pm.peerQueuesLk.Lock() - pq, ok := pm.peerQueues[p] +func (pm *PeerManager) getOrCreate(p peer.ID) *peerQueueInstance { + pqi, ok := pm.peerQueues[p] if !ok { - pq = pm.createPeerQueue(p) + pq := pm.createPeerQueue(p) pq.Startup(pm.ctx) - pm.peerQueues[p] = pq + pqi = &peerQueueInstance{0, pq} + pm.peerQueues[p] = pqi } - pm.peerQueuesLk.Unlock() - return pq + return pqi } diff --git a/bitswap/peermanager/peermanager_test.go b/bitswap/peermanager/peermanager_test.go index 00dd04473..ac8595d5d 100644 --- a/bitswap/peermanager/peermanager_test.go +++ b/bitswap/peermanager/peermanager_test.go @@ -20,19 +20,13 @@ type messageSent struct { } type fakePeer struct { - refcnt int p peer.ID messagesSent chan messageSent } func (fp *fakePeer) Startup(ctx context.Context) {} func (fp *fakePeer) Shutdown() {} -func (fp *fakePeer) RefCount() int { return fp.refcnt } -func (fp *fakePeer) RefIncrement() { fp.refcnt++ } -func (fp *fakePeer) RefDecrement() bool { - fp.refcnt-- - return fp.refcnt > 0 -} + func (fp *fakePeer) 
AddMessage(entries []*bsmsg.Entry, ses uint64) { fp.messagesSent <- messageSent{fp.p, entries, ses} } @@ -41,7 +35,6 @@ func makePeerQueueFactory(messagesSent chan messageSent) PeerQueueFactory { return func(p peer.ID) PeerQueue { return &fakePeer{ p: p, - refcnt: 0, messagesSent: messagesSent, } } From 8692106412b23c5bedb87c3d302c157dc4706567 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 19 Feb 2019 17:26:54 -0800 Subject: [PATCH 0719/1035] feat(wantlist): differentiate types Seperate want list into differentiated types - session tracking and regular fix #13 This commit was moved from ipfs/go-bitswap@78386f0e0c837bfafe1c5a29ad9f7990913f9b4b --- bitswap/bitswap.go | 4 +- bitswap/messagequeue/messagequeue.go | 181 ++++++++++++---------- bitswap/messagequeue/messagequeue_test.go | 15 +- bitswap/peermanager/peermanager.go | 14 +- bitswap/peermanager/peermanager_test.go | 8 +- bitswap/testutil/testutil.go | 4 +- bitswap/wantlist/wantlist.go | 85 +++++----- bitswap/wantlist/wantlist_test.go | 4 +- bitswap/wantmanager/wantmanager.go | 12 +- bitswap/wantmanager/wantmanager_test.go | 4 +- 10 files changed, 174 insertions(+), 157 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 3abbc1979..28c1589b9 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -97,8 +97,8 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, return nil }) - peerQueueFactory := func(p peer.ID) bspm.PeerQueue { - return bsmq.New(p, network) + peerQueueFactory := func(ctx context.Context, p peer.ID) bspm.PeerQueue { + return bsmq.New(ctx, p, network) } wm := bswm.New(ctx) diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go index 6d2cd1ced..e92046522 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -2,7 +2,6 @@ package messagequeue import ( "context" - "sync" "time" bsmsg "github.com/ipfs/go-bitswap/message" @@ -23,68 +22,72 @@ type MessageNetwork interface { 
NewMessageSender(context.Context, peer.ID) (bsnet.MessageSender, error) } +type request interface { + handle(mq *MessageQueue) +} + // MessageQueue implements queue of want messages to send to peers. type MessageQueue struct { - p peer.ID - - outlk sync.Mutex - out bsmsg.BitSwapMessage + ctx context.Context + p peer.ID network MessageNetwork - wl *wantlist.ThreadSafe - sender bsnet.MessageSender + newRequests chan request + outgoingMessages chan bsmsg.BitSwapMessage + done chan struct{} + + // do not touch out of run loop + wl *wantlist.SessionTrackedWantlist + nextMessage bsmsg.BitSwapMessage + sender bsnet.MessageSender +} + +type messageRequest struct { + entries []*bsmsg.Entry + ses uint64 +} - work chan struct{} - done chan struct{} +type wantlistRequest struct { + wl *wantlist.SessionTrackedWantlist } // New creats a new MessageQueue. -func New(p peer.ID, network MessageNetwork) *MessageQueue { +func New(ctx context.Context, p peer.ID, network MessageNetwork) *MessageQueue { return &MessageQueue{ - done: make(chan struct{}), - work: make(chan struct{}, 1), - wl: wantlist.NewThreadSafe(), - network: network, - p: p, + ctx: ctx, + wl: wantlist.NewSessionTrackedWantlist(), + network: network, + p: p, + newRequests: make(chan request, 16), + outgoingMessages: make(chan bsmsg.BitSwapMessage), + done: make(chan struct{}), } } // AddMessage adds new entries to an outgoing message for a given session. 
func (mq *MessageQueue) AddMessage(entries []*bsmsg.Entry, ses uint64) { - if !mq.addEntries(entries, ses) { - return - } select { - case mq.work <- struct{}{}: - default: + case mq.newRequests <- &messageRequest{entries, ses}: + case <-mq.ctx.Done(): } } // AddWantlist adds a complete session tracked want list to a message queue -func (mq *MessageQueue) AddWantlist(initialEntries []*wantlist.Entry) { - if len(initialEntries) > 0 { - if mq.out == nil { - mq.out = bsmsg.New(false) - } +func (mq *MessageQueue) AddWantlist(initialWants *wantlist.SessionTrackedWantlist) { + wl := wantlist.NewSessionTrackedWantlist() + initialWants.CopyWants(wl) - for _, e := range initialEntries { - for k := range e.SesTrk { - mq.wl.AddEntry(e, k) - } - mq.out.AddEntry(e.Cid, e.Priority) - } - - select { - case mq.work <- struct{}{}: - default: - } + select { + case mq.newRequests <- &wantlistRequest{wl}: + case <-mq.ctx.Done(): } } // Startup starts the processing of messages, and creates an initial message // based on the given initial wantlist. -func (mq *MessageQueue) Startup(ctx context.Context) { - go mq.runQueue(ctx) +func (mq *MessageQueue) Startup() { + go mq.runQueue() + go mq.sendMessages() } // Shutdown stops the processing of messages for a message queue. 
@@ -92,17 +95,26 @@ func (mq *MessageQueue) Shutdown() { close(mq.done) } -func (mq *MessageQueue) runQueue(ctx context.Context) { +func (mq *MessageQueue) runQueue() { + outgoingMessages := func() chan bsmsg.BitSwapMessage { + if mq.nextMessage == nil { + return nil + } + return mq.outgoingMessages + } + for { select { - case <-mq.work: // there is work to be done - mq.doWork(ctx) + case newRequest := <-mq.newRequests: + newRequest.handle(mq) + case outgoingMessages() <- mq.nextMessage: + mq.nextMessage = nil case <-mq.done: if mq.sender != nil { mq.sender.Close() } return - case <-ctx.Done(): + case <-mq.ctx.Done(): if mq.sender != nil { mq.sender.Reset() } @@ -111,63 +123,77 @@ func (mq *MessageQueue) runQueue(ctx context.Context) { } } -func (mq *MessageQueue) addEntries(entries []*bsmsg.Entry, ses uint64) bool { - var work bool - mq.outlk.Lock() - defer mq.outlk.Unlock() - // if we have no message held allocate a new one - if mq.out == nil { - mq.out = bsmsg.New(false) +func (mr *messageRequest) handle(mq *MessageQueue) { + mq.addEntries(mr.entries, mr.ses) +} + +func (wr *wantlistRequest) handle(mq *MessageQueue) { + initialWants := wr.wl + initialWants.CopyWants(mq.wl) + if initialWants.Len() > 0 { + if mq.nextMessage == nil { + mq.nextMessage = bsmsg.New(false) + } + for _, e := range initialWants.Entries() { + mq.nextMessage.AddEntry(e.Cid, e.Priority) + } } +} - // TODO: add a msg.Combine(...) 
method - // otherwise, combine the one we are holding with the - // one passed in +func (mq *MessageQueue) addEntries(entries []*bsmsg.Entry, ses uint64) { for _, e := range entries { if e.Cancel { if mq.wl.Remove(e.Cid, ses) { - work = true - mq.out.Cancel(e.Cid) + if mq.nextMessage == nil { + mq.nextMessage = bsmsg.New(false) + } + mq.nextMessage.Cancel(e.Cid) } } else { if mq.wl.Add(e.Cid, e.Priority, ses) { - work = true - mq.out.AddEntry(e.Cid, e.Priority) + if mq.nextMessage == nil { + mq.nextMessage = bsmsg.New(false) + } + mq.nextMessage.AddEntry(e.Cid, e.Priority) } } } - - return work } -func (mq *MessageQueue) doWork(ctx context.Context) { - - wlm := mq.extractOutgoingMessage() - if wlm == nil || wlm.Empty() { - return +func (mq *MessageQueue) sendMessages() { + for { + select { + case nextMessage := <-mq.outgoingMessages: + mq.sendMessage(nextMessage) + case <-mq.done: + return + case <-mq.ctx.Done(): + return + } } +} + +func (mq *MessageQueue) sendMessage(message bsmsg.BitSwapMessage) { - // NB: only open a stream if we actually have data to send - err := mq.initializeSender(ctx) + err := mq.initializeSender() if err != nil { log.Infof("cant open message sender to peer %s: %s", mq.p, err) // TODO: cant connect, what now? return } - // send wantlist updates for i := 0; i < maxRetries; i++ { // try to send this message until we fail. 
- if mq.attemptSendAndRecovery(ctx, wlm) { + if mq.attemptSendAndRecovery(message) { return } } } -func (mq *MessageQueue) initializeSender(ctx context.Context) error { +func (mq *MessageQueue) initializeSender() error { if mq.sender != nil { return nil } - nsender, err := openSender(ctx, mq.network, mq.p) + nsender, err := openSender(mq.ctx, mq.network, mq.p) if err != nil { return err } @@ -175,8 +201,8 @@ func (mq *MessageQueue) initializeSender(ctx context.Context) error { return nil } -func (mq *MessageQueue) attemptSendAndRecovery(ctx context.Context, wlm bsmsg.BitSwapMessage) bool { - err := mq.sender.SendMsg(ctx, wlm) +func (mq *MessageQueue) attemptSendAndRecovery(message bsmsg.BitSwapMessage) bool { + err := mq.sender.SendMsg(mq.ctx, message) if err == nil { return true } @@ -188,14 +214,14 @@ func (mq *MessageQueue) attemptSendAndRecovery(ctx context.Context, wlm bsmsg.Bi select { case <-mq.done: return true - case <-ctx.Done(): + case <-mq.ctx.Done(): return true case <-time.After(time.Millisecond * 100): // wait 100ms in case disconnect notifications are still propogating log.Warning("SendMsg errored but neither 'done' nor context.Done() were set") } - err = mq.initializeSender(ctx) + err = mq.initializeSender() if err != nil { log.Infof("couldnt open sender again after SendMsg(%s) failed: %s", mq.p, err) // TODO(why): what do we do now? 
@@ -215,15 +241,6 @@ func (mq *MessageQueue) attemptSendAndRecovery(ctx context.Context, wlm bsmsg.Bi return false } -func (mq *MessageQueue) extractOutgoingMessage() bsmsg.BitSwapMessage { - // grab outgoing message - mq.outlk.Lock() - wlm := mq.out - mq.out = nil - mq.outlk.Unlock() - return wlm -} - func openSender(ctx context.Context, network MessageNetwork, p peer.ID) (bsnet.MessageSender, error) { // allow ten minutes for connections this includes looking them up in the // dht dialing them, and handshaking diff --git a/bitswap/messagequeue/messagequeue_test.go b/bitswap/messagequeue/messagequeue_test.go index b780678d9..aeb903ddc 100644 --- a/bitswap/messagequeue/messagequeue_test.go +++ b/bitswap/messagequeue/messagequeue_test.go @@ -27,7 +27,6 @@ func (fmn *fakeMessageNetwork) NewMessageSender(context.Context, peer.ID) (bsnet return fmn.messageSender, nil } return nil, fmn.messageSenderError - } type fakeMessageSender struct { @@ -77,12 +76,12 @@ func TestStartupAndShutdown(t *testing.T) { fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(peerID, fakenet) + messageQueue := New(ctx, peerID, fakenet) ses := testutil.GenerateSessionID() wl := testutil.GenerateWantlist(10, ses) - messageQueue.Startup(ctx) - messageQueue.AddWantlist(wl.Entries()) + messageQueue.Startup() + messageQueue.AddWantlist(wl) messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) if len(messages) != 1 { t.Fatal("wrong number of messages were sent for initial wants") @@ -119,11 +118,11 @@ func TestSendingMessagesDeduped(t *testing.T) { fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(peerID, fakenet) + messageQueue := New(ctx, peerID, fakenet) ses1 := testutil.GenerateSessionID() ses2 := 
testutil.GenerateSessionID() entries := testutil.GenerateMessageEntries(10, false) - messageQueue.Startup(ctx) + messageQueue.Startup() messageQueue.AddMessage(entries, ses1) messageQueue.AddMessage(entries, ses2) @@ -142,13 +141,13 @@ func TestSendingMessagesPartialDupe(t *testing.T) { fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(peerID, fakenet) + messageQueue := New(ctx, peerID, fakenet) ses1 := testutil.GenerateSessionID() ses2 := testutil.GenerateSessionID() entries := testutil.GenerateMessageEntries(10, false) moreEntries := testutil.GenerateMessageEntries(5, false) secondEntries := append(entries[5:], moreEntries...) - messageQueue.Startup(ctx) + messageQueue.Startup() messageQueue.AddMessage(entries, ses1) messageQueue.AddMessage(secondEntries, ses2) diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index 48c8de43b..b1b8ee9a7 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -20,13 +20,13 @@ var ( // PeerQueue provides a queer of messages to be sent for a single peer. type PeerQueue interface { AddMessage(entries []*bsmsg.Entry, ses uint64) - Startup(ctx context.Context) - AddWantlist(initialEntries []*wantlist.Entry) + Startup() + AddWantlist(initialWants *wantlist.SessionTrackedWantlist) Shutdown() } // PeerQueueFactory provides a function that will create a PeerQueue. -type PeerQueueFactory func(p peer.ID) PeerQueue +type PeerQueueFactory func(ctx context.Context, p peer.ID) PeerQueue type peerMessage interface { handle(pm *PeerManager) @@ -69,13 +69,13 @@ func (pm *PeerManager) ConnectedPeers() []peer.ID { // Connected is called to add a new peer to the pool, and send it an initial set // of wants. 
-func (pm *PeerManager) Connected(p peer.ID, initialEntries []*wantlist.Entry) { +func (pm *PeerManager) Connected(p peer.ID, initialWants *wantlist.SessionTrackedWantlist) { pm.peerQueuesLk.Lock() pq := pm.getOrCreate(p) if pq.refcnt == 0 { - pq.pq.AddWantlist(initialEntries) + pq.pq.AddWantlist(initialWants) } pq.refcnt++ @@ -128,8 +128,8 @@ func (pm *PeerManager) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, fr func (pm *PeerManager) getOrCreate(p peer.ID) *peerQueueInstance { pqi, ok := pm.peerQueues[p] if !ok { - pq := pm.createPeerQueue(p) - pq.Startup(pm.ctx) + pq := pm.createPeerQueue(pm.ctx, p) + pq.Startup() pqi = &peerQueueInstance{0, pq} pm.peerQueues[p] = pqi } diff --git a/bitswap/peermanager/peermanager_test.go b/bitswap/peermanager/peermanager_test.go index ac8595d5d..1d56d042a 100644 --- a/bitswap/peermanager/peermanager_test.go +++ b/bitswap/peermanager/peermanager_test.go @@ -24,15 +24,15 @@ type fakePeer struct { messagesSent chan messageSent } -func (fp *fakePeer) Startup(ctx context.Context) {} -func (fp *fakePeer) Shutdown() {} +func (fp *fakePeer) Startup() {} +func (fp *fakePeer) Shutdown() {} func (fp *fakePeer) AddMessage(entries []*bsmsg.Entry, ses uint64) { fp.messagesSent <- messageSent{fp.p, entries, ses} } -func (fp *fakePeer) AddWantlist(initialEntries []*wantlist.Entry) {} +func (fp *fakePeer) AddWantlist(initialWants *wantlist.SessionTrackedWantlist) {} func makePeerQueueFactory(messagesSent chan messageSent) PeerQueueFactory { - return func(p peer.ID) PeerQueue { + return func(ctx context.Context, p peer.ID) PeerQueue { return &fakePeer{ p: p, messagesSent: messagesSent, diff --git a/bitswap/testutil/testutil.go b/bitswap/testutil/testutil.go index 3d7996668..05fd152b1 100644 --- a/bitswap/testutil/testutil.go +++ b/bitswap/testutil/testutil.go @@ -39,8 +39,8 @@ func GenerateCids(n int) []cid.Cid { } // GenerateWantlist makes a populated wantlist. 
-func GenerateWantlist(n int, ses uint64) *wantlist.ThreadSafe { - wl := wantlist.NewThreadSafe() +func GenerateWantlist(n int, ses uint64) *wantlist.SessionTrackedWantlist { + wl := wantlist.NewSessionTrackedWantlist() for i := 0; i < n; i++ { prioritySeq++ entry := wantlist.NewRefEntry(blockGenerator.Next().Cid(), prioritySeq) diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 947c964da..118a19ff8 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -1,20 +1,17 @@ -// package wantlist implements an object for bitswap that contains the keys +// Package wantlist implements an object for bitswap that contains the keys // that a given peer wants. package wantlist import ( "sort" - "sync" cid "github.com/ipfs/go-cid" ) -type ThreadSafe struct { - lk sync.RWMutex - set map[cid.Cid]*Entry +type SessionTrackedWantlist struct { + set map[cid.Cid]*sessionTrackedEntry } -// not threadsafe type Wantlist struct { set map[cid.Cid]*Entry } @@ -23,17 +20,20 @@ type Entry struct { Cid cid.Cid Priority int - SesTrk map[uint64]struct{} // Trash in a book-keeping field Trash bool } +type sessionTrackedEntry struct { + *Entry + sesTrk map[uint64]struct{} +} + // NewRefEntry creates a new reference tracked wantlist entry. 
func NewRefEntry(c cid.Cid, p int) *Entry { return &Entry{ Cid: c, Priority: p, - SesTrk: make(map[uint64]struct{}), } } @@ -43,9 +43,9 @@ func (es entrySlice) Len() int { return len(es) } func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] } func (es entrySlice) Less(i, j int) bool { return es[i].Priority > es[j].Priority } -func NewThreadSafe() *ThreadSafe { - return &ThreadSafe{ - set: make(map[cid.Cid]*Entry), +func NewSessionTrackedWantlist() *SessionTrackedWantlist { + return &SessionTrackedWantlist{ + set: make(map[cid.Cid]*sessionTrackedEntry), } } @@ -63,33 +63,31 @@ func New() *Wantlist { // TODO: think through priority changes here // Add returns true if the cid did not exist in the wantlist before this call // (even if it was under a different session). -func (w *ThreadSafe) Add(c cid.Cid, priority int, ses uint64) bool { - w.lk.Lock() - defer w.lk.Unlock() +func (w *SessionTrackedWantlist) Add(c cid.Cid, priority int, ses uint64) bool { + if e, ok := w.set[c]; ok { - e.SesTrk[ses] = struct{}{} + e.sesTrk[ses] = struct{}{} return false } - w.set[c] = &Entry{ - Cid: c, - Priority: priority, - SesTrk: map[uint64]struct{}{ses: struct{}{}}, + w.set[c] = &sessionTrackedEntry{ + Entry: &Entry{Cid: c, Priority: priority}, + sesTrk: map[uint64]struct{}{ses: struct{}{}}, } return true } // AddEntry adds given Entry to the wantlist. For more information see Add method. 
-func (w *ThreadSafe) AddEntry(e *Entry, ses uint64) bool { - w.lk.Lock() - defer w.lk.Unlock() +func (w *SessionTrackedWantlist) AddEntry(e *Entry, ses uint64) bool { if ex, ok := w.set[e.Cid]; ok { - ex.SesTrk[ses] = struct{}{} + ex.sesTrk[ses] = struct{}{} return false } - w.set[e.Cid] = e - e.SesTrk[ses] = struct{}{} + w.set[e.Cid] = &sessionTrackedEntry{ + Entry: e, + sesTrk: map[uint64]struct{}{ses: struct{}{}}, + } return true } @@ -97,16 +95,14 @@ func (w *ThreadSafe) AddEntry(e *Entry, ses uint64) bool { // 'true' is returned if this call to Remove removed the final session ID // tracking the cid. (meaning true will be returned iff this call caused the // value of 'Contains(c)' to change from true to false) -func (w *ThreadSafe) Remove(c cid.Cid, ses uint64) bool { - w.lk.Lock() - defer w.lk.Unlock() +func (w *SessionTrackedWantlist) Remove(c cid.Cid, ses uint64) bool { e, ok := w.set[c] if !ok { return false } - delete(e.SesTrk, ses) - if len(e.SesTrk) == 0 { + delete(e.sesTrk, ses) + if len(e.sesTrk) == 0 { delete(w.set, c) return true } @@ -115,35 +111,40 @@ func (w *ThreadSafe) Remove(c cid.Cid, ses uint64) bool { // Contains returns true if the given cid is in the wantlist tracked by one or // more sessions. 
-func (w *ThreadSafe) Contains(k cid.Cid) (*Entry, bool) { - w.lk.RLock() - defer w.lk.RUnlock() +func (w *SessionTrackedWantlist) Contains(k cid.Cid) (*Entry, bool) { e, ok := w.set[k] - return e, ok + if !ok { + return nil, false + } + return e.Entry, true } -func (w *ThreadSafe) Entries() []*Entry { - w.lk.RLock() - defer w.lk.RUnlock() +func (w *SessionTrackedWantlist) Entries() []*Entry { es := make([]*Entry, 0, len(w.set)) for _, e := range w.set { - es = append(es, e) + es = append(es, e.Entry) } return es } -func (w *ThreadSafe) SortedEntries() []*Entry { +func (w *SessionTrackedWantlist) SortedEntries() []*Entry { es := w.Entries() sort.Sort(entrySlice(es)) return es } -func (w *ThreadSafe) Len() int { - w.lk.RLock() - defer w.lk.RUnlock() +func (w *SessionTrackedWantlist) Len() int { return len(w.set) } +func (w *SessionTrackedWantlist) CopyWants(to *SessionTrackedWantlist) { + for _, e := range w.set { + for k := range e.sesTrk { + to.AddEntry(e.Entry, k) + } + } +} + func (w *Wantlist) Len() int { return len(w.set) } diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/wantlist/wantlist_test.go index 4ce31949f..d11f6b7f5 100644 --- a/bitswap/wantlist/wantlist_test.go +++ b/bitswap/wantlist/wantlist_test.go @@ -82,8 +82,8 @@ func TestBasicWantlist(t *testing.T) { } } -func TestSesRefWantlist(t *testing.T) { - wl := NewThreadSafe() +func TestSessionTrackedWantlist(t *testing.T) { + wl := NewSessionTrackedWantlist() if !wl.Add(testcids[0], 5, 1) { t.Fatal("should have added") diff --git a/bitswap/wantmanager/wantmanager.go b/bitswap/wantmanager/wantmanager.go index 57bd65f89..17f76bb28 100644 --- a/bitswap/wantmanager/wantmanager.go +++ b/bitswap/wantmanager/wantmanager.go @@ -24,7 +24,7 @@ const ( // managed by the WantManager. 
type PeerHandler interface { Disconnected(p peer.ID) - Connected(p peer.ID, initialEntries []*wantlist.Entry) + Connected(p peer.ID, initialWants *wantlist.SessionTrackedWantlist) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) } @@ -42,8 +42,8 @@ type WantManager struct { wantMessages chan wantMessage // synchronized by Run loop, only touch inside there - wl *wantlist.ThreadSafe - bcwl *wantlist.ThreadSafe + wl *wantlist.SessionTrackedWantlist + bcwl *wantlist.SessionTrackedWantlist ctx context.Context cancel func() @@ -59,8 +59,8 @@ func New(ctx context.Context) *WantManager { "Number of items in wantlist.").Gauge() return &WantManager{ wantMessages: make(chan wantMessage, 10), - wl: wantlist.NewThreadSafe(), - bcwl: wantlist.NewThreadSafe(), + wl: wantlist.NewSessionTrackedWantlist(), + bcwl: wantlist.NewSessionTrackedWantlist(), ctx: ctx, cancel: cancel, wantlistGauge: wantlistGauge, @@ -274,7 +274,7 @@ type connectedMessage struct { } func (cm *connectedMessage) handle(wm *WantManager) { - wm.peerHandler.Connected(cm.p, wm.bcwl.Entries()) + wm.peerHandler.Connected(cm.p, wm.bcwl) } type disconnectedMessage struct { diff --git a/bitswap/wantmanager/wantmanager_test.go b/bitswap/wantmanager/wantmanager_test.go index 46d1d0b30..4cb05ac08 100644 --- a/bitswap/wantmanager/wantmanager_test.go +++ b/bitswap/wantmanager/wantmanager_test.go @@ -25,8 +25,8 @@ func (fph *fakePeerHandler) SendMessage(entries []*bsmsg.Entry, targets []peer.I fph.lk.Unlock() } -func (fph *fakePeerHandler) Connected(p peer.ID, initialEntries []*wantlist.Entry) {} -func (fph *fakePeerHandler) Disconnected(p peer.ID) {} +func (fph *fakePeerHandler) Connected(p peer.ID, initialWants *wantlist.SessionTrackedWantlist) {} +func (fph *fakePeerHandler) Disconnected(p peer.ID) {} func (fph *fakePeerHandler) getLastWantSet() wantSet { fph.lk.Lock() From 32e6570f7112460b2d5991b91cb5dd584104c04e Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 19 Feb 2019 18:43:24 -0800 
Subject: [PATCH 0720/1035] feat(wantlist): remove trash field put trash field only where it is needed, in peer request queues This commit was moved from ipfs/go-bitswap@95f6e6249886c413f2a39743a934d0919f80c3f8 --- bitswap/decision/peer_request_queue.go | 21 +++++++++++++-------- bitswap/wantlist/wantlist.go | 3 --- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index c7aaf553e..0fa78c8a5 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -60,7 +60,7 @@ func (tl *prq) Push(to peer.ID, entries ...*wantlist.Entry) { defer partner.activelk.Unlock() var priority int - newEntries := make([]*wantlist.Entry, 0, len(entries)) + newEntries := make([]*peerRequestTaskEntry, 0, len(entries)) for _, entry := range entries { if partner.activeBlocks.Has(entry.Cid) { continue @@ -75,7 +75,7 @@ func (tl *prq) Push(to peer.ID, entries ...*wantlist.Entry) { if entry.Priority > priority { priority = entry.Priority } - newEntries = append(newEntries, entry) + newEntries = append(newEntries, &peerRequestTaskEntry{entry, false}) } if len(newEntries) == 0 { @@ -86,7 +86,7 @@ func (tl *prq) Push(to peer.ID, entries ...*wantlist.Entry) { Entries: newEntries, Target: to, created: time.Now(), - Done: func(e []*wantlist.Entry) { + Done: func(e []*peerRequestTaskEntry) { tl.lock.Lock() for _, entry := range e { partner.TaskDone(entry.Cid) @@ -117,10 +117,10 @@ func (tl *prq) Pop() *peerRequestTask { for partner.taskQueue.Len() > 0 && partner.freezeVal == 0 { out = partner.taskQueue.Pop().(*peerRequestTask) - newEntries := make([]*wantlist.Entry, 0, len(out.Entries)) + newEntries := make([]*peerRequestTaskEntry, 0, len(out.Entries)) for _, entry := range out.Entries { delete(tl.taskMap, taskEntryKey{out.Target, entry.Cid}) - if entry.Trash { + if entry.trash { continue } partner.requests-- @@ -150,7 +150,7 @@ func (tl *prq) Remove(k cid.Cid, p peer.ID) 
{ // remove the task "lazily" // simply mark it as trash, so it'll be dropped when popped off the // queue. - entry.Trash = true + entry.trash = true break } } @@ -197,13 +197,18 @@ func (tl *prq) thawRound() { } } +type peerRequestTaskEntry struct { + *wantlist.Entry + // trash in a book-keeping field + trash bool +} type peerRequestTask struct { - Entries []*wantlist.Entry + Entries []*peerRequestTaskEntry Priority int Target peer.ID // A callback to signal that this task has been completed - Done func([]*wantlist.Entry) + Done func([]*peerRequestTaskEntry) // created marks the time that the task was added to the queue created time.Time diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 118a19ff8..1da4ed973 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -19,9 +19,6 @@ type Wantlist struct { type Entry struct { Cid cid.Cid Priority int - - // Trash in a book-keeping field - Trash bool } type sessionTrackedEntry struct { From 55b6cf6f9a5f102d410c4b5ce317b5998a9b2b80 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 20 Feb 2019 15:35:59 -0800 Subject: [PATCH 0721/1035] feat(wantlist): remove an unnecessary allocation We allocate a _lot_ of these. 
This commit was moved from ipfs/go-bitswap@a34d5224992be8842d240540694ad692d0ca1fd9 --- bitswap/decision/bench_test.go | 2 +- bitswap/decision/engine.go | 6 ++-- bitswap/decision/ledger.go | 2 +- bitswap/decision/peer_request_queue.go | 6 ++-- bitswap/decision/peer_request_queue_test.go | 10 +++--- bitswap/message/message.go | 4 +-- bitswap/wantlist/wantlist.go | 38 ++++++++++----------- bitswap/wantlist/wantlist_test.go | 2 +- bitswap/wantmanager/wantmanager.go | 12 +++---- 9 files changed, 41 insertions(+), 41 deletions(-) diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go index 46d40ce0d..4ef862a36 100644 --- a/bitswap/decision/bench_test.go +++ b/bitswap/decision/bench_test.go @@ -25,6 +25,6 @@ func BenchmarkTaskQueuePush(b *testing.B) { for i := 0; i < b.N; i++ { c := cid.NewCidV0(u.Hash([]byte(fmt.Sprint(i)))) - q.Push(peers[i%len(peers)], &wantlist.Entry{Cid: c, Priority: math.MaxInt32}) + q.Push(peers[i%len(peers)], wantlist.Entry{Cid: c, Priority: math.MaxInt32}) } } diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 384c7c698..a8e6f1d11 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -107,7 +107,7 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { return e } -func (e *Engine) WantlistForPeer(p peer.ID) (out []*wl.Entry) { +func (e *Engine) WantlistForPeer(p peer.ID) (out []wl.Entry) { partner := e.findOrCreate(p) partner.lk.Lock() defer partner.lk.Unlock() @@ -241,7 +241,7 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { } var msgSize int - var activeEntries []*wl.Entry + var activeEntries []wl.Entry for _, entry := range m.Wantlist() { if entry.Cancel { log.Debugf("%s cancel %s", p, entry.Cid) @@ -261,7 +261,7 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { newWorkExists = true if msgSize+blockSize > maxMessageSize { e.peerRequestQueue.Push(p, activeEntries...) 
- activeEntries = []*wl.Entry{} + activeEntries = []wl.Entry{} msgSize = 0 } activeEntries = append(activeEntries, entry.Entry) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 2c4497631..374f0e7e5 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -85,7 +85,7 @@ func (l *ledger) CancelWant(k cid.Cid) { l.wantList.Remove(k) } -func (l *ledger) WantListContains(k cid.Cid) (*wl.Entry, bool) { +func (l *ledger) WantListContains(k cid.Cid) (wl.Entry, bool) { return l.wantList.Contains(k) } diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 0fa78c8a5..651085c6d 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -14,7 +14,7 @@ import ( type peerRequestQueue interface { // Pop returns the next peerRequestTask. Returns nil if the peerRequestQueue is empty. Pop() *peerRequestTask - Push(to peer.ID, entries ...*wantlist.Entry) + Push(to peer.ID, entries ...wantlist.Entry) Remove(k cid.Cid, p peer.ID) // NB: cannot expose simply expose taskQueue.Len because trashed elements @@ -46,7 +46,7 @@ type prq struct { } // Push currently adds a new peerRequestTask to the end of the list. 
-func (tl *prq) Push(to peer.ID, entries ...*wantlist.Entry) { +func (tl *prq) Push(to peer.ID, entries ...wantlist.Entry) { tl.lock.Lock() defer tl.lock.Unlock() partner, ok := tl.partners[to] @@ -198,7 +198,7 @@ func (tl *prq) thawRound() { } type peerRequestTaskEntry struct { - *wantlist.Entry + wantlist.Entry // trash in a book-keeping field trash bool } diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index d6ad8989a..246afb065 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -45,7 +45,7 @@ func TestPushPop(t *testing.T) { t.Log(partner.String()) c := cid.NewCidV0(u.Hash([]byte(letter))) - prq.Push(partner, &wantlist.Entry{Cid: c, Priority: math.MaxInt32 - index}) + prq.Push(partner, wantlist.Entry{Cid: c, Priority: math.MaxInt32 - index}) } for _, consonant := range consonants { c := cid.NewCidV0(u.Hash([]byte(consonant))) @@ -87,10 +87,10 @@ func TestPeerRepeats(t *testing.T) { for i := 0; i < 5; i++ { elcid := cid.NewCidV0(u.Hash([]byte(fmt.Sprint(i)))) - prq.Push(a, &wantlist.Entry{Cid: elcid}) - prq.Push(b, &wantlist.Entry{Cid: elcid}) - prq.Push(c, &wantlist.Entry{Cid: elcid}) - prq.Push(d, &wantlist.Entry{Cid: elcid}) + prq.Push(a, wantlist.Entry{Cid: elcid}) + prq.Push(b, wantlist.Entry{Cid: elcid}) + prq.Push(c, wantlist.Entry{Cid: elcid}) + prq.Push(d, wantlist.Entry{Cid: elcid}) } // now, pop off four entries, there should be one from each diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 2b538a2f4..b9035d8ff 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -66,7 +66,7 @@ func newMsg(full bool) *impl { } type Entry struct { - *wantlist.Entry + wantlist.Entry Cancel bool } @@ -150,7 +150,7 @@ func (m *impl) addEntry(c cid.Cid, priority int, cancel bool) { e.Cancel = cancel } else { m.wantlist[c] = &Entry{ - Entry: &wantlist.Entry{ + Entry: wantlist.Entry{ Cid: c, Priority: priority, }, 
diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 1da4ed973..999fcd9ef 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -13,7 +13,7 @@ type SessionTrackedWantlist struct { } type Wantlist struct { - set map[cid.Cid]*Entry + set map[cid.Cid]Entry } type Entry struct { @@ -22,19 +22,19 @@ type Entry struct { } type sessionTrackedEntry struct { - *Entry + Entry sesTrk map[uint64]struct{} } // NewRefEntry creates a new reference tracked wantlist entry. -func NewRefEntry(c cid.Cid, p int) *Entry { - return &Entry{ +func NewRefEntry(c cid.Cid, p int) Entry { + return Entry{ Cid: c, Priority: p, } } -type entrySlice []*Entry +type entrySlice []Entry func (es entrySlice) Len() int { return len(es) } func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] } @@ -48,7 +48,7 @@ func NewSessionTrackedWantlist() *SessionTrackedWantlist { func New() *Wantlist { return &Wantlist{ - set: make(map[cid.Cid]*Entry), + set: make(map[cid.Cid]Entry), } } @@ -68,7 +68,7 @@ func (w *SessionTrackedWantlist) Add(c cid.Cid, priority int, ses uint64) bool { } w.set[c] = &sessionTrackedEntry{ - Entry: &Entry{Cid: c, Priority: priority}, + Entry: Entry{Cid: c, Priority: priority}, sesTrk: map[uint64]struct{}{ses: struct{}{}}, } @@ -76,7 +76,7 @@ func (w *SessionTrackedWantlist) Add(c cid.Cid, priority int, ses uint64) bool { } // AddEntry adds given Entry to the wantlist. For more information see Add method. -func (w *SessionTrackedWantlist) AddEntry(e *Entry, ses uint64) bool { +func (w *SessionTrackedWantlist) AddEntry(e Entry, ses uint64) bool { if ex, ok := w.set[e.Cid]; ok { ex.sesTrk[ses] = struct{}{} return false @@ -108,23 +108,23 @@ func (w *SessionTrackedWantlist) Remove(c cid.Cid, ses uint64) bool { // Contains returns true if the given cid is in the wantlist tracked by one or // more sessions. 
-func (w *SessionTrackedWantlist) Contains(k cid.Cid) (*Entry, bool) { +func (w *SessionTrackedWantlist) Contains(k cid.Cid) (Entry, bool) { e, ok := w.set[k] if !ok { - return nil, false + return Entry{}, false } return e.Entry, true } -func (w *SessionTrackedWantlist) Entries() []*Entry { - es := make([]*Entry, 0, len(w.set)) +func (w *SessionTrackedWantlist) Entries() []Entry { + es := make([]Entry, 0, len(w.set)) for _, e := range w.set { es = append(es, e.Entry) } return es } -func (w *SessionTrackedWantlist) SortedEntries() []*Entry { +func (w *SessionTrackedWantlist) SortedEntries() []Entry { es := w.Entries() sort.Sort(entrySlice(es)) return es @@ -151,7 +151,7 @@ func (w *Wantlist) Add(c cid.Cid, priority int) bool { return false } - w.set[c] = &Entry{ + w.set[c] = Entry{ Cid: c, Priority: priority, } @@ -159,7 +159,7 @@ func (w *Wantlist) Add(c cid.Cid, priority int) bool { return true } -func (w *Wantlist) AddEntry(e *Entry) bool { +func (w *Wantlist) AddEntry(e Entry) bool { if _, ok := w.set[e.Cid]; ok { return false } @@ -177,20 +177,20 @@ func (w *Wantlist) Remove(c cid.Cid) bool { return true } -func (w *Wantlist) Contains(c cid.Cid) (*Entry, bool) { +func (w *Wantlist) Contains(c cid.Cid) (Entry, bool) { e, ok := w.set[c] return e, ok } -func (w *Wantlist) Entries() []*Entry { - es := make([]*Entry, 0, len(w.set)) +func (w *Wantlist) Entries() []Entry { + es := make([]Entry, 0, len(w.set)) for _, e := range w.set { es = append(es, e) } return es } -func (w *Wantlist) SortedEntries() []*Entry { +func (w *Wantlist) SortedEntries() []Entry { es := w.Entries() sort.Sort(entrySlice(es)) return es diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/wantlist/wantlist_test.go index d11f6b7f5..8616efb0e 100644 --- a/bitswap/wantlist/wantlist_test.go +++ b/bitswap/wantlist/wantlist_test.go @@ -25,7 +25,7 @@ func init() { } type wli interface { - Contains(cid.Cid) (*Entry, bool) + Contains(cid.Cid) (Entry, bool) } func assertHasCid(t *testing.T, w wli, c 
cid.Cid) { diff --git a/bitswap/wantmanager/wantmanager.go b/bitswap/wantmanager/wantmanager.go index 17f76bb28..bf5db3c4a 100644 --- a/bitswap/wantmanager/wantmanager.go +++ b/bitswap/wantmanager/wantmanager.go @@ -100,8 +100,8 @@ func (wm *WantManager) IsWanted(c cid.Cid) bool { } // CurrentWants returns the list of current wants. -func (wm *WantManager) CurrentWants() []*wantlist.Entry { - resp := make(chan []*wantlist.Entry, 1) +func (wm *WantManager) CurrentWants() []wantlist.Entry { + resp := make(chan []wantlist.Entry, 1) select { case wm.wantMessages <- ¤tWantsMessage{resp}: case <-wm.ctx.Done(): @@ -116,8 +116,8 @@ func (wm *WantManager) CurrentWants() []*wantlist.Entry { } // CurrentBroadcastWants returns the current list of wants that are broadcasts. -func (wm *WantManager) CurrentBroadcastWants() []*wantlist.Entry { - resp := make(chan []*wantlist.Entry, 1) +func (wm *WantManager) CurrentBroadcastWants() []wantlist.Entry { + resp := make(chan []wantlist.Entry, 1) select { case wm.wantMessages <- ¤tBroadcastWantsMessage{resp}: case <-wm.ctx.Done(): @@ -246,7 +246,7 @@ func (iwm *isWantedMessage) handle(wm *WantManager) { } type currentWantsMessage struct { - resp chan<- []*wantlist.Entry + resp chan<- []wantlist.Entry } func (cwm *currentWantsMessage) handle(wm *WantManager) { @@ -254,7 +254,7 @@ func (cwm *currentWantsMessage) handle(wm *WantManager) { } type currentBroadcastWantsMessage struct { - resp chan<- []*wantlist.Entry + resp chan<- []wantlist.Entry } func (cbcwm *currentBroadcastWantsMessage) handle(wm *WantManager) { From 297262c4228f9ed7080a4c8c8c24edbf2f29771c Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 20 Feb 2019 15:39:59 -0800 Subject: [PATCH 0722/1035] feat(prq): don't allocate peerRequestTaskEntrys Each one is about 4 words wide (two for the CID, one for the priority, one for the trash flag). 
This commit was moved from ipfs/go-bitswap@5257505b5e853208dc4161b955ccbd82b9141748 --- bitswap/decision/peer_request_queue.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 651085c6d..4f6ededcc 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -60,7 +60,7 @@ func (tl *prq) Push(to peer.ID, entries ...wantlist.Entry) { defer partner.activelk.Unlock() var priority int - newEntries := make([]*peerRequestTaskEntry, 0, len(entries)) + newEntries := make([]peerRequestTaskEntry, 0, len(entries)) for _, entry := range entries { if partner.activeBlocks.Has(entry.Cid) { continue @@ -75,7 +75,7 @@ func (tl *prq) Push(to peer.ID, entries ...wantlist.Entry) { if entry.Priority > priority { priority = entry.Priority } - newEntries = append(newEntries, &peerRequestTaskEntry{entry, false}) + newEntries = append(newEntries, peerRequestTaskEntry{entry, false}) } if len(newEntries) == 0 { @@ -86,7 +86,7 @@ func (tl *prq) Push(to peer.ID, entries ...wantlist.Entry) { Entries: newEntries, Target: to, created: time.Now(), - Done: func(e []*peerRequestTaskEntry) { + Done: func(e []peerRequestTaskEntry) { tl.lock.Lock() for _, entry := range e { partner.TaskDone(entry.Cid) @@ -117,7 +117,7 @@ func (tl *prq) Pop() *peerRequestTask { for partner.taskQueue.Len() > 0 && partner.freezeVal == 0 { out = partner.taskQueue.Pop().(*peerRequestTask) - newEntries := make([]*peerRequestTaskEntry, 0, len(out.Entries)) + newEntries := make([]peerRequestTaskEntry, 0, len(out.Entries)) for _, entry := range out.Entries { delete(tl.taskMap, taskEntryKey{out.Target, entry.Cid}) if entry.trash { @@ -145,12 +145,12 @@ func (tl *prq) Remove(k cid.Cid, p peer.ID) { tl.lock.Lock() t, ok := tl.taskMap[taskEntryKey{p, k}] if ok { - for _, entry := range t.Entries { - if entry.Cid.Equals(k) { + for i := range t.Entries { + if 
t.Entries[i].Cid.Equals(k) { // remove the task "lazily" // simply mark it as trash, so it'll be dropped when popped off the // queue. - entry.trash = true + t.Entries[i].trash = true break } } @@ -203,12 +203,12 @@ type peerRequestTaskEntry struct { trash bool } type peerRequestTask struct { - Entries []*peerRequestTaskEntry + Entries []peerRequestTaskEntry Priority int Target peer.ID // A callback to signal that this task has been completed - Done func([]*peerRequestTaskEntry) + Done func([]peerRequestTaskEntry) // created marks the time that the task was added to the queue created time.Time From 8f7496a734fc16f66d35075b5d0ce277b679be0d Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 20 Feb 2019 15:45:35 -0800 Subject: [PATCH 0723/1035] fix(bitswap): remove CancelWants function Fixes #50. This commit was moved from ipfs/go-bitswap@d1a791cb94e826c3f3386a0d6ebb5817f486910a --- bitswap/bitswap.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 28c1589b9..94dec9ac1 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -239,14 +239,6 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks return session.GetBlocks(ctx, keys) } -// CancelWants removes a given key from the wantlist. -func (bs *Bitswap) CancelWants(cids []cid.Cid, ses uint64) { - if len(cids) == 0 { - return - } - bs.wm.CancelWants(context.Background(), cids, nil, ses) -} - // HasBlock announces the existence of a block to this bitswap service. The // service will potentially notify its peers. 
func (bs *Bitswap) HasBlock(blk blocks.Block) error { From f7a5163ef793ec8b3125dce67dd66049aed2d99c Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 20 Feb 2019 17:29:22 -0800 Subject: [PATCH 0724/1035] nit: remove bsmsg.Entry redirection This commit was moved from ipfs/go-bitswap@cb8e65a8ce5fd69f93aa0c7afd18674a3c9777a9 --- bitswap/messagequeue/messagequeue.go | 6 +++--- bitswap/peermanager/peermanager.go | 4 ++-- bitswap/peermanager/peermanager_test.go | 6 +++--- bitswap/testutil/testutil.go | 6 +++--- bitswap/wantmanager/wantmanager.go | 8 ++++---- bitswap/wantmanager/wantmanager_test.go | 2 +- 6 files changed, 16 insertions(+), 16 deletions(-) diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go index e92046522..3383e326e 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -43,7 +43,7 @@ type MessageQueue struct { } type messageRequest struct { - entries []*bsmsg.Entry + entries []bsmsg.Entry ses uint64 } @@ -65,7 +65,7 @@ func New(ctx context.Context, p peer.ID, network MessageNetwork) *MessageQueue { } // AddMessage adds new entries to an outgoing message for a given session. 
-func (mq *MessageQueue) AddMessage(entries []*bsmsg.Entry, ses uint64) { +func (mq *MessageQueue) AddMessage(entries []bsmsg.Entry, ses uint64) { select { case mq.newRequests <- &messageRequest{entries, ses}: case <-mq.ctx.Done(): @@ -140,7 +140,7 @@ func (wr *wantlistRequest) handle(mq *MessageQueue) { } } -func (mq *MessageQueue) addEntries(entries []*bsmsg.Entry, ses uint64) { +func (mq *MessageQueue) addEntries(entries []bsmsg.Entry, ses uint64) { for _, e := range entries { if e.Cancel { if mq.wl.Remove(e.Cid, ses) { diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index b1b8ee9a7..59e8ca3de 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -19,7 +19,7 @@ var ( // PeerQueue provides a queer of messages to be sent for a single peer. type PeerQueue interface { - AddMessage(entries []*bsmsg.Entry, ses uint64) + AddMessage(entries []bsmsg.Entry, ses uint64) Startup() AddWantlist(initialWants *wantlist.SessionTrackedWantlist) Shutdown() @@ -108,7 +108,7 @@ func (pm *PeerManager) Disconnected(p peer.ID) { // SendMessage is called to send a message to all or some peers in the pool; // if targets is nil, it sends to all. 
-func (pm *PeerManager) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) { +func (pm *PeerManager) SendMessage(entries []bsmsg.Entry, targets []peer.ID, from uint64) { if len(targets) == 0 { pm.peerQueuesLk.RLock() for _, p := range pm.peerQueues { diff --git a/bitswap/peermanager/peermanager_test.go b/bitswap/peermanager/peermanager_test.go index 1d56d042a..0505f973b 100644 --- a/bitswap/peermanager/peermanager_test.go +++ b/bitswap/peermanager/peermanager_test.go @@ -15,7 +15,7 @@ import ( type messageSent struct { p peer.ID - entries []*bsmsg.Entry + entries []bsmsg.Entry ses uint64 } @@ -27,7 +27,7 @@ type fakePeer struct { func (fp *fakePeer) Startup() {} func (fp *fakePeer) Shutdown() {} -func (fp *fakePeer) AddMessage(entries []*bsmsg.Entry, ses uint64) { +func (fp *fakePeer) AddMessage(entries []bsmsg.Entry, ses uint64) { fp.messagesSent <- messageSent{fp.p, entries, ses} } func (fp *fakePeer) AddWantlist(initialWants *wantlist.SessionTrackedWantlist) {} @@ -44,7 +44,7 @@ func collectAndCheckMessages( ctx context.Context, t *testing.T, messagesSent <-chan messageSent, - entries []*bsmsg.Entry, + entries []bsmsg.Entry, ses uint64, timeout time.Duration) []peer.ID { var peersReceived []peer.ID diff --git a/bitswap/testutil/testutil.go b/bitswap/testutil/testutil.go index 05fd152b1..87bd91d2d 100644 --- a/bitswap/testutil/testutil.go +++ b/bitswap/testutil/testutil.go @@ -50,11 +50,11 @@ func GenerateWantlist(n int, ses uint64) *wantlist.SessionTrackedWantlist { } // GenerateMessageEntries makes fake bitswap message entries. 
-func GenerateMessageEntries(n int, isCancel bool) []*bsmsg.Entry { - bsmsgs := make([]*bsmsg.Entry, 0, n) +func GenerateMessageEntries(n int, isCancel bool) []bsmsg.Entry { + bsmsgs := make([]bsmsg.Entry, 0, n) for i := 0; i < n; i++ { prioritySeq++ - msg := &bsmsg.Entry{ + msg := bsmsg.Entry{ Entry: wantlist.NewRefEntry(blockGenerator.Next().Cid(), prioritySeq), Cancel: isCancel, } diff --git a/bitswap/wantmanager/wantmanager.go b/bitswap/wantmanager/wantmanager.go index bf5db3c4a..0fd7d5a1a 100644 --- a/bitswap/wantmanager/wantmanager.go +++ b/bitswap/wantmanager/wantmanager.go @@ -25,7 +25,7 @@ const ( type PeerHandler interface { Disconnected(p peer.ID) Connected(p peer.ID, initialWants *wantlist.SessionTrackedWantlist) - SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) + SendMessage(entries []bsmsg.Entry, targets []peer.ID, from uint64) } type wantMessage interface { @@ -187,9 +187,9 @@ func (wm *WantManager) run() { } func (wm *WantManager) addEntries(ctx context.Context, ks []cid.Cid, targets []peer.ID, cancel bool, ses uint64) { - entries := make([]*bsmsg.Entry, 0, len(ks)) + entries := make([]bsmsg.Entry, 0, len(ks)) for i, k := range ks { - entries = append(entries, &bsmsg.Entry{ + entries = append(entries, bsmsg.Entry{ Cancel: cancel, Entry: wantlist.NewRefEntry(k, maxPriority-i), }) @@ -202,7 +202,7 @@ func (wm *WantManager) addEntries(ctx context.Context, ks []cid.Cid, targets []p } type wantSet struct { - entries []*bsmsg.Entry + entries []bsmsg.Entry targets []peer.ID from uint64 } diff --git a/bitswap/wantmanager/wantmanager_test.go b/bitswap/wantmanager/wantmanager_test.go index 4cb05ac08..3b9d0cb18 100644 --- a/bitswap/wantmanager/wantmanager_test.go +++ b/bitswap/wantmanager/wantmanager_test.go @@ -19,7 +19,7 @@ type fakePeerHandler struct { lastWantSet wantSet } -func (fph *fakePeerHandler) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) { +func (fph *fakePeerHandler) SendMessage(entries []bsmsg.Entry, 
targets []peer.ID, from uint64) { fph.lk.Lock() fph.lastWantSet = wantSet{entries, targets, from} fph.lk.Unlock() From a43e40e7ea1dd4fdc6a3026d04fcf48ecdce1881 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 20 Feb 2019 17:45:45 -0800 Subject: [PATCH 0725/1035] feat(messagequeue): use a buffer pool This commit was moved from ipfs/go-bitswap@8d357ff2fde61213129ba28e048e197ab5a7b108 --- bitswap/messagequeue/messagequeue.go | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go index 3383e326e..405daf39e 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -2,6 +2,7 @@ package messagequeue import ( "context" + "sync" "time" bsmsg "github.com/ipfs/go-bitswap/message" @@ -67,7 +68,7 @@ func New(ctx context.Context, p peer.ID, network MessageNetwork) *MessageQueue { // AddMessage adds new entries to an outgoing message for a given session. func (mq *MessageQueue) AddMessage(entries []bsmsg.Entry, ses uint64) { select { - case mq.newRequests <- &messageRequest{entries, ses}: + case mq.newRequests <- newMessageRequest(entries, ses): case <-mq.ctx.Done(): } } @@ -123,8 +124,28 @@ func (mq *MessageQueue) runQueue() { } } +// We allocate a bunch of these so use a pool. 
+var messageRequestPool = sync.Pool{ + New: func() interface{} { + return new(messageRequest) + }, +} + +func newMessageRequest(entries []bsmsg.Entry, session uint64) *messageRequest { + mr := messageRequestPool.Get().(*messageRequest) + mr.entries = entries + mr.ses = session + return mr +} + +func returnMessageRequest(mr *messageRequest) { + *mr = messageRequest{} + messageRequestPool.Put(mr) +} + func (mr *messageRequest) handle(mq *MessageQueue) { mq.addEntries(mr.entries, mr.ses) + returnMessageRequest(mr) } func (wr *wantlistRequest) handle(mq *MessageQueue) { From b9127d7c76c78b3bf00421db43980e5259663018 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 26 Feb 2019 18:30:06 -0700 Subject: [PATCH 0726/1035] fix(prq): return a closed channel when encountering a canceled context Otherwise, we'll wait forever. This commit was moved from ipfs/go-bitswap@b08e0f554424ce640acb1cb41bb8232c181052e0 --- .../providerquerymanager/providerquerymanager.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/bitswap/providerquerymanager/providerquerymanager.go b/bitswap/providerquerymanager/providerquerymanager.go index 38471479e..ec6eaa11a 100644 --- a/bitswap/providerquerymanager/providerquerymanager.go +++ b/bitswap/providerquerymanager/providerquerymanager.go @@ -124,17 +124,25 @@ func (pqm *ProviderQueryManager) FindProvidersAsync(sessionCtx context.Context, inProgressRequestChan: inProgressRequestChan, }: case <-pqm.ctx.Done(): - return nil + ch := make(chan peer.ID) + close(ch) + return ch case <-sessionCtx.Done(): - return nil + ch := make(chan peer.ID) + close(ch) + return ch } var receivedInProgressRequest inProgressRequest select { case <-pqm.ctx.Done(): - return nil + ch := make(chan peer.ID) + close(ch) + return ch case <-sessionCtx.Done(): - return nil + ch := make(chan peer.ID) + close(ch) + return ch case receivedInProgressRequest = <-inProgressRequestChan: } From 3b113d0b2d5dec328d3bbae945fcc7ece0bca455 Mon Sep 17 
00:00:00 2001 From: Steven Allen Date: Tue, 26 Feb 2019 18:36:12 -0700 Subject: [PATCH 0727/1035] fix(prq): make sure to cancel in-progress provider queries. This commit was moved from ipfs/go-bitswap@9394d3b6f8e5d61a9136ea7de2548004fb3ed9a2 --- bitswap/providerquerymanager/providerquerymanager.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/bitswap/providerquerymanager/providerquerymanager.go b/bitswap/providerquerymanager/providerquerymanager.go index ec6eaa11a..5d00a2b8b 100644 --- a/bitswap/providerquerymanager/providerquerymanager.go +++ b/bitswap/providerquerymanager/providerquerymanager.go @@ -133,16 +133,15 @@ func (pqm *ProviderQueryManager) FindProvidersAsync(sessionCtx context.Context, return ch } + // DO NOT select on sessionCtx. We only want to abort here if we're + // shutting down because we can't actually _cancel_ the request till we + // get to receiveProviders. var receivedInProgressRequest inProgressRequest select { case <-pqm.ctx.Done(): ch := make(chan peer.ID) close(ch) return ch - case <-sessionCtx.Done(): - ch := make(chan peer.ID) - close(ch) - return ch case receivedInProgressRequest = <-inProgressRequestChan: } From 646d81c4666d3b2da7881233601b73ded462a3e2 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 26 Feb 2019 18:54:56 -0700 Subject: [PATCH 0728/1035] feat(prq): don't try to cancel finished provider requests This commit was moved from ipfs/go-bitswap@ffef00d97eee61a9baf7f324664a00b8e3e66edd --- bitswap/providerquerymanager/providerquerymanager.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bitswap/providerquerymanager/providerquerymanager.go b/bitswap/providerquerymanager/providerquerymanager.go index 5d00a2b8b..3f8b7e566 100644 --- a/bitswap/providerquerymanager/providerquerymanager.go +++ b/bitswap/providerquerymanager/providerquerymanager.go @@ -177,7 +177,9 @@ func (pqm *ProviderQueryManager) receiveProviders(sessionCtx context.Context, k case <-pqm.ctx.Done(): return 
case <-sessionCtx.Done(): - pqm.cancelProviderRequest(k, incomingProviders) + if incomingProviders != nil { + pqm.cancelProviderRequest(k, incomingProviders) + } return case provider, ok := <-incomingProviders: if !ok { From 4e518720293ac0aa33af7ed7eb1f7a659215fa1e Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 26 Feb 2019 19:07:40 -0700 Subject: [PATCH 0729/1035] fix(prq): use the right context when connecting to providers This commit was moved from ipfs/go-bitswap@6407817be191c76563f50b0155a5044acc4f2e34 --- bitswap/providerquerymanager/providerquerymanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/providerquerymanager/providerquerymanager.go b/bitswap/providerquerymanager/providerquerymanager.go index 3f8b7e566..110772a23 100644 --- a/bitswap/providerquerymanager/providerquerymanager.go +++ b/bitswap/providerquerymanager/providerquerymanager.go @@ -237,7 +237,7 @@ func (pqm *ProviderQueryManager) findProviderWorker() { wg.Add(1) go func(p peer.ID) { defer wg.Done() - err := pqm.network.ConnectTo(pqm.ctx, p) + err := pqm.network.ConnectTo(findProviderCtx, p) if err != nil { log.Debugf("failed to connect to provider %s: %s", p, err) return From 337535e3253e1a910efb14ca16ef9bd954daa3da Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 26 Feb 2019 19:08:02 -0700 Subject: [PATCH 0730/1035] fix(prq): remove error logs for normal conditions This commit was moved from ipfs/go-bitswap@f6e0527444aae4102a7cb5ddd9531da7b9dee203 --- bitswap/providerquerymanager/providerquerymanager.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/providerquerymanager/providerquerymanager.go b/bitswap/providerquerymanager/providerquerymanager.go index 110772a23..290652282 100644 --- a/bitswap/providerquerymanager/providerquerymanager.go +++ b/bitswap/providerquerymanager/providerquerymanager.go @@ -406,12 +406,12 @@ func (crm *cancelRequestMessage) debugMessage() string { func (crm *cancelRequestMessage) 
handle(pqm *ProviderQueryManager) { requestStatus, ok := pqm.inProgressRequestStatuses[crm.k] if !ok { - log.Errorf("Attempt to cancel request for cid (%s) not in progress", crm.k.String()) + // Request finished while queued. return } _, ok = requestStatus.listeners[crm.incomingProviders] if !ok { - log.Errorf("Attempt to cancel request for for cid (%s) this is not a listener", crm.k.String()) + // Request finished and _restarted_ while queued. return } delete(requestStatus.listeners, crm.incomingProviders) From b4ad58082c1297c91e1fd6fea913de261906ff65 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 26 Feb 2019 20:11:25 -0700 Subject: [PATCH 0731/1035] test(prq): test finding providers with a pre-canceled context This commit was moved from ipfs/go-bitswap@21ccf0c77121d5b50142eb59c021cebab5d8188d --- .../providerquerymanager_test.go | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/bitswap/providerquerymanager/providerquerymanager_test.go b/bitswap/providerquerymanager/providerquerymanager_test.go index 3abe6b0e8..9a70d8071 100644 --- a/bitswap/providerquerymanager/providerquerymanager_test.go +++ b/bitswap/providerquerymanager/providerquerymanager_test.go @@ -304,3 +304,28 @@ func TestFindProviderTimeout(t *testing.T) { t.Fatal("Find provider request should have timed out, did not") } } + +func TestFindProviderPreCanceled(t *testing.T) { + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + providerQueryManager.SetFindProviderTimeout(100 * time.Millisecond) + keys := testutil.GenerateCids(1) + + sessionCtx, cancel := context.WithCancel(ctx) + cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) + if firstRequestChan == nil { + t.Fatal("expected non-nil channel") + } + select { + case <-firstRequestChan: + case <-time.After(10 
* time.Millisecond): + t.Fatal("shouldn't have blocked waiting on a closed context") + } +} From 21a5bc4e62db15c6b943a674a43ceea331dcfd07 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 26 Feb 2019 20:22:23 -0700 Subject: [PATCH 0732/1035] test(prq): test canceling FindProviders context after completion This commit was moved from ipfs/go-bitswap@04e47665d2ec4ea2a006dfcf6861e3eb87b71e88 --- .../providerquerymanager_test.go | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/bitswap/providerquerymanager/providerquerymanager_test.go b/bitswap/providerquerymanager/providerquerymanager_test.go index 9a70d8071..efdfd14f5 100644 --- a/bitswap/providerquerymanager/providerquerymanager_test.go +++ b/bitswap/providerquerymanager/providerquerymanager_test.go @@ -329,3 +329,35 @@ func TestFindProviderPreCanceled(t *testing.T) { t.Fatal("shouldn't have blocked waiting on a closed context") } } + +func TestCancelFindProvidersAfterCompletion(t *testing.T) { + peers := testutil.GeneratePeers(2) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + providerQueryManager.SetFindProviderTimeout(100 * time.Millisecond) + keys := testutil.GenerateCids(1) + + sessionCtx, cancel := context.WithCancel(ctx) + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) + <-firstRequestChan // wait for everything to start. + time.Sleep(10 * time.Millisecond) // wait for the incoming providres to stop. + cancel() // cancel the context. 
+ + timer := time.NewTimer(10 * time.Millisecond) + defer timer.Stop() + for { + select { + case _, ok := <-firstRequestChan: + if !ok { + return + } + case <-timer.C: + t.Fatal("should have finished receiving responses within timeout") + } + } +} From 66bae3dcf60efcd2be9cf471f51fe7399aea8386 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 28 Feb 2019 10:35:26 -0800 Subject: [PATCH 0733/1035] fix: limit use of custom context type Goprocess returns a _custom_ context type. Unfortunately, golang has a bunch of magic type assertions to efficiently handle built-in context types but launches a new goroutine when deriving a new context from a custom context type. Otherwise, it has no way to wait on the custom context's channel. This fix just ensures we only ever have one of goroutines per provide worker by deriving a (normal) cancelable context up-front and then using that. This commit was moved from ipfs/go-bitswap@799bfb3e288d96af0429eac20656adcca8e5e6b9 --- bitswap/workers.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/bitswap/workers.go b/bitswap/workers.go index 614f95c1d..45f786152 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -98,6 +98,15 @@ func (bs *Bitswap) sendBlocks(ctx context.Context, env *engine.Envelope) { } func (bs *Bitswap) provideWorker(px process.Process) { + // FIXME: OnClosingContext returns a _custom_ context type. + // Unfortunately, deriving a new cancelable context from this custom + // type fires off a goroutine. To work around this, we create a single + // cancelable context up-front and derive all sub-contexts from that. 
+ // + // See: https://github.com/ipfs/go-ipfs/issues/5810 + ctx := procctx.OnClosingContext(px) + ctx, cancel := context.WithCancel(ctx) + defer cancel() limit := make(chan struct{}, provideWorkerMax) @@ -108,7 +117,6 @@ func (bs *Bitswap) provideWorker(px process.Process) { }() ev := logging.LoggableMap{"ID": wid} - ctx := procctx.OnClosingContext(px) // derive ctx from px defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, k).Done() ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx @@ -123,7 +131,7 @@ func (bs *Bitswap) provideWorker(px process.Process) { // _ratelimited_ number of workers to handle each key. for wid := 2; ; wid++ { ev := logging.LoggableMap{"ID": 1} - log.Event(procctx.OnClosingContext(px), "Bitswap.ProvideWorker.Loop", ev) + log.Event(ctx, "Bitswap.ProvideWorker.Loop", ev) select { case <-px.Closing(): From cc6b36318d71a16bdc2212ee1fda9a2aba4b4023 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 4 Mar 2019 10:46:17 -0800 Subject: [PATCH 0734/1035] fix: remove non-error log message This can happen even when everything is working correctly. fixes https://github.com/ipfs/go-ipfs/issues/6046 This commit was moved from ipfs/go-bitswap@c88c0e9ebb459459dbba5db613371258b9a44e04 --- bitswap/providerquerymanager/providerquerymanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/providerquerymanager/providerquerymanager.go b/bitswap/providerquerymanager/providerquerymanager.go index 290652282..a84e1f912 100644 --- a/bitswap/providerquerymanager/providerquerymanager.go +++ b/bitswap/providerquerymanager/providerquerymanager.go @@ -355,7 +355,7 @@ func (fpqm *finishedProviderQueryMessage) debugMessage() string { func (fpqm *finishedProviderQueryMessage) handle(pqm *ProviderQueryManager) { requestStatus, ok := pqm.inProgressRequestStatuses[fpqm.k] if !ok { - log.Errorf("Ended request for cid (%s) not in progress", fpqm.k.String()) + // we canceled the request as it finished. 
return } for listener := range requestStatus.listeners { From ef069e8e5a5810019843007dfb561b3ee4e03b0b Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Mon, 4 Mar 2019 13:47:09 -0800 Subject: [PATCH 0735/1035] fix(messagequeue): Remove second run loop Revert to the old go-routine architecture for the messagequeue, which I believe is still compatible w/ wantlist w/o mutex fix #92 This commit was moved from ipfs/go-bitswap@576388c6dbaf2e271082ba9a1c5c975bfea375db --- bitswap/messagequeue/messagequeue.go | 143 +++++++++++---------------- 1 file changed, 57 insertions(+), 86 deletions(-) diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go index 405daf39e..e3d09caf5 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -33,14 +33,15 @@ type MessageQueue struct { p peer.ID network MessageNetwork - newRequests chan request - outgoingMessages chan bsmsg.BitSwapMessage - done chan struct{} + newRequests chan request + outgoingWork chan struct{} + done chan struct{} // do not touch out of run loop - wl *wantlist.SessionTrackedWantlist - nextMessage bsmsg.BitSwapMessage - sender bsnet.MessageSender + wl *wantlist.SessionTrackedWantlist + nextMessage bsmsg.BitSwapMessage + nextMessageLk sync.RWMutex + sender bsnet.MessageSender } type messageRequest struct { @@ -55,32 +56,44 @@ type wantlistRequest struct { // New creats a new MessageQueue. 
func New(ctx context.Context, p peer.ID, network MessageNetwork) *MessageQueue { return &MessageQueue{ - ctx: ctx, - wl: wantlist.NewSessionTrackedWantlist(), - network: network, - p: p, - newRequests: make(chan request, 16), - outgoingMessages: make(chan bsmsg.BitSwapMessage), - done: make(chan struct{}), + ctx: ctx, + wl: wantlist.NewSessionTrackedWantlist(), + network: network, + p: p, + newRequests: make(chan request, 16), + outgoingWork: make(chan struct{}, 1), + done: make(chan struct{}), } } // AddMessage adds new entries to an outgoing message for a given session. func (mq *MessageQueue) AddMessage(entries []bsmsg.Entry, ses uint64) { + if !mq.addEntries(entries, ses) { + return + } select { - case mq.newRequests <- newMessageRequest(entries, ses): - case <-mq.ctx.Done(): + case mq.outgoingWork <- struct{}{}: + default: } } // AddWantlist adds a complete session tracked want list to a message queue func (mq *MessageQueue) AddWantlist(initialWants *wantlist.SessionTrackedWantlist) { - wl := wantlist.NewSessionTrackedWantlist() - initialWants.CopyWants(wl) + mq.nextMessageLk.Lock() + defer mq.nextMessageLk.Unlock() - select { - case mq.newRequests <- &wantlistRequest{wl}: - case <-mq.ctx.Done(): + initialWants.CopyWants(mq.wl) + if initialWants.Len() > 0 { + if mq.nextMessage == nil { + mq.nextMessage = bsmsg.New(false) + } + for _, e := range initialWants.Entries() { + mq.nextMessage.AddEntry(e.Cid, e.Priority) + } + select { + case mq.outgoingWork <- struct{}{}: + default: + } } } @@ -88,7 +101,6 @@ func (mq *MessageQueue) AddWantlist(initialWants *wantlist.SessionTrackedWantlis // based on the given initial wantlist. func (mq *MessageQueue) Startup() { go mq.runQueue() - go mq.sendMessages() } // Shutdown stops the processing of messages for a message queue. 
@@ -97,19 +109,10 @@ func (mq *MessageQueue) Shutdown() { } func (mq *MessageQueue) runQueue() { - outgoingMessages := func() chan bsmsg.BitSwapMessage { - if mq.nextMessage == nil { - return nil - } - return mq.outgoingMessages - } - for { select { - case newRequest := <-mq.newRequests: - newRequest.handle(mq) - case outgoingMessages() <- mq.nextMessage: - mq.nextMessage = nil + case <-mq.outgoingWork: + mq.sendMessage() case <-mq.done: if mq.sender != nil { mq.sender.Close() @@ -124,77 +127,45 @@ func (mq *MessageQueue) runQueue() { } } -// We allocate a bunch of these so use a pool. -var messageRequestPool = sync.Pool{ - New: func() interface{} { - return new(messageRequest) - }, -} - -func newMessageRequest(entries []bsmsg.Entry, session uint64) *messageRequest { - mr := messageRequestPool.Get().(*messageRequest) - mr.entries = entries - mr.ses = session - return mr -} - -func returnMessageRequest(mr *messageRequest) { - *mr = messageRequest{} - messageRequestPool.Put(mr) -} - -func (mr *messageRequest) handle(mq *MessageQueue) { - mq.addEntries(mr.entries, mr.ses) - returnMessageRequest(mr) -} - -func (wr *wantlistRequest) handle(mq *MessageQueue) { - initialWants := wr.wl - initialWants.CopyWants(mq.wl) - if initialWants.Len() > 0 { - if mq.nextMessage == nil { - mq.nextMessage = bsmsg.New(false) - } - for _, e := range initialWants.Entries() { - mq.nextMessage.AddEntry(e.Cid, e.Priority) - } +func (mq *MessageQueue) addEntries(entries []bsmsg.Entry, ses uint64) bool { + var work bool + mq.nextMessageLk.Lock() + defer mq.nextMessageLk.Unlock() + // if we have no message held allocate a new one + if mq.nextMessage == nil { + mq.nextMessage = bsmsg.New(false) } -} -func (mq *MessageQueue) addEntries(entries []bsmsg.Entry, ses uint64) { for _, e := range entries { if e.Cancel { if mq.wl.Remove(e.Cid, ses) { - if mq.nextMessage == nil { - mq.nextMessage = bsmsg.New(false) - } + work = true mq.nextMessage.Cancel(e.Cid) } } else { if mq.wl.Add(e.Cid, e.Priority, 
ses) { - if mq.nextMessage == nil { - mq.nextMessage = bsmsg.New(false) - } + work = true mq.nextMessage.AddEntry(e.Cid, e.Priority) } } } + return work } -func (mq *MessageQueue) sendMessages() { - for { - select { - case nextMessage := <-mq.outgoingMessages: - mq.sendMessage(nextMessage) - case <-mq.done: - return - case <-mq.ctx.Done(): - return - } - } +func (mq *MessageQueue) extractOutgoingMessage() bsmsg.BitSwapMessage { + // grab outgoing message + mq.nextMessageLk.Lock() + message := mq.nextMessage + mq.nextMessage = nil + mq.nextMessageLk.Unlock() + return message } -func (mq *MessageQueue) sendMessage(message bsmsg.BitSwapMessage) { +func (mq *MessageQueue) sendMessage() { + message := mq.extractOutgoingMessage() + if message == nil || message.Empty() { + return + } err := mq.initializeSender() if err != nil { From a1b3fab0458bae46bbecc2ee631ced7faef2e51d Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 12 Mar 2019 14:07:42 -0700 Subject: [PATCH 0736/1035] refactor(messagequeue): remove dead code Remove code that should have been cleaned up in last message queue fix This commit was moved from ipfs/go-bitswap@22d5f13c1e639e7ad52c4071b436e2f5fae09bea --- bitswap/messagequeue/messagequeue.go | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go index e3d09caf5..2b8f5f7cf 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -23,17 +23,12 @@ type MessageNetwork interface { NewMessageSender(context.Context, peer.ID) (bsnet.MessageSender, error) } -type request interface { - handle(mq *MessageQueue) -} - // MessageQueue implements queue of want messages to send to peers. 
type MessageQueue struct { ctx context.Context p peer.ID network MessageNetwork - newRequests chan request outgoingWork chan struct{} done chan struct{} @@ -44,15 +39,6 @@ type MessageQueue struct { sender bsnet.MessageSender } -type messageRequest struct { - entries []bsmsg.Entry - ses uint64 -} - -type wantlistRequest struct { - wl *wantlist.SessionTrackedWantlist -} - // New creats a new MessageQueue. func New(ctx context.Context, p peer.ID, network MessageNetwork) *MessageQueue { return &MessageQueue{ @@ -60,7 +46,6 @@ func New(ctx context.Context, p peer.ID, network MessageNetwork) *MessageQueue { wl: wantlist.NewSessionTrackedWantlist(), network: network, p: p, - newRequests: make(chan request, 16), outgoingWork: make(chan struct{}, 1), done: make(chan struct{}), } From f3b6bf83c337bf60d48f2d92e5d506c7bc6176ad Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 15 Mar 2019 12:15:57 -0700 Subject: [PATCH 0737/1035] dep: switch back to upstream pubsub In preparation for switching over to go modules entirely. We no longer need our fork. This commit was moved from ipfs/go-bitswap@27db97baca7ba4da15f4794e133795b162681b02 --- bitswap/notifications/notifications.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 240379ae0..b29640bec 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -4,7 +4,7 @@ import ( "context" "sync" - pubsub "github.com/gxed/pubsub" + pubsub "github.com/cskr/pubsub" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" ) From 845eea0dcb53aa38a63e2edcbe9f9aacd823075b Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 4 Mar 2019 13:28:39 -0800 Subject: [PATCH 0738/1035] reduce provide workers to 6 This'll back up the queue but take a large load off the DHT. 
This commit was moved from ipfs/go-bitswap@130a07cb3affc86528dff23bd27095407a589b41 --- bitswap/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 94dec9ac1..080bac71c 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -53,7 +53,7 @@ const ( var ( HasBlockBufferSize = 256 provideKeysBufferSize = 2048 - provideWorkerMax = 512 + provideWorkerMax = 6 // the 1<<18+15 is to observe old file chunks that are 1<<18 + 14 in size metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} From 9ca43d776bef5f3bfd94151140f3711642493bc0 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 27 Mar 2019 16:43:28 +0000 Subject: [PATCH 0739/1035] provide: massively increase provide timeout 15 will _never_ succeed This commit was moved from ipfs/go-bitswap@e5acc1a4966b6ae7519d8d03b31a71ad8aa9bfd2 --- bitswap/bitswap.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 080bac71c..217d54465 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -46,8 +46,9 @@ const ( maxProvidersPerRequest = 3 findProviderDelay = 1 * time.Second providerRequestTimeout = time.Second * 10 - provideTimeout = time.Second * 15 - sizeBatchRequestChan = 32 + // these requests take at _least_ two minutes at the moment. + provideTimeout = time.Minute * 3 + sizeBatchRequestChan = 32 ) var ( From d23733f93125626a947a152d55f9445ea08342e2 Mon Sep 17 00:00:00 2001 From: Bob Potter Date: Wed, 27 Mar 2019 18:29:18 -0500 Subject: [PATCH 0740/1035] Revert "buffer writes" This reverts commit ed59d74b1128adae5b7930921805ae1c0f24175d. 
It appears that using a buffer here is no longer necessary after the upstream fix https://github.com/gogo/protobuf/pull/504 This commit was moved from ipfs/go-bitswap@c9aa3744e095103f77d295267d3bb249262d11b5 --- bitswap/network/ipfs_impl.go | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index ec8037b10..8c2f5d68a 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -1,7 +1,6 @@ package network import ( - "bufio" "context" "fmt" "io" @@ -74,20 +73,19 @@ func msgToStream(ctx context.Context, s inet.Stream, msg bsmsg.BitSwapMessage) e if dl, ok := ctx.Deadline(); ok { deadline = dl } + if err := s.SetWriteDeadline(deadline); err != nil { log.Warningf("error setting deadline: %s", err) } - w := bufio.NewWriter(s) - switch s.Protocol() { case ProtocolBitswap: - if err := msg.ToNetV1(w); err != nil { + if err := msg.ToNetV1(s); err != nil { log.Debugf("error: %s", err) return err } case ProtocolBitswapOne, ProtocolBitswapNoVers: - if err := msg.ToNetV0(w); err != nil { + if err := msg.ToNetV0(s); err != nil { log.Debugf("error: %s", err) return err } @@ -95,11 +93,6 @@ func msgToStream(ctx context.Context, s inet.Stream, msg bsmsg.BitSwapMessage) e return fmt.Errorf("unrecognized protocol on remote: %s", s.Protocol()) } - if err := w.Flush(); err != nil { - log.Debugf("error: %s", err) - return err - } - if err := s.SetWriteDeadline(time.Time{}); err != nil { log.Warningf("error resetting deadline: %s", err) } From 0d7c16b1edbb13892b3d6ebbd2aca54814326272 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 4 Apr 2019 14:05:24 -0700 Subject: [PATCH 0741/1035] feat(messagequeue): rebroadcast wantlist Provide a failsafe to losing wants on other end by rebroadcasting a wantlist every thirty seconds fix #99, fix #65 This commit was moved from ipfs/go-bitswap@076f7091f41c90be13a83c6290cf07b8b9cb558e --- bitswap/messagequeue/messagequeue.go | 85 
++++++++++++++++------- bitswap/messagequeue/messagequeue_test.go | 37 ++++++++++ 2 files changed, 96 insertions(+), 26 deletions(-) diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go index 2b8f5f7cf..d1a24ef43 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -14,7 +14,10 @@ import ( var log = logging.Logger("bitswap") -const maxRetries = 10 +const ( + defaultRebroadcastInterval = 30 * time.Second + maxRetries = 10 +) // MessageNetwork is any network that can connect peers and generate a message // sender. @@ -33,21 +36,25 @@ type MessageQueue struct { done chan struct{} // do not touch out of run loop - wl *wantlist.SessionTrackedWantlist - nextMessage bsmsg.BitSwapMessage - nextMessageLk sync.RWMutex - sender bsnet.MessageSender + wl *wantlist.SessionTrackedWantlist + nextMessage bsmsg.BitSwapMessage + nextMessageLk sync.RWMutex + sender bsnet.MessageSender + rebroadcastIntervalLk sync.RWMutex + rebroadcastInterval time.Duration + rebroadcastTimer *time.Timer } // New creats a new MessageQueue. 
func New(ctx context.Context, p peer.ID, network MessageNetwork) *MessageQueue { return &MessageQueue{ - ctx: ctx, - wl: wantlist.NewSessionTrackedWantlist(), - network: network, - p: p, - outgoingWork: make(chan struct{}, 1), - done: make(chan struct{}), + ctx: ctx, + wl: wantlist.NewSessionTrackedWantlist(), + network: network, + p: p, + outgoingWork: make(chan struct{}, 1), + done: make(chan struct{}), + rebroadcastInterval: defaultRebroadcastInterval, } } @@ -64,27 +71,24 @@ func (mq *MessageQueue) AddMessage(entries []bsmsg.Entry, ses uint64) { // AddWantlist adds a complete session tracked want list to a message queue func (mq *MessageQueue) AddWantlist(initialWants *wantlist.SessionTrackedWantlist) { - mq.nextMessageLk.Lock() - defer mq.nextMessageLk.Unlock() - initialWants.CopyWants(mq.wl) - if initialWants.Len() > 0 { - if mq.nextMessage == nil { - mq.nextMessage = bsmsg.New(false) - } - for _, e := range initialWants.Entries() { - mq.nextMessage.AddEntry(e.Cid, e.Priority) - } - select { - case mq.outgoingWork <- struct{}{}: - default: - } - } + mq.addWantlist() +} + +// SetRebroadcastInterval sets a new interval on which to rebroadcast the full wantlist +func (mq *MessageQueue) SetRebroadcastInterval(delay time.Duration) { + mq.rebroadcastIntervalLk.Lock() + mq.rebroadcastInterval = delay + mq.rebroadcastTimer.Reset(delay) + mq.rebroadcastIntervalLk.Unlock() } // Startup starts the processing of messages, and creates an initial message // based on the given initial wantlist. 
func (mq *MessageQueue) Startup() { + mq.rebroadcastIntervalLk.RLock() + mq.rebroadcastTimer = time.NewTimer(mq.rebroadcastInterval) + mq.rebroadcastIntervalLk.RUnlock() go mq.runQueue() } @@ -96,6 +100,8 @@ func (mq *MessageQueue) Shutdown() { func (mq *MessageQueue) runQueue() { for { select { + case <-mq.rebroadcastTimer.C: + mq.rebroadcastWantlist() case <-mq.outgoingWork: mq.sendMessage() case <-mq.done: @@ -112,6 +118,33 @@ func (mq *MessageQueue) runQueue() { } } +func (mq *MessageQueue) addWantlist() { + + mq.nextMessageLk.Lock() + defer mq.nextMessageLk.Unlock() + + if mq.wl.Len() > 0 { + if mq.nextMessage == nil { + mq.nextMessage = bsmsg.New(false) + } + for _, e := range mq.wl.Entries() { + mq.nextMessage.AddEntry(e.Cid, e.Priority) + } + select { + case mq.outgoingWork <- struct{}{}: + default: + } + } +} + +func (mq *MessageQueue) rebroadcastWantlist() { + mq.rebroadcastIntervalLk.RLock() + mq.rebroadcastTimer.Reset(mq.rebroadcastInterval) + mq.rebroadcastIntervalLk.RUnlock() + + mq.addWantlist() +} + func (mq *MessageQueue) addEntries(entries []bsmsg.Entry, ses uint64) bool { var work bool mq.nextMessageLk.Lock() diff --git a/bitswap/messagequeue/messagequeue_test.go b/bitswap/messagequeue/messagequeue_test.go index aeb903ddc..eaba9b3c2 100644 --- a/bitswap/messagequeue/messagequeue_test.go +++ b/bitswap/messagequeue/messagequeue_test.go @@ -158,3 +158,40 @@ func TestSendingMessagesPartialDupe(t *testing.T) { } } + +func TestWantlistRebroadcast(t *testing.T) { + + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet) + ses := testutil.GenerateSessionID() + wl := testutil.GenerateWantlist(10, ses) + + messageQueue.Startup() + 
messageQueue.AddWantlist(wl) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + if len(messages) != 1 { + t.Fatal("wrong number of messages were sent for initial wants") + } + + messageQueue.SetRebroadcastInterval(5 * time.Millisecond) + messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + if len(messages) != 1 { + t.Fatal("wrong number of messages were sent for initial wants") + } + + firstMessage := messages[0] + if len(firstMessage.Wantlist()) != wl.Len() { + t.Fatal("did not add all wants to want list") + } + for _, entry := range firstMessage.Wantlist() { + if entry.Cancel { + t.Fatal("initial add sent cancel entry when it should not have") + } + } +} From 35a1146acd2ca612dd0842343ca249ff931ff952 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 4 Apr 2019 17:01:16 -0700 Subject: [PATCH 0742/1035] fix(messagequeue): add nil check Make sure rebroadcast timer doesn't get reset if it's nil This commit was moved from ipfs/go-bitswap@256e680ca4afef917d54f2d11e697fbb6578e365 --- bitswap/messagequeue/messagequeue.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go index d1a24ef43..a71425085 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -79,7 +79,9 @@ func (mq *MessageQueue) AddWantlist(initialWants *wantlist.SessionTrackedWantlis func (mq *MessageQueue) SetRebroadcastInterval(delay time.Duration) { mq.rebroadcastIntervalLk.Lock() mq.rebroadcastInterval = delay - mq.rebroadcastTimer.Reset(delay) + if mq.rebroadcastTimer != nil { + mq.rebroadcastTimer.Reset(delay) + } mq.rebroadcastIntervalLk.Unlock() } From 23657b53b825da9876b23988ea7cf988d097ce15 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 10 Apr 2019 10:49:27 -0700 Subject: [PATCH 0743/1035] fix(messagequeue): test correction timing on test was failure prone, corrected This commit was moved from 
ipfs/go-bitswap@13e0a4dccf8455078fbba2732455f90dbd2224fe --- bitswap/messagequeue/messagequeue_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/messagequeue/messagequeue_test.go b/bitswap/messagequeue/messagequeue_test.go index eaba9b3c2..146f21124 100644 --- a/bitswap/messagequeue/messagequeue_test.go +++ b/bitswap/messagequeue/messagequeue_test.go @@ -180,9 +180,9 @@ func TestWantlistRebroadcast(t *testing.T) { } messageQueue.SetRebroadcastInterval(5 * time.Millisecond) - messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + messages = collectMessages(ctx, t, messagesSent, 8*time.Millisecond) if len(messages) != 1 { - t.Fatal("wrong number of messages were sent for initial wants") + t.Fatal("wrong number of messages were rebroadcast") } firstMessage := messages[0] From 29d1a2293d63471951855a76d15c632e9dfc2251 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 20 Feb 2019 14:57:07 -0800 Subject: [PATCH 0744/1035] make the WantlistManager own the PeerHandler And remove all locks. 
This commit was moved from ipfs/go-bitswap@3a24fa2c33b696ff81f43cf3218bbe267d222b0b --- bitswap/bitswap.go | 8 +------- bitswap/bitswap_test.go | 13 ------------- bitswap/peermanager/peermanager.go | 20 +------------------- bitswap/wantmanager/wantmanager.go | 8 ++------ bitswap/wantmanager/wantmanager_test.go | 3 +-- 5 files changed, 5 insertions(+), 47 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 217d54465..87418a9b0 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -102,7 +102,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, return bsmq.New(ctx, p, network) } - wm := bswm.New(ctx) + wm := bswm.New(ctx, bspm.New(ctx, peerQueueFactory)) pqm := bspqm.New(ctx, network) sessionFactory := func(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter) bssm.Session { @@ -124,7 +124,6 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, provideKeys: make(chan cid.Cid, provideKeysBufferSize), wm: wm, pqm: pqm, - pm: bspm.New(ctx, peerQueueFactory), sm: bssm.New(ctx, sessionFactory, sessionPeerManagerFactory, sessionRequestSplitterFactory), counters: new(counters), dupMetric: dupHist, @@ -132,7 +131,6 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, sentHistogram: sentHistogram, } - bs.wm.SetDelegate(bs.pm) bs.wm.Startup() bs.pqm.Startup() network.SetDelegate(bs) @@ -153,10 +151,6 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, // Bitswap instances implement the bitswap protocol. 
type Bitswap struct { - // the peermanager manages sending messages to peers in a way that - // wont block bitswap operation - pm *bspm.PeerManager - // the wantlist tracks global wants for bitswap wm *bswm.WantManager diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 6b0f5c75d..bbd1b3494 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -199,19 +199,6 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { t.Log("Give the blocks to the first instance") - nump := len(instances) - 1 - // assert we're properly connected - for _, inst := range instances { - peers := inst.Exchange.pm.ConnectedPeers() - for i := 0; i < 10 && len(peers) != nump; i++ { - time.Sleep(time.Millisecond * 50) - peers = inst.Exchange.pm.ConnectedPeers() - } - if len(peers) != nump { - t.Fatal("not enough peers connected to instance") - } - } - var blkeys []cid.Cid first := instances[0] for _, b := range blocks { diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index 59e8ca3de..51cdf27d9 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -2,7 +2,6 @@ package peermanager import ( "context" - "sync" bsmsg "github.com/ipfs/go-bitswap/message" wantlist "github.com/ipfs/go-bitswap/wantlist" @@ -40,8 +39,7 @@ type peerQueueInstance struct { // PeerManager manages a pool of peers and sends messages to peers in the pool. type PeerManager struct { // peerQueues -- interact through internal utility functions get/set/remove/iterate - peerQueues map[peer.ID]*peerQueueInstance - peerQueuesLk sync.RWMutex + peerQueues map[peer.ID]*peerQueueInstance createPeerQueue PeerQueueFactory ctx context.Context @@ -58,8 +56,6 @@ func New(ctx context.Context, createPeerQueue PeerQueueFactory) *PeerManager { // ConnectedPeers returns a list of peers this PeerManager is managing. 
func (pm *PeerManager) ConnectedPeers() []peer.ID { - pm.peerQueuesLk.RLock() - defer pm.peerQueuesLk.RUnlock() peers := make([]peer.ID, 0, len(pm.peerQueues)) for p := range pm.peerQueues { peers = append(peers, p) @@ -70,8 +66,6 @@ func (pm *PeerManager) ConnectedPeers() []peer.ID { // Connected is called to add a new peer to the pool, and send it an initial set // of wants. func (pm *PeerManager) Connected(p peer.ID, initialWants *wantlist.SessionTrackedWantlist) { - pm.peerQueuesLk.Lock() - pq := pm.getOrCreate(p) if pq.refcnt == 0 { @@ -79,47 +73,35 @@ func (pm *PeerManager) Connected(p peer.ID, initialWants *wantlist.SessionTracke } pq.refcnt++ - - pm.peerQueuesLk.Unlock() } // Disconnected is called to remove a peer from the pool. func (pm *PeerManager) Disconnected(p peer.ID) { - pm.peerQueuesLk.Lock() pq, ok := pm.peerQueues[p] if !ok { - pm.peerQueuesLk.Unlock() return } pq.refcnt-- if pq.refcnt > 0 { - pm.peerQueuesLk.Unlock() return } delete(pm.peerQueues, p) - pm.peerQueuesLk.Unlock() - pq.pq.Shutdown() - } // SendMessage is called to send a message to all or some peers in the pool; // if targets is nil, it sends to all. func (pm *PeerManager) SendMessage(entries []bsmsg.Entry, targets []peer.ID, from uint64) { if len(targets) == 0 { - pm.peerQueuesLk.RLock() for _, p := range pm.peerQueues { p.pq.AddMessage(entries, from) } - pm.peerQueuesLk.RUnlock() } else { for _, t := range targets { - pm.peerQueuesLk.Lock() pqi := pm.getOrCreate(t) - pm.peerQueuesLk.Unlock() pqi.pq.AddMessage(entries, from) } } diff --git a/bitswap/wantmanager/wantmanager.go b/bitswap/wantmanager/wantmanager.go index 0fd7d5a1a..5f1129451 100644 --- a/bitswap/wantmanager/wantmanager.go +++ b/bitswap/wantmanager/wantmanager.go @@ -53,7 +53,7 @@ type WantManager struct { } // New initializes a new WantManager for a given context. 
-func New(ctx context.Context) *WantManager { +func New(ctx context.Context, peerHandler PeerHandler) *WantManager { ctx, cancel := context.WithCancel(ctx) wantlistGauge := metrics.NewCtx(ctx, "wantlist_total", "Number of items in wantlist.").Gauge() @@ -63,15 +63,11 @@ func New(ctx context.Context) *WantManager { bcwl: wantlist.NewSessionTrackedWantlist(), ctx: ctx, cancel: cancel, + peerHandler: peerHandler, wantlistGauge: wantlistGauge, } } -// SetDelegate specifies who will send want changes out to the internet. -func (wm *WantManager) SetDelegate(peerHandler PeerHandler) { - wm.peerHandler = peerHandler -} - // WantBlocks adds the given cids to the wantlist, tracked by the given session. func (wm *WantManager) WantBlocks(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) { log.Infof("want blocks: %s", ks) diff --git a/bitswap/wantmanager/wantmanager_test.go b/bitswap/wantmanager/wantmanager_test.go index 3b9d0cb18..036908205 100644 --- a/bitswap/wantmanager/wantmanager_test.go +++ b/bitswap/wantmanager/wantmanager_test.go @@ -40,7 +40,7 @@ func setupTestFixturesAndInitialWantList() ( // setup fixtures wantSender := &fakePeerHandler{} - wantManager := New(ctx) + wantManager := New(ctx, wantSender) keys := testutil.GenerateCids(10) otherKeys := testutil.GenerateCids(5) peers := testutil.GeneratePeers(10) @@ -48,7 +48,6 @@ func setupTestFixturesAndInitialWantList() ( otherSession := testutil.GenerateSessionID() // startup wantManager - wantManager.SetDelegate(wantSender) wantManager.Startup() // add initial wants From 9d30c1f860a4e7b907e65a00d12f666357c01ac2 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 29 Apr 2019 09:30:29 -0700 Subject: [PATCH 0745/1035] remove IPFS_LOW_MEM flag support * HasBlockBufferSize and provideKeysBufferSize no longer matter as we have an infinite in-memory buffer. * provideWorkersMax now defaults to 6 so changing this to 16 actually _increases memory consumption. 
This commit was moved from ipfs/go-bitswap@3699175cd9128298798bb3ab2b0a49cca7b1757c --- bitswap/bitswap.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 217d54465..3e1f2767c 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -26,7 +26,6 @@ import ( blockstore "github.com/ipfs/go-ipfs-blockstore" delay "github.com/ipfs/go-ipfs-delay" exchange "github.com/ipfs/go-ipfs-exchange-interface" - flags "github.com/ipfs/go-ipfs-flags" logging "github.com/ipfs/go-log" metrics "github.com/ipfs/go-metrics-interface" process "github.com/jbenet/goprocess" @@ -60,14 +59,6 @@ var ( metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} ) -func init() { - if flags.LowMemMode { - HasBlockBufferSize = 64 - provideKeysBufferSize = 512 - provideWorkerMax = 16 - } -} - var rebroadcastDelay = delay.Fixed(time.Minute) // New initializes a BitSwap instance that communicates over the provided From eb4080f6b96a6989daf20931d40c6264d00fae66 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 25 Apr 2019 14:31:24 -0700 Subject: [PATCH 0746/1035] give peers more weight when actively participating in a session This commit was moved from ipfs/go-bitswap@131b9df7b4c23ad6544b7192a18daae37376fa0a --- .../sessionpeermanager/sessionpeermanager.go | 25 ++++++++++--------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index 0b02a2a2b..fa7ec50b4 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -14,8 +14,10 @@ import ( var log = logging.Logger("bitswap") const ( - maxOptimizedPeers = 32 - reservePeers = 2 + maxOptimizedPeers = 32 + reservePeers = 2 + unoptimizedTagValue = 5 // tag value for "unoptimized" session peers. + optimizedTagValue = 10 // tag value for "optimized" session peers. 
) // PeerTagger is an interface for tagging peers with metadata @@ -131,7 +133,7 @@ func (spm *SessionPeerManager) run(ctx context.Context) { } } -func (spm *SessionPeerManager) tagPeer(p peer.ID) { +func (spm *SessionPeerManager) tagPeer(p peer.ID, value int) { spm.tagger.TagPeer(p, spm.tag, 10) } @@ -173,7 +175,7 @@ func (pfm *peerFoundMessage) handle(spm *SessionPeerManager) { if _, ok := spm.activePeers[p]; !ok { spm.activePeers[p] = false spm.unoptimizedPeersArr = append(spm.unoptimizedPeersArr, p) - spm.tagPeer(p) + spm.tagPeer(p, unoptimizedTagValue) } } @@ -182,17 +184,16 @@ type peerResponseMessage struct { } func (prm *peerResponseMessage) handle(spm *SessionPeerManager) { - p := prm.p isOptimized, ok := spm.activePeers[p] - if !ok { - spm.activePeers[p] = true - spm.tagPeer(p) + if isOptimized { + spm.removeOptimizedPeer(p) } else { - if isOptimized { - spm.removeOptimizedPeer(p) - } else { - spm.activePeers[p] = true + spm.activePeers[p] = true + spm.tagPeer(p, optimizedTagValue) + + // transition from unoptimized. + if ok { spm.removeUnoptimizedPeer(p) } } From 3a58638fc09c551f35d9f5772b7acf1585896cad Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 25 Apr 2019 14:32:01 -0700 Subject: [PATCH 0747/1035] chore: remove dead code This commit was moved from ipfs/go-bitswap@af8c7b4a0198f7c4b965ebbb96ea52f20b2d885f --- bitswap/bitswap.go | 15 +-------------- bitswap/peermanager/peermanager.go | 11 ----------- bitswap/sessionpeermanager/sessionpeermanager.go | 4 ---- 3 files changed, 1 insertion(+), 29 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 7e63d9362..e298c20ce 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -38,16 +38,8 @@ var log = logging.Logger("bitswap") var _ exchange.SessionExchange = (*Bitswap)(nil) const ( - // maxProvidersPerRequest specifies the maximum number of providers desired - // from the network. This value is specified because the network streams - // results. 
- // TODO: if a 'non-nice' strategy is implemented, consider increasing this value - maxProvidersPerRequest = 3 - findProviderDelay = 1 * time.Second - providerRequestTimeout = time.Second * 10 // these requests take at _least_ two minutes at the moment. - provideTimeout = time.Minute * 3 - sizeBatchRequestChan = 32 + provideTimeout = time.Minute * 3 ) var ( @@ -190,11 +182,6 @@ type counters struct { messagesRecvd uint64 } -type blockRequest struct { - Cid cid.Cid - Ctx context.Context -} - // GetBlock attempts to retrieve a particular block from peers within the // deadline enforced by the context. func (bs *Bitswap) GetBlock(parent context.Context, k cid.Cid) (blocks.Block, error) { diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index 51cdf27d9..658766d15 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -5,17 +5,10 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" wantlist "github.com/ipfs/go-bitswap/wantlist" - logging "github.com/ipfs/go-log" peer "github.com/libp2p/go-libp2p-peer" ) -var log = logging.Logger("bitswap") - -var ( - metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} -) - // PeerQueue provides a queer of messages to be sent for a single peer. type PeerQueue interface { AddMessage(entries []bsmsg.Entry, ses uint64) @@ -27,10 +20,6 @@ type PeerQueue interface { // PeerQueueFactory provides a function that will create a PeerQueue. 
type PeerQueueFactory func(ctx context.Context, p peer.ID) PeerQueue -type peerMessage interface { - handle(pm *PeerManager) -} - type peerQueueInstance struct { refcnt int pq PeerQueue diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index fa7ec50b4..04d20f07e 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -5,14 +5,10 @@ import ( "fmt" "math/rand" - logging "github.com/ipfs/go-log" - cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-peer" ) -var log = logging.Logger("bitswap") - const ( maxOptimizedPeers = 32 reservePeers = 2 From 0f005c29513211b791d95e83958eb6dbbb6ddb42 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 25 Apr 2019 14:39:26 -0700 Subject: [PATCH 0748/1035] chore: remove error return value from functions with no error (fixes linter issues) This commit was moved from ipfs/go-bitswap@2128a5a227ee9a5fdfcb0d8a0f6bad343f8dd3e5 --- bitswap/decision/engine.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index a8e6f1d11..37737c8d8 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -221,7 +221,7 @@ func (e *Engine) Peers() []peer.ID { // MessageReceived performs book-keeping. Returns error if passed invalid // arguments. -func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { +func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) { if m.Empty() { log.Debugf("received empty message from %s", p) } @@ -276,7 +276,6 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { log.Debugf("got block %s %d bytes", block, len(block.RawData())) l.ReceivedBytes(len(block.RawData())) } - return nil } func (e *Engine) addBlock(block blocks.Block) { @@ -309,7 +308,7 @@ func (e *Engine) AddBlock(block blocks.Block) { // inconsistent. 
Would need to ensure that Sends and acknowledgement of the // send happen atomically -func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) error { +func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) { l := e.findOrCreate(p) l.lk.Lock() defer l.lk.Unlock() @@ -320,7 +319,6 @@ func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) error { e.peerRequestQueue.Remove(block.Cid(), p) } - return nil } func (e *Engine) PeerConnected(p peer.ID) { From 57d4630610ffac97923ce4bb3fc7c480aa4041c1 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 29 Apr 2019 14:47:42 -0700 Subject: [PATCH 0749/1035] fix(sessionpeermanager): actually use the tag value This commit was moved from ipfs/go-bitswap@8d74ae262723f856349165253d69718363bf50e9 --- bitswap/sessionpeermanager/sessionpeermanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index 04d20f07e..d5382980f 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -130,7 +130,7 @@ func (spm *SessionPeerManager) run(ctx context.Context) { } func (spm *SessionPeerManager) tagPeer(p peer.ID, value int) { - spm.tagger.TagPeer(p, spm.tag, 10) + spm.tagger.TagPeer(p, spm.tag, value) } func (spm *SessionPeerManager) insertOptimizedPeer(p peer.ID) { From 98f9cec054ffd86a7d07dfd375cf4acb4e3107ef Mon Sep 17 00:00:00 2001 From: Michael Avila Date: Tue, 20 Nov 2018 12:59:52 -0800 Subject: [PATCH 0750/1035] Control provider workers with experiment flag This commit was moved from ipfs/go-bitswap@67856544264823a646a2ef9d90251ae5ba8d2a0e --- bitswap/bitswap.go | 14 +++++++++----- bitswap/bitswap_test.go | 37 +++++++++++++++++++++++++++++++++++++ bitswap/workers.go | 20 +++++++++++--------- 3 files changed, 57 insertions(+), 14 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 7e63d9362..9a2a1281e 100644 --- 
a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -51,6 +51,8 @@ const ( ) var ( + ProvideEnabled = true + HasBlockBufferSize = 256 provideKeysBufferSize = 2048 provideWorkerMax = 6 @@ -258,11 +260,13 @@ func (bs *Bitswap) receiveBlockFrom(blk blocks.Block, from peer.ID) error { bs.engine.AddBlock(blk) - select { - case bs.newBlocks <- blk.Cid(): - // send block off to be reprovided - case <-bs.process.Closing(): - return bs.process.Close() + if ProvideEnabled { + select { + case bs.newBlocks <- blk.Cid(): + // send block off to be reprovided + case <-bs.process.Closing(): + return bs.process.Close() + } } return nil } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index bbd1b3494..127ac0dcd 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -10,6 +10,7 @@ import ( decision "github.com/ipfs/go-bitswap/decision" "github.com/ipfs/go-bitswap/message" + bssession "github.com/ipfs/go-bitswap/session" tn "github.com/ipfs/go-bitswap/testnet" blocks "github.com/ipfs/go-block-format" @@ -99,6 +100,42 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { } } +func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { + ProvideEnabled = false + defer func() { ProvideEnabled = true }() + + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + block := blocks.NewBlock([]byte("block")) + g := NewTestSessionGenerator(net) + defer g.Close() + + hasBlock := g.Next() + defer hasBlock.Exchange.Close() + + if err := hasBlock.Exchange.HasBlock(block); err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + wantsBlock := g.Next() + defer wantsBlock.Exchange.Close() + + ns := wantsBlock.Exchange.NewSession(ctx).(*bssession.Session) + // set find providers delay to less than timeout context of this test + ns.SetBaseTickDelay(10 * time.Millisecond) + + received, err := ns.GetBlock(ctx, block.Cid()) + if received != nil { + t.Fatalf("Expected to find 
nothing, found %s", received) + } + + if err != context.DeadlineExceeded { + t.Fatal("Expected deadline exceeded") + } +} + func TestUnwantedBlockNotAdded(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) diff --git a/bitswap/workers.go b/bitswap/workers.go index 45f786152..6e0bf037f 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -23,15 +23,17 @@ func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { }) } - // Start up a worker to manage sending out provides messages - px.Go(func(px process.Process) { - bs.provideCollector(ctx) - }) - - // Spawn up multiple workers to handle incoming blocks - // consider increasing number if providing blocks bottlenecks - // file transfers - px.Go(bs.provideWorker) + if ProvideEnabled { + // Start up a worker to manage sending out provides messages + px.Go(func(px process.Process) { + bs.provideCollector(ctx) + }) + + // Spawn up multiple workers to handle incoming blocks + // consider increasing number if providing blocks bottlenecks + // file transfers + px.Go(bs.provideWorker) + } } func (bs *Bitswap) taskWorker(ctx context.Context, id int) { From 90460365a148f4597797d85ce5dd50131a6894ee Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 1 May 2019 18:29:21 -0700 Subject: [PATCH 0751/1035] fix(decision): cleanup request queues Make sure when request queues are idle that they are removed fix #112 This commit was moved from ipfs/go-bitswap@0a309a1700ebdacb1f281bc829e311f8b870033e --- bitswap/decision/peer_request_queue.go | 19 +++++++++++- bitswap/decision/peer_request_queue_test.go | 32 +++++++++++++++++++++ 2 files changed, 50 insertions(+), 1 deletion(-) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 4f6ededcc..85901c67e 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -136,7 +136,17 @@ func (tl *prq) Pop() *peerRequestTask { break // and return 
|out| } - tl.pQueue.Push(partner) + if partner.IsIdle() { + for target, testPartner := range tl.partners { + if testPartner == partner { + delete(tl.partners, target) + delete(tl.frozen, target) + break + } + } + } else { + tl.pQueue.Push(partner) + } return out } @@ -323,6 +333,7 @@ func (p *activePartner) StartTask(k cid.Cid) { // TaskDone signals that a task was completed for this partner. func (p *activePartner) TaskDone(k cid.Cid) { p.activelk.Lock() + p.activeBlocks.Remove(k) p.active-- if p.active < 0 { @@ -331,6 +342,12 @@ func (p *activePartner) TaskDone(k cid.Cid) { p.activelk.Unlock() } +func (p *activePartner) IsIdle() bool { + p.activelk.Lock() + defer p.activelk.Unlock() + return p.requests == 0 && p.active == 0 +} + // Index implements pq.Elem. func (p *activePartner) Index() int { return p.index diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go index 246afb065..33b111a52 100644 --- a/bitswap/decision/peer_request_queue_test.go +++ b/bitswap/decision/peer_request_queue_test.go @@ -128,3 +128,35 @@ func TestPeerRepeats(t *testing.T) { } } } + +func TestCleaningUpQueues(t *testing.T) { + partner := testutil.RandPeerIDFatal(t) + var entries []wantlist.Entry + for i := 0; i < 5; i++ { + entries = append(entries, wantlist.Entry{Cid: cid.NewCidV0(u.Hash([]byte(fmt.Sprint(i))))}) + } + + prq := newPRQ() + + // push a block, pop a block, complete everything, should be removed + prq.Push(partner, entries...) + task := prq.Pop() + task.Done(task.Entries) + task = prq.Pop() + + if task != nil || len(prq.partners) > 0 || prq.pQueue.Len() > 0 { + t.Fatal("Partner should have been removed because it's idle") + } + + // push a block, remove each of its entries, should be removed + prq.Push(partner, entries...) 
+ for _, entry := range entries { + prq.Remove(entry.Cid, partner) + } + task = prq.Pop() + + if task != nil || len(prq.partners) > 0 || prq.pQueue.Len() > 0 { + t.Fatal("Partner should have been removed because it's idle") + } + +} From 50fac931a7c13f92060fe04f2a0d8bf82dd59c29 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Fri, 3 May 2019 08:16:49 -0700 Subject: [PATCH 0752/1035] feat(peerrequestqueue): add target to queue Add a peer id to an active partner queue This commit was moved from ipfs/go-bitswap@0bdc018cfd147b66bb94572ab6d196832df86603 --- bitswap/decision/peer_request_queue.go | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go index 85901c67e..5cb95c782 100644 --- a/bitswap/decision/peer_request_queue.go +++ b/bitswap/decision/peer_request_queue.go @@ -51,7 +51,7 @@ func (tl *prq) Push(to peer.ID, entries ...wantlist.Entry) { defer tl.lock.Unlock() partner, ok := tl.partners[to] if !ok { - partner = newActivePartner() + partner = newActivePartner(to) tl.pQueue.Push(partner) tl.partners[to] = partner } @@ -137,13 +137,9 @@ func (tl *prq) Pop() *peerRequestTask { } if partner.IsIdle() { - for target, testPartner := range tl.partners { - if testPartner == partner { - delete(tl.partners, target) - delete(tl.frozen, target) - break - } - } + target := partner.target + delete(tl.partners, target) + delete(tl.frozen, target) } else { tl.pQueue.Push(partner) } @@ -262,7 +258,7 @@ func wrapCmp(f func(a, b *peerRequestTask) bool) func(a, b pq.Elem) bool { } type activePartner struct { - + target peer.ID // Active is the number of blocks this peer is currently being sent // active must be locked around as it will be updated externally activelk sync.Mutex @@ -284,8 +280,9 @@ type activePartner struct { taskQueue pq.PQ } -func newActivePartner() *activePartner { +func newActivePartner(target peer.ID) *activePartner { return &activePartner{ + target: 
target, taskQueue: pq.New(wrapCmp(V1)), activeBlocks: cid.NewSet(), } From ff80f8d858503692d4607e6625fa2063119b9889 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 9 May 2019 16:50:04 -0700 Subject: [PATCH 0753/1035] refactor(bitswap): add comments and extract testutils.go Add comments to all exported functions, extract the utils for creating instances in testnet.go, moves integration tests to bitswap_test BREAKING CHANGE: removed one constant -- rebroadcastDelay -- which I believe was unused This commit was moved from ipfs/go-bitswap@59317cc1cb5ea9348f3185f7ed2d11cca6bba8a1 --- bitswap/benchmarks_test.go | 38 +++++++------ bitswap/bitswap.go | 41 ++++++++++---- bitswap/bitswap_test.go | 56 +++++++------------ bitswap/bitswap_with_sessions_test.go | 21 +++---- bitswap/decision/engine.go | 14 ++++- bitswap/decision/ledger.go | 3 + bitswap/message/message.go | 12 +++- bitswap/network/interface.go | 18 ++++-- bitswap/network/ipfs_impl.go | 6 +- bitswap/notifications/notifications.go | 4 ++ bitswap/stat.go | 2 + .../testinstance.go} | 32 +++++++---- bitswap/testnet/interface.go | 2 + bitswap/testnet/peernet.go | 1 + bitswap/testnet/virtual.go | 17 ++++-- bitswap/testutil/testutil.go | 2 +- bitswap/wantlist/wantlist.go | 19 +++++++ bitswap/workers.go | 4 +- 18 files changed, 186 insertions(+), 106 deletions(-) rename bitswap/{testutils.go => testinstance/testinstance.go} (69%) diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index b8c90d97a..291982741 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -1,4 +1,4 @@ -package bitswap +package bitswap_test import ( "context" @@ -10,19 +10,21 @@ import ( "time" "github.com/ipfs/go-bitswap/testutil" + blocks "github.com/ipfs/go-block-format" + bitswap "github.com/ipfs/go-bitswap" bssession "github.com/ipfs/go-bitswap/session" + testinstance "github.com/ipfs/go-bitswap/testinstance" tn "github.com/ipfs/go-bitswap/testnet" - "github.com/ipfs/go-block-format" cid 
"github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" ) -type fetchFunc func(b *testing.B, bs *Bitswap, ks []cid.Cid) +type fetchFunc func(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) -type distFunc func(b *testing.B, provs []Instance, blocks []blocks.Block) +type distFunc func(b *testing.B, provs []testinstance.Instance, blocks []blocks.Block) type runStats struct { Dups uint64 @@ -146,7 +148,7 @@ func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, d start := time.Now() net := tn.VirtualNetwork(mockrouting.NewServer(), d) - sg := NewTestSessionGenerator(net) + sg := testinstance.NewTestSessionGenerator(net) defer sg.Close() bg := blocksutil.NewBlockGenerator() @@ -160,7 +162,7 @@ func subtestDistributeAndFetchRateLimited(b *testing.B, numnodes, numblks int, d start := time.Now() net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) - sg := NewTestSessionGenerator(net) + sg := testinstance.NewTestSessionGenerator(net) defer sg.Close() instances := sg.Instances(numnodes) @@ -169,7 +171,7 @@ func subtestDistributeAndFetchRateLimited(b *testing.B, numnodes, numblks int, d runDistribution(b, instances, blocks, df, ff, start) } -func runDistribution(b *testing.B, instances []Instance, blocks []blocks.Block, df distFunc, ff fetchFunc, start time.Time) { +func runDistribution(b *testing.B, instances []testinstance.Instance, blocks []blocks.Block, df distFunc, ff fetchFunc, start time.Time) { numnodes := len(instances) @@ -189,7 +191,7 @@ func runDistribution(b *testing.B, instances []Instance, blocks []blocks.Block, b.Fatal(err) } - nst := fetcher.Exchange.network.Stats() + nst := fetcher.Adapter.Stats() stats := runStats{ Time: time.Now().Sub(start), MsgRecd: nst.MessagesRecvd, @@ -204,7 +206,7 @@ func runDistribution(b *testing.B, instances []Instance, blocks []blocks.Block, } } -func allToAll(b 
*testing.B, provs []Instance, blocks []blocks.Block) { +func allToAll(b *testing.B, provs []testinstance.Instance, blocks []blocks.Block) { for _, p := range provs { if err := p.Blockstore().PutMany(blocks); err != nil { b.Fatal(err) @@ -214,7 +216,7 @@ func allToAll(b *testing.B, provs []Instance, blocks []blocks.Block) { // overlap1 gives the first 75 blocks to the first peer, and the last 75 blocks // to the second peer. This means both peers have the middle 50 blocks -func overlap1(b *testing.B, provs []Instance, blks []blocks.Block) { +func overlap1(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) { if len(provs) != 2 { b.Fatal("overlap1 only works with 2 provs") } @@ -231,7 +233,7 @@ func overlap1(b *testing.B, provs []Instance, blks []blocks.Block) { // overlap2 gives every even numbered block to the first peer, odd numbered // blocks to the second. it also gives every third block to both peers -func overlap2(b *testing.B, provs []Instance, blks []blocks.Block) { +func overlap2(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) { if len(provs) != 2 { b.Fatal("overlap2 only works with 2 provs") } @@ -252,7 +254,7 @@ func overlap2(b *testing.B, provs []Instance, blks []blocks.Block) { } } -func overlap3(b *testing.B, provs []Instance, blks []blocks.Block) { +func overlap3(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) { if len(provs) != 2 { b.Fatal("overlap3 only works with 2 provs") } @@ -277,13 +279,13 @@ func overlap3(b *testing.B, provs []Instance, blks []blocks.Block) { // onePeerPerBlock picks a random peer to hold each block // with this layout, we shouldnt actually ever see any duplicate blocks // but we're mostly just testing performance of the sync algorithm -func onePeerPerBlock(b *testing.B, provs []Instance, blks []blocks.Block) { +func onePeerPerBlock(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) { for _, blk := range blks { provs[rand.Intn(len(provs))].Blockstore().Put(blk) } 
} -func oneAtATime(b *testing.B, bs *Bitswap, ks []cid.Cid) { +func oneAtATime(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { ses := bs.NewSession(context.Background()).(*bssession.Session) for _, c := range ks { _, err := ses.GetBlock(context.Background(), c) @@ -295,7 +297,7 @@ func oneAtATime(b *testing.B, bs *Bitswap, ks []cid.Cid) { } // fetch data in batches, 10 at a time -func batchFetchBy10(b *testing.B, bs *Bitswap, ks []cid.Cid) { +func batchFetchBy10(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { ses := bs.NewSession(context.Background()) for i := 0; i < len(ks); i += 10 { out, err := ses.GetBlocks(context.Background(), ks[i:i+10]) @@ -308,7 +310,7 @@ func batchFetchBy10(b *testing.B, bs *Bitswap, ks []cid.Cid) { } // fetch each block at the same time concurrently -func fetchAllConcurrent(b *testing.B, bs *Bitswap, ks []cid.Cid) { +func fetchAllConcurrent(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { ses := bs.NewSession(context.Background()) var wg sync.WaitGroup @@ -325,7 +327,7 @@ func fetchAllConcurrent(b *testing.B, bs *Bitswap, ks []cid.Cid) { wg.Wait() } -func batchFetchAll(b *testing.B, bs *Bitswap, ks []cid.Cid) { +func batchFetchAll(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { ses := bs.NewSession(context.Background()) out, err := ses.GetBlocks(context.Background(), ks) if err != nil { @@ -336,7 +338,7 @@ func batchFetchAll(b *testing.B, bs *Bitswap, ks []cid.Cid) { } // simulates the fetch pattern of trying to sync a unixfs file graph as fast as possible -func unixfsFileFetch(b *testing.B, bs *Bitswap, ks []cid.Cid) { +func unixfsFileFetch(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { ses := bs.NewSession(context.Background()) _, err := ses.GetBlock(context.Background(), ks[0]) if err != nil { diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index e6f90fe7d..4a407feba 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -1,4 +1,4 @@ -// package bitswap implements the IPFS exchange interface with the BitSwap 
+// Package bitswap implements the IPFS exchange interface with the BitSwap // bilateral exchange protocol. package bitswap @@ -24,7 +24,6 @@ import ( blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blockstore "github.com/ipfs/go-ipfs-blockstore" - delay "github.com/ipfs/go-ipfs-delay" exchange "github.com/ipfs/go-ipfs-exchange-interface" logging "github.com/ipfs/go-log" metrics "github.com/ipfs/go-metrics-interface" @@ -43,8 +42,14 @@ const ( ) var ( + // ProvideEnabled is a variable that tells Bitswap whether or not + // to handle providing blocks (see experimental provider system) ProvideEnabled = true + // HasBlockBufferSize is the buffer size of the channel for new blocks + // that need to be provided. They should get pulled over by the + // provideCollector even before they are actually provided. + // TODO: Does this need to be this large given that? HasBlockBufferSize = 256 provideKeysBufferSize = 2048 provideWorkerMax = 6 @@ -53,12 +58,9 @@ var ( metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} ) -var rebroadcastDelay = delay.Fixed(time.Minute) - // New initializes a BitSwap instance that communicates over the provided // BitSwapNetwork. This function registers the returned instance as the network -// delegate. -// Runs until context is cancelled. +// delegate. Runs until context is cancelled or bitswap.Close is called. func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore) exchange.Interface { @@ -121,7 +123,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, network.SetDelegate(bs) // Start up bitswaps async worker routines - bs.startWorkers(px, ctx) + bs.startWorkers(ctx, px) // bind the context and process. // do it over here to avoid closing before all setup is done. 
@@ -190,6 +192,8 @@ func (bs *Bitswap) GetBlock(parent context.Context, k cid.Cid) (blocks.Block, er return bsgetter.SyncGetBlock(parent, k, bs.GetBlocks) } +// WantlistForPeer returns the currently understood list of blocks requested by a +// given peer. func (bs *Bitswap) WantlistForPeer(p peer.ID) []cid.Cid { var out []cid.Cid for _, e := range bs.engine.WantlistForPeer(p) { @@ -198,6 +202,8 @@ func (bs *Bitswap) WantlistForPeer(p peer.ID) []cid.Cid { return out } +// LedgerForPeer returns aggregated data about blocks swapped and communication +// with a given peer. func (bs *Bitswap) LedgerForPeer(p peer.ID) *decision.Receipt { return bs.engine.LedgerForPeer(p) } @@ -258,6 +264,8 @@ func (bs *Bitswap) receiveBlockFrom(blk blocks.Block, from peer.ID) error { return nil } +// ReceiveMessage is called by the network interface when a new message is +// received. func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { bs.counterLk.Lock() bs.counters.messagesRecvd++ @@ -300,8 +308,6 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg wg.Wait() } -var ErrAlreadyHaveBlock = errors.New("already have block") - func (bs *Bitswap) updateReceiveCounters(b blocks.Block) { blkLen := len(b.RawData()) has, err := bs.blockstore.Has(b.Cid()) @@ -327,28 +333,34 @@ func (bs *Bitswap) updateReceiveCounters(b blocks.Block) { } } -// Connected/Disconnected warns bitswap about peer connections. +// PeerConnected is called by the network interface +// when a peer initiates a new connection to bitswap. func (bs *Bitswap) PeerConnected(p peer.ID) { bs.wm.Connected(p) bs.engine.PeerConnected(p) } -// Connected/Disconnected warns bitswap about peer connections. 
+// PeerDisconnected is called by the network interface when a peer +// closes a connection func (bs *Bitswap) PeerDisconnected(p peer.ID) { bs.wm.Disconnected(p) bs.engine.PeerDisconnected(p) } +// ReceiveError is called by the network interface when an error happens +// at the network layer. Currently just logs error. func (bs *Bitswap) ReceiveError(err error) { log.Infof("Bitswap ReceiveError: %s", err) // TODO log the network error // TODO bubble the network error up to the parent context/error logger } +// Close is called to shutdown Bitswap func (bs *Bitswap) Close() error { return bs.process.Close() } +// GetWantlist returns the current local wantlist. func (bs *Bitswap) GetWantlist() []cid.Cid { entries := bs.wm.CurrentWants() out := make([]cid.Cid, 0, len(entries)) @@ -358,10 +370,17 @@ func (bs *Bitswap) GetWantlist() []cid.Cid { return out } +// IsOnline is needed to match go-ipfs-exchange-interface func (bs *Bitswap) IsOnline() bool { return true } +// NewSession generates a new Bitswap session. You should use this, rather +// than calling Bitswap.GetBlocks, any time you intend to do several related +// block requests in a row. The session returned will have its own GetBlocks +// method, but the session will use the fact that the requests are related to +// be more efficient in its requests to peers. If you are using a session +// from go-blockservice, it will create a bitswap session automatically. 
func (bs *Bitswap) NewSession(ctx context.Context) exchange.Fetcher { return bs.sm.NewSession(ctx) } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 127ac0dcd..55690a735 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -1,4 +1,4 @@ -package bitswap +package bitswap_test import ( "bytes" @@ -8,11 +8,12 @@ import ( "testing" "time" + bitswap "github.com/ipfs/go-bitswap" decision "github.com/ipfs/go-bitswap/decision" "github.com/ipfs/go-bitswap/message" bssession "github.com/ipfs/go-bitswap/session" + testinstance "github.com/ipfs/go-bitswap/testinstance" tn "github.com/ipfs/go-bitswap/testnet" - blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" detectrace "github.com/ipfs/go-detect-race" @@ -35,7 +36,7 @@ func getVirtualNetwork() tn.Network { func TestClose(t *testing.T) { vnet := getVirtualNetwork() - sesgen := NewTestSessionGenerator(vnet) + sesgen := testinstance.NewTestSessionGenerator(vnet) defer sesgen.Close() bgen := blocksutil.NewBlockGenerator() @@ -50,7 +51,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this rs := mockrouting.NewServer() net := tn.VirtualNetwork(rs, delay.Fixed(kNetworkDelay)) - g := NewTestSessionGenerator(net) + g := testinstance.NewTestSessionGenerator(net) defer g.Close() block := blocks.NewBlock([]byte("block")) @@ -73,7 +74,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) block := blocks.NewBlock([]byte("block")) - g := NewTestSessionGenerator(net) + g := testinstance.NewTestSessionGenerator(net) defer g.Close() peers := g.Instances(2) @@ -101,12 +102,12 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { } func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { - ProvideEnabled = false - defer func() { ProvideEnabled = true }() + bitswap.ProvideEnabled = false + defer func() { bitswap.ProvideEnabled = true }() net := 
tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) block := blocks.NewBlock([]byte("block")) - g := NewTestSessionGenerator(net) + g := testinstance.NewTestSessionGenerator(net) defer g.Close() hasBlock := g.Next() @@ -143,7 +144,7 @@ func TestUnwantedBlockNotAdded(t *testing.T) { bsMessage := message.New(true) bsMessage.AddBlock(block) - g := NewTestSessionGenerator(net) + g := testinstance.NewTestSessionGenerator(net) defer g.Close() peers := g.Instances(2) @@ -162,7 +163,7 @@ func TestUnwantedBlockNotAdded(t *testing.T) { doesNotWantBlock.Exchange.ReceiveMessage(ctx, hasBlock.Peer, bsMessage) - blockInStore, err := doesNotWantBlock.blockstore.Has(block.Cid()) + blockInStore, err := doesNotWantBlock.Blockstore().Has(block.Cid()) if err != nil || blockInStore { t.Fatal("Unwanted block added to block store") } @@ -200,18 +201,6 @@ func TestLargeFile(t *testing.T) { PerformDistributionTest(t, numInstances, numBlocks) } -func TestLargeFileNoRebroadcast(t *testing.T) { - rbd := rebroadcastDelay.Get() - rebroadcastDelay.Set(time.Hour * 24 * 365 * 10) // ten years should be long enough - if testing.Short() { - t.SkipNow() - } - numInstances := 10 - numBlocks := 100 - PerformDistributionTest(t, numInstances, numBlocks) - rebroadcastDelay.Set(rbd) -} - func TestLargeFileTwoPeers(t *testing.T) { if testing.Short() { t.SkipNow() @@ -227,7 +216,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { t.SkipNow() } net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := NewTestSessionGenerator(net) + sg := testinstance.NewTestSessionGenerator(net) defer sg.Close() bg := blocksutil.NewBlockGenerator() @@ -250,7 +239,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { for _, inst := range instances[1:] { wg.Add(1) - go func(inst Instance) { + go func(inst testinstance.Instance) { defer wg.Done() outch, err := inst.Exchange.GetBlocks(ctx, blkeys) if err != nil { @@ -290,13 +279,10 
@@ func TestSendToWantingPeer(t *testing.T) { } net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := NewTestSessionGenerator(net) + sg := testinstance.NewTestSessionGenerator(net) defer sg.Close() bg := blocksutil.NewBlockGenerator() - prev := rebroadcastDelay.Set(time.Second / 2) - defer func() { rebroadcastDelay.Set(prev) }() - peers := sg.Instances(2) peerA := peers[0] peerB := peers[1] @@ -335,7 +321,7 @@ func TestSendToWantingPeer(t *testing.T) { func TestEmptyKey(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := NewTestSessionGenerator(net) + sg := testinstance.NewTestSessionGenerator(net) defer sg.Close() bs := sg.Instances(1)[0].Exchange @@ -348,7 +334,7 @@ func TestEmptyKey(t *testing.T) { } } -func assertStat(t *testing.T, st *Stat, sblks, rblks, sdata, rdata uint64) { +func assertStat(t *testing.T, st *bitswap.Stat, sblks, rblks, sdata, rdata uint64) { if sblks != st.BlocksSent { t.Errorf("mismatch in blocks sent: %d vs %d", sblks, st.BlocksSent) } @@ -368,7 +354,7 @@ func assertStat(t *testing.T, st *Stat, sblks, rblks, sdata, rdata uint64) { func TestBasicBitswap(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := NewTestSessionGenerator(net) + sg := testinstance.NewTestSessionGenerator(net) defer sg.Close() bg := blocksutil.NewBlockGenerator() @@ -437,7 +423,7 @@ func TestBasicBitswap(t *testing.T) { func TestDoubleGet(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := NewTestSessionGenerator(net) + sg := testinstance.NewTestSessionGenerator(net) defer sg.Close() bg := blocksutil.NewBlockGenerator() @@ -505,7 +491,7 @@ func TestDoubleGet(t *testing.T) { func TestWantlistCleanup(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := NewTestSessionGenerator(net) + sg := testinstance.NewTestSessionGenerator(net) defer 
sg.Close() bg := blocksutil.NewBlockGenerator() @@ -616,7 +602,7 @@ func newReceipt(sent, recv, exchanged uint64) *decision.Receipt { func TestBitswapLedgerOneWay(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := NewTestSessionGenerator(net) + sg := testinstance.NewTestSessionGenerator(net) defer sg.Close() bg := blocksutil.NewBlockGenerator() @@ -668,7 +654,7 @@ func TestBitswapLedgerOneWay(t *testing.T) { func TestBitswapLedgerTwoWay(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := NewTestSessionGenerator(net) + sg := testinstance.NewTestSessionGenerator(net) defer sg.Close() bg := blocksutil.NewBlockGenerator() diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/bitswap_with_sessions_test.go index d4d0cfee4..dd26a30c8 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -1,4 +1,4 @@ -package bitswap +package bitswap_test import ( "context" @@ -7,6 +7,7 @@ import ( "time" bssession "github.com/ipfs/go-bitswap/session" + testinstance "github.com/ipfs/go-bitswap/testinstance" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" @@ -18,7 +19,7 @@ func TestBasicSessions(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - sesgen := NewTestSessionGenerator(vnet) + sesgen := testinstance.NewTestSessionGenerator(vnet) defer sesgen.Close() bgen := blocksutil.NewBlockGenerator() @@ -66,7 +67,7 @@ func TestSessionBetweenPeers(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - sesgen := NewTestSessionGenerator(vnet) + sesgen := testinstance.NewTestSessionGenerator(vnet) defer sesgen.Close() bgen := blocksutil.NewBlockGenerator() @@ -109,7 +110,7 @@ func TestSessionBetweenPeers(t *testing.T) { t.Fatal(err) } if stat.MessagesReceived > 2 { - t.Fatal("uninvolved nodes should only receive two messages", is.Exchange.counters.messagesRecvd) + 
t.Fatal("uninvolved nodes should only receive two messages", stat.MessagesReceived) } } } @@ -119,7 +120,7 @@ func TestSessionSplitFetch(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - sesgen := NewTestSessionGenerator(vnet) + sesgen := testinstance.NewTestSessionGenerator(vnet) defer sesgen.Close() bgen := blocksutil.NewBlockGenerator() @@ -162,7 +163,7 @@ func TestFetchNotConnected(t *testing.T) { bssession.SetProviderSearchDelay(10 * time.Millisecond) vnet := getVirtualNetwork() - sesgen := NewTestSessionGenerator(vnet) + sesgen := testinstance.NewTestSessionGenerator(vnet) defer sesgen.Close() bgen := blocksutil.NewBlockGenerator() @@ -202,7 +203,7 @@ func TestInterestCacheOverflow(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - sesgen := NewTestSessionGenerator(vnet) + sesgen := testinstance.NewTestSessionGenerator(vnet) defer sesgen.Close() bgen := blocksutil.NewBlockGenerator() @@ -254,7 +255,7 @@ func TestPutAfterSessionCacheEvict(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - sesgen := NewTestSessionGenerator(vnet) + sesgen := testinstance.NewTestSessionGenerator(vnet) defer sesgen.Close() bgen := blocksutil.NewBlockGenerator() @@ -294,7 +295,7 @@ func TestMultipleSessions(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - sesgen := NewTestSessionGenerator(vnet) + sesgen := testinstance.NewTestSessionGenerator(vnet) defer sesgen.Close() bgen := blocksutil.NewBlockGenerator() @@ -337,7 +338,7 @@ func TestWantlistClearsOnCancel(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - sesgen := NewTestSessionGenerator(vnet) + sesgen := testinstance.NewTestSessionGenerator(vnet) defer sesgen.Close() bgen := blocksutil.NewBlockGenerator() diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 37737c8d8..c2de9299c 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -1,4 +1,4 @@ -// package decision implements the decision engine for the bitswap service. 
+// Package decision implements the decision engine for the bitswap service. package decision import ( @@ -68,6 +68,7 @@ type Envelope struct { Sent func() } +// Engine manages sending requested blocks to peers. type Engine struct { // peerRequestQueue is a priority queue of requests received from peers. // Requests are popped from the queue, packaged up, and placed in the @@ -94,6 +95,7 @@ type Engine struct { ticker *time.Ticker } +// NewEngine creates a new block sending engine for the given block store func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { e := &Engine{ ledgerMap: make(map[peer.ID]*ledger), @@ -107,6 +109,7 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { return e } +// WantlistForPeer returns the currently understood want list for a given peer func (e *Engine) WantlistForPeer(p peer.ID) (out []wl.Entry) { partner := e.findOrCreate(p) partner.lk.Lock() @@ -114,6 +117,8 @@ func (e *Engine) WantlistForPeer(p peer.ID) (out []wl.Entry) { return partner.wantList.SortedEntries() } +// LedgerForPeer returns aggregated data about blocks swapped and communication +// with a given peer. func (e *Engine) LedgerForPeer(p peer.ID) *Receipt { ledger := e.findOrCreate(p) @@ -295,6 +300,8 @@ func (e *Engine) addBlock(block blocks.Block) { } } +// AddBlock is called to when a new block is received and added to a block store +// meaning there may be peers who want that block that we should send it to. func (e *Engine) AddBlock(block blocks.Block) { e.lock.Lock() defer e.lock.Unlock() @@ -308,6 +315,8 @@ func (e *Engine) AddBlock(block blocks.Block) { // inconsistent. Would need to ensure that Sends and acknowledgement of the // send happen atomically +// MessageSent is called when a message has successfully been sent out, to record +// changes. 
func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) { l := e.findOrCreate(p) l.lk.Lock() @@ -321,6 +330,8 @@ func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) { } +// PeerConnected is called when a new peer connects, meaning we should start +// sending blocks. func (e *Engine) PeerConnected(p peer.ID) { e.lock.Lock() defer e.lock.Unlock() @@ -334,6 +345,7 @@ func (e *Engine) PeerConnected(p peer.ID) { l.ref++ } +// PeerDisconnected is called when a peer disconnects. func (e *Engine) PeerDisconnected(p peer.ID) { e.lock.Lock() defer e.lock.Unlock() diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 374f0e7e5..37ca57459 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -47,6 +47,9 @@ type ledger struct { lk sync.Mutex } +// Receipt is a summary of the ledger for a given peer +// collecting various pieces of aggregated data for external +// reporting purposes. type Receipt struct { Peer string Value float64 diff --git a/bitswap/message/message.go b/bitswap/message/message.go index b9035d8ff..8bddc509c 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -13,9 +13,8 @@ import ( inet "github.com/libp2p/go-libp2p-net" ) -// TODO move message.go into the bitswap package -// TODO move bs/msg/internal/pb to bs/internal/pb and rename pb package to bitswap_pb - +// BitSwapMessage is the basic interface for building, encoding, +// and decoding messages sent on the BitSwap protocol. type BitSwapMessage interface { // Wantlist returns a slice of unique keys that represent data wanted by // the sender. @@ -40,6 +39,8 @@ type BitSwapMessage interface { Loggable() map[string]interface{} } +// Exportable is an interface for structures that can be +// encoded in a bitswap protobuf. 
type Exportable interface { ToProtoV0() *pb.Message ToProtoV1() *pb.Message @@ -53,6 +54,7 @@ type impl struct { blocks map[cid.Cid]blocks.Block } +// New returns a new, empty bitswap message func New(full bool) BitSwapMessage { return newMsg(full) } @@ -65,6 +67,8 @@ func newMsg(full bool) *impl { } } +// Entry is a wantlist entry in a Bitswap message (along with whether it's an +// add or cancel). type Entry struct { wantlist.Entry Cancel bool @@ -163,11 +167,13 @@ func (m *impl) AddBlock(b blocks.Block) { m.blocks[b.Cid()] = b } +// FromNet generates a new BitswapMessage from incoming data on an io.Reader. func FromNet(r io.Reader) (BitSwapMessage, error) { pbr := ggio.NewDelimitedReader(r, inet.MessageSizeMax) return FromPBReader(pbr) } +// FromPBReader generates a new Bitswap message from a gogo-protobuf reader func FromPBReader(pbr ggio.Reader) (BitSwapMessage, error) { pb := new(pb.Message) if err := pbr.ReadMsg(pb); err != nil { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 2d2c9b19c..1d7cdc744 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -12,10 +12,12 @@ import ( ) var ( - // These two are equivalent, legacy - ProtocolBitswapOne protocol.ID = "/ipfs/bitswap/1.0.0" + // ProtocolBitswapOne is the prefix for the legacy bitswap protocol + ProtocolBitswapOne protocol.ID = "/ipfs/bitswap/1.0.0" + // ProtocolBitswapNoVers is equivalent to the legacy bitswap protocol ProtocolBitswapNoVers protocol.ID = "/ipfs/bitswap" + // ProtocolBitswap is the current version of bitswap protocol, 1.1.0 ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.1.0" ) @@ -38,18 +40,20 @@ type BitSwapNetwork interface { ConnectionManager() ifconnmgr.ConnManager - Stats() NetworkStats + Stats() Stats Routing } +// MessageSender is an interface for sending a series of messages over the bitswap +// network type MessageSender interface { SendMsg(context.Context, bsmsg.BitSwapMessage) error Close() error Reset() error } -// 
Implement Receiver to receive messages from the BitSwapNetwork. +// Receiver is an interface that can receive messages from the BitSwapNetwork. type Receiver interface { ReceiveMessage( ctx context.Context, @@ -63,6 +67,8 @@ type Receiver interface { PeerDisconnected(peer.ID) } +// Routing is an interface to providing and finding providers on a bitswap +// network. type Routing interface { // FindProvidersAsync returns a channel of providers for the given key. FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID @@ -71,10 +77,10 @@ type Routing interface { Provide(context.Context, cid.Cid) error } -// NetworkStats is a container for statistics about the bitswap network +// Stats is a container for statistics about the bitswap network // the numbers inside are specific to bitswap, and not any other protocols // using the same underlying network. -type NetworkStats struct { +type Stats struct { MessagesSent uint64 MessagesRecvd uint64 } diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 8c2f5d68a..ffb4800d6 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -49,7 +49,7 @@ type impl struct { // inbound messages from the network are forwarded to the receiver receiver Receiver - stats NetworkStats + stats Stats } type streamMessageSender struct { @@ -201,8 +201,8 @@ func (bsnet *impl) ConnectionManager() ifconnmgr.ConnManager { return bsnet.host.ConnManager() } -func (bsnet *impl) Stats() NetworkStats { - return NetworkStats{ +func (bsnet *impl) Stats() Stats { + return Stats{ MessagesRecvd: atomic.LoadUint64(&bsnet.stats.MessagesRecvd), MessagesSent: atomic.LoadUint64(&bsnet.stats.MessagesSent), } diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index b29640bec..0934fa5f5 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -11,12 +11,16 @@ import ( const bufferSize = 16 +// PubSub is a simple interface for 
publishing blocks and being able to subscribe +// for cids. It's used internally by bitswap to decouple receiving blocks +// and actually providing them back to the GetBlocks caller. type PubSub interface { Publish(block blocks.Block) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Block Shutdown() } +// New generates a new PubSub interface. func New() PubSub { return &impl{ wrapped: *pubsub.New(bufferSize), diff --git a/bitswap/stat.go b/bitswap/stat.go index 99b2def1c..af39ecb2e 100644 --- a/bitswap/stat.go +++ b/bitswap/stat.go @@ -6,6 +6,7 @@ import ( cid "github.com/ipfs/go-cid" ) +// Stat is a struct that provides various statistics on bitswap operations type Stat struct { ProvideBufLen int Wantlist []cid.Cid @@ -19,6 +20,7 @@ type Stat struct { MessagesReceived uint64 } +// Stat returns aggregated statistics about bitswap operations func (bs *Bitswap) Stat() (*Stat, error) { st := new(Stat) st.ProvideBufLen = len(bs.newBlocks) diff --git a/bitswap/testutils.go b/bitswap/testinstance/testinstance.go similarity index 69% rename from bitswap/testutils.go rename to bitswap/testinstance/testinstance.go index f9be69435..f677c9493 100644 --- a/bitswap/testutils.go +++ b/bitswap/testinstance/testinstance.go @@ -1,11 +1,12 @@ -package bitswap +package testsession import ( "context" "time" + bitswap "github.com/ipfs/go-bitswap" + bsnet "github.com/ipfs/go-bitswap/network" tn "github.com/ipfs/go-bitswap/testnet" - ds "github.com/ipfs/go-datastore" delayed "github.com/ipfs/go-datastore/delayed" ds_sync "github.com/ipfs/go-datastore/sync" @@ -16,7 +17,8 @@ import ( testutil "github.com/libp2p/go-testutil" ) -// WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS! 
+// NewTestSessionGenerator generates a new SessionGenerator for the given +// testnet func NewTestSessionGenerator( net tn.Network) SessionGenerator { ctx, cancel := context.WithCancel(context.Background()) @@ -28,7 +30,7 @@ func NewTestSessionGenerator( } } -// TODO move this SessionGenerator to the core package and export it as the core generator +// SessionGenerator generates new test instances of bitswap+dependencies type SessionGenerator struct { seq int net tn.Network @@ -36,11 +38,13 @@ type SessionGenerator struct { cancel context.CancelFunc } +// Close closes the global context, shutting down all test instances func (g *SessionGenerator) Close() error { g.cancel() return nil // for Closer interface } +// Next generates a new instance of bitswap + dependencies func (g *SessionGenerator) Next() Instance { g.seq++ p, err := p2ptestutil.RandTestBogusIdentity() @@ -50,6 +54,7 @@ func (g *SessionGenerator) Next() Instance { return MkSession(g.ctx, g.net, p) } +// Instances creates N test instances of bitswap + dependencies func (g *SessionGenerator) Instances(n int) []Instance { var instances []Instance for j := 0; j < n; j++ { @@ -59,29 +64,33 @@ func (g *SessionGenerator) Instances(n int) []Instance { for i, inst := range instances { for j := i + 1; j < len(instances); j++ { oinst := instances[j] - inst.Exchange.network.ConnectTo(context.Background(), oinst.Peer) + inst.Adapter.ConnectTo(context.Background(), oinst.Peer) } } return instances } +// Instance is a test instance of bitswap + dependencies for integration testing type Instance struct { - Peer peer.ID - Exchange *Bitswap - blockstore blockstore.Blockstore - + Peer peer.ID + Exchange *bitswap.Bitswap + blockstore blockstore.Blockstore + Adapter bsnet.BitSwapNetwork blockstoreDelay delay.D } +// Blockstore returns the block store for this test instance func (i *Instance) Blockstore() blockstore.Blockstore { return i.blockstore } +// SetBlockstoreLatency customizes the artificial delay on receiving 
blocks +// from a blockstore test instance. func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { return i.blockstoreDelay.Set(t) } -// session creates a test bitswap instance. +// MkSession creates a test bitswap instance. // // NB: It's easy make mistakes by providing the same peer ID to two different // sessions. To safeguard, use the SessionGenerator to generate sessions. It's @@ -99,9 +108,10 @@ func MkSession(ctx context.Context, net tn.Network, p testutil.Identity) Instanc panic(err.Error()) // FIXME perhaps change signature and return error. } - bs := New(ctx, adapter, bstore).(*Bitswap) + bs := bitswap.New(ctx, adapter, bstore).(*bitswap.Bitswap) return Instance{ + Adapter: adapter, Peer: p.ID(), Exchange: bs, blockstore: bstore, diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index ed7d4b1ec..3441f69d2 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -6,6 +6,8 @@ import ( "github.com/libp2p/go-testutil" ) +// Network is an interface for generating bitswap network interfaces +// based on a test network. type Network interface { Adapter(testutil.Identity) bsnet.BitSwapNetwork diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index dbad1f65e..cea4b7278 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -17,6 +17,7 @@ type peernet struct { routingserver mockrouting.Server } +// StreamNet is a testnet that uses libp2p's MockNet func StreamNet(ctx context.Context, net mockpeernet.Mocknet, rs mockrouting.Server) (Network, error) { return &peernet{net, rs}, nil } diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index e3af99d09..19cc47d3d 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -24,6 +24,8 @@ import ( var log = logging.Logger("bstestnet") +// VirtualNetwork generates a new testnet instance - a fake network that +// is used to simulate sending messages. 
func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { return &network{ latencies: make(map[peer.ID]map[peer.ID]time.Duration), @@ -36,10 +38,13 @@ func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { } } +// RateLimitGenerator is an interface for generating rate limits across peers type RateLimitGenerator interface { NextRateLimit() float64 } +// RateLimitedVirtualNetwork generates a testnet instance where nodes are rate +// limited in the upload/download speed. func RateLimitedVirtualNetwork(rs mockrouting.Server, d delay.D, rateLimitGenerator RateLimitGenerator) Network { return &network{ latencies: make(map[peer.ID]map[peer.ID]time.Duration), @@ -168,7 +173,7 @@ type networkClient struct { bsnet.Receiver network *network routing routing.IpfsRouting - stats bsnet.NetworkStats + stats bsnet.Stats } func (nc *networkClient) SendMessage( @@ -182,8 +187,8 @@ func (nc *networkClient) SendMessage( return nil } -func (nc *networkClient) Stats() bsnet.NetworkStats { - return bsnet.NetworkStats{ +func (nc *networkClient) Stats() bsnet.Stats { + return bsnet.Stats{ MessagesRecvd: atomic.LoadUint64(&nc.stats.MessagesRecvd), MessagesSent: atomic.LoadUint64(&nc.stats.MessagesSent), } @@ -234,11 +239,11 @@ func (mp *messagePasser) Reset() error { return nil } -func (n *networkClient) NewMessageSender(ctx context.Context, p peer.ID) (bsnet.MessageSender, error) { +func (nc *networkClient) NewMessageSender(ctx context.Context, p peer.ID) (bsnet.MessageSender, error) { return &messagePasser{ - net: n, + net: nc, target: p, - local: n.local, + local: nc.local, ctx: ctx, }, nil } diff --git a/bitswap/testutil/testutil.go b/bitswap/testutil/testutil.go index 87bd91d2d..6f82fede6 100644 --- a/bitswap/testutil/testutil.go +++ b/bitswap/testutil/testutil.go @@ -5,7 +5,7 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" "github.com/ipfs/go-bitswap/wantlist" - "github.com/ipfs/go-block-format" + blocks "github.com/ipfs/go-block-format" cid 
"github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" peer "github.com/libp2p/go-libp2p-peer" diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 999fcd9ef..b5c2a602c 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -8,14 +8,18 @@ import ( cid "github.com/ipfs/go-cid" ) +// SessionTrackedWantlist is a list of wants that also track which bitswap +// sessions have requested them type SessionTrackedWantlist struct { set map[cid.Cid]*sessionTrackedEntry } +// Wantlist is a raw list of wanted blocks and their priorities type Wantlist struct { set map[cid.Cid]Entry } +// Entry is an entry in a want list, consisting of a cid and its priority type Entry struct { Cid cid.Cid Priority int @@ -40,12 +44,14 @@ func (es entrySlice) Len() int { return len(es) } func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] } func (es entrySlice) Less(i, j int) bool { return es[i].Priority > es[j].Priority } +// NewSessionTrackedWantlist generates a new SessionTrackedWantList. func NewSessionTrackedWantlist() *SessionTrackedWantlist { return &SessionTrackedWantlist{ set: make(map[cid.Cid]*sessionTrackedEntry), } } +// New generates a new raw Wantlist func New() *Wantlist { return &Wantlist{ set: make(map[cid.Cid]Entry), @@ -116,6 +122,7 @@ func (w *SessionTrackedWantlist) Contains(k cid.Cid) (Entry, bool) { return e.Entry, true } +// Entries returns all wantlist entries for a given session tracked want list. func (w *SessionTrackedWantlist) Entries() []Entry { es := make([]Entry, 0, len(w.set)) for _, e := range w.set { @@ -124,16 +131,20 @@ func (w *SessionTrackedWantlist) Entries() []Entry { return es } +// SortedEntries returns wantlist entries ordered by priority. func (w *SessionTrackedWantlist) SortedEntries() []Entry { es := w.Entries() sort.Sort(entrySlice(es)) return es } +// Len returns the number of entries in a wantlist. 
func (w *SessionTrackedWantlist) Len() int { return len(w.set) } +// CopyWants copies all wants from one SessionTrackWantlist to another (along with +// the session data) func (w *SessionTrackedWantlist) CopyWants(to *SessionTrackedWantlist) { for _, e := range w.set { for k := range e.sesTrk { @@ -142,10 +153,12 @@ func (w *SessionTrackedWantlist) CopyWants(to *SessionTrackedWantlist) { } } +// Len returns the number of entries in a wantlist. func (w *Wantlist) Len() int { return len(w.set) } +// Add adds an entry in a wantlist from CID & Priority, if not already present. func (w *Wantlist) Add(c cid.Cid, priority int) bool { if _, ok := w.set[c]; ok { return false @@ -159,6 +172,7 @@ func (w *Wantlist) Add(c cid.Cid, priority int) bool { return true } +// AddEntry adds an entry to a wantlist if not already present. func (w *Wantlist) AddEntry(e Entry) bool { if _, ok := w.set[e.Cid]; ok { return false @@ -167,6 +181,7 @@ func (w *Wantlist) AddEntry(e Entry) bool { return true } +// Remove removes the given cid from the wantlist. func (w *Wantlist) Remove(c cid.Cid) bool { _, ok := w.set[c] if !ok { @@ -177,11 +192,14 @@ func (w *Wantlist) Remove(c cid.Cid) bool { return true } +// Contains returns the entry, if present, for the given CID, plus whether it +// was present. func (w *Wantlist) Contains(c cid.Cid) (Entry, bool) { e, ok := w.set[c] return e, ok } +// Entries returns all wantlist entries for a want list. func (w *Wantlist) Entries() []Entry { es := make([]Entry, 0, len(w.set)) for _, e := range w.set { @@ -190,6 +208,7 @@ func (w *Wantlist) Entries() []Entry { return es } +// SortedEntries returns wantlist entries ordered by priority. 
func (w *Wantlist) SortedEntries() []Entry { es := w.Entries() sort.Sort(entrySlice(es)) diff --git a/bitswap/workers.go b/bitswap/workers.go index 6e0bf037f..4a6e91dd6 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -11,9 +11,11 @@ import ( procctx "github.com/jbenet/goprocess/context" ) +// TaskWorkerCount is the total number of simultaneous threads sending +// outgoing messages var TaskWorkerCount = 8 -func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) { +func (bs *Bitswap) startWorkers(ctx context.Context, px process.Process) { // Start up workers to handle requests from other nodes for the data on this node for i := 0; i < TaskWorkerCount; i++ { From 4ab04469a7447991f86b2e83f2d33ef245175d68 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 9 May 2019 17:05:29 -0700 Subject: [PATCH 0754/1035] refactor(testinstance): rename instance generator Instance generator was previously named session generator, which created confusion with bitswap sessions fix #101 This commit was moved from ipfs/go-bitswap@7af3e0a540195f3a987817583838f0682a0e1b87 --- bitswap/benchmarks_test.go | 12 ++-- bitswap/bitswap_test.go | 80 +++++++++++++-------------- bitswap/bitswap_with_sessions_test.go | 50 ++++++++--------- bitswap/testinstance/testinstance.go | 26 ++++----- 4 files changed, 84 insertions(+), 84 deletions(-) diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index 291982741..dbe05889d 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -148,12 +148,12 @@ func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, d start := time.Now() net := tn.VirtualNetwork(mockrouting.NewServer(), d) - sg := testinstance.NewTestSessionGenerator(net) - defer sg.Close() + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() bg := blocksutil.NewBlockGenerator() - instances := sg.Instances(numnodes) + instances := ig.Instances(numnodes) blocks := bg.Blocks(numblks) 
runDistribution(b, instances, blocks, df, ff, start) } @@ -162,10 +162,10 @@ func subtestDistributeAndFetchRateLimited(b *testing.B, numnodes, numblks int, d start := time.Now() net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) - sg := testinstance.NewTestSessionGenerator(net) - defer sg.Close() + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() - instances := sg.Instances(numnodes) + instances := ig.Instances(numnodes) blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) runDistribution(b, instances, blocks, df, ff, start) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 55690a735..c1d059b4c 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -36,12 +36,12 @@ func getVirtualNetwork() tn.Network { func TestClose(t *testing.T) { vnet := getVirtualNetwork() - sesgen := testinstance.NewTestSessionGenerator(vnet) - defer sesgen.Close() + ig := testinstance.NewTestInstanceGenerator(vnet) + defer ig.Close() bgen := blocksutil.NewBlockGenerator() block := bgen.Next() - bitswap := sesgen.Next() + bitswap := ig.Next() bitswap.Exchange.Close() bitswap.Exchange.GetBlock(context.Background(), block.Cid()) @@ -51,14 +51,14 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this rs := mockrouting.NewServer() net := tn.VirtualNetwork(rs, delay.Fixed(kNetworkDelay)) - g := testinstance.NewTestSessionGenerator(net) - defer g.Close() + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() block := blocks.NewBlock([]byte("block")) pinfo := p2ptestutil.RandTestBogusIdentityOrFatal(t) rs.Client(pinfo).Provide(context.Background(), block.Cid(), true) // but not on network - solo := g.Next() + solo := ig.Next() defer solo.Exchange.Close() ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) @@ -74,10 +74,10 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), 
delay.Fixed(kNetworkDelay)) block := blocks.NewBlock([]byte("block")) - g := testinstance.NewTestSessionGenerator(net) - defer g.Close() + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() - peers := g.Instances(2) + peers := ig.Instances(2) hasBlock := peers[0] defer hasBlock.Exchange.Close() @@ -107,10 +107,10 @@ func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) block := blocks.NewBlock([]byte("block")) - g := testinstance.NewTestSessionGenerator(net) - defer g.Close() + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() - hasBlock := g.Next() + hasBlock := ig.Next() defer hasBlock.Exchange.Close() if err := hasBlock.Exchange.HasBlock(block); err != nil { @@ -120,7 +120,7 @@ func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - wantsBlock := g.Next() + wantsBlock := ig.Next() defer wantsBlock.Exchange.Close() ns := wantsBlock.Exchange.NewSession(ctx).(*bssession.Session) @@ -144,10 +144,10 @@ func TestUnwantedBlockNotAdded(t *testing.T) { bsMessage := message.New(true) bsMessage.AddBlock(block) - g := testinstance.NewTestSessionGenerator(net) - defer g.Close() + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() - peers := g.Instances(2) + peers := ig.Instances(2) hasBlock := peers[0] defer hasBlock.Exchange.Close() @@ -216,11 +216,11 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { t.SkipNow() } net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := testinstance.NewTestSessionGenerator(net) - defer sg.Close() + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() bg := blocksutil.NewBlockGenerator() - instances := sg.Instances(numInstances) + instances := ig.Instances(numInstances) blocks := bg.Blocks(numBlocks) t.Log("Give the blocks to the first instance") @@ 
-279,11 +279,11 @@ func TestSendToWantingPeer(t *testing.T) { } net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := testinstance.NewTestSessionGenerator(net) - defer sg.Close() + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() bg := blocksutil.NewBlockGenerator() - peers := sg.Instances(2) + peers := ig.Instances(2) peerA := peers[0] peerB := peers[1] @@ -321,9 +321,9 @@ func TestSendToWantingPeer(t *testing.T) { func TestEmptyKey(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := testinstance.NewTestSessionGenerator(net) - defer sg.Close() - bs := sg.Instances(1)[0].Exchange + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() + bs := ig.Instances(1)[0].Exchange ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() @@ -354,13 +354,13 @@ func assertStat(t *testing.T, st *bitswap.Stat, sblks, rblks, sdata, rdata uint6 func TestBasicBitswap(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := testinstance.NewTestSessionGenerator(net) - defer sg.Close() + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() bg := blocksutil.NewBlockGenerator() t.Log("Test a one node trying to get one block from another") - instances := sg.Instances(3) + instances := ig.Instances(3) blocks := bg.Blocks(1) err := instances[0].Exchange.HasBlock(blocks[0]) if err != nil { @@ -423,13 +423,13 @@ func TestBasicBitswap(t *testing.T) { func TestDoubleGet(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := testinstance.NewTestSessionGenerator(net) - defer sg.Close() + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() bg := blocksutil.NewBlockGenerator() t.Log("Test a one node trying to get one block from another") - instances := sg.Instances(2) + instances := ig.Instances(2) blocks := bg.Blocks(1) // NOTE: A race 
condition can happen here where these GetBlocks requests go @@ -491,11 +491,11 @@ func TestDoubleGet(t *testing.T) { func TestWantlistCleanup(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := testinstance.NewTestSessionGenerator(net) - defer sg.Close() + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() bg := blocksutil.NewBlockGenerator() - instances := sg.Instances(1)[0] + instances := ig.Instances(1)[0] bswap := instances.Exchange blocks := bg.Blocks(20) @@ -602,13 +602,13 @@ func newReceipt(sent, recv, exchanged uint64) *decision.Receipt { func TestBitswapLedgerOneWay(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := testinstance.NewTestSessionGenerator(net) - defer sg.Close() + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() bg := blocksutil.NewBlockGenerator() t.Log("Test ledgers match when one peer sends block to another") - instances := sg.Instances(2) + instances := ig.Instances(2) blocks := bg.Blocks(1) err := instances[0].Exchange.HasBlock(blocks[0]) if err != nil { @@ -654,13 +654,13 @@ func TestBitswapLedgerOneWay(t *testing.T) { func TestBitswapLedgerTwoWay(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - sg := testinstance.NewTestSessionGenerator(net) - defer sg.Close() + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() bg := blocksutil.NewBlockGenerator() t.Log("Test ledgers match when two peers send one block to each other") - instances := sg.Instances(2) + instances := ig.Instances(2) blocks := bg.Blocks(2) err := instances[0].Exchange.HasBlock(blocks[0]) if err != nil { diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/bitswap_with_sessions_test.go index dd26a30c8..50be52caf 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -19,12 +19,12 @@ func TestBasicSessions(t *testing.T) { defer 
cancel() vnet := getVirtualNetwork() - sesgen := testinstance.NewTestSessionGenerator(vnet) - defer sesgen.Close() + ig := testinstance.NewTestInstanceGenerator(vnet) + defer ig.Close() bgen := blocksutil.NewBlockGenerator() block := bgen.Next() - inst := sesgen.Instances(2) + inst := ig.Instances(2) a := inst[0] b := inst[1] @@ -67,11 +67,11 @@ func TestSessionBetweenPeers(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - sesgen := testinstance.NewTestSessionGenerator(vnet) - defer sesgen.Close() + ig := testinstance.NewTestInstanceGenerator(vnet) + defer ig.Close() bgen := blocksutil.NewBlockGenerator() - inst := sesgen.Instances(10) + inst := ig.Instances(10) blks := bgen.Blocks(101) if err := inst[0].Blockstore().PutMany(blks); err != nil { @@ -120,11 +120,11 @@ func TestSessionSplitFetch(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - sesgen := testinstance.NewTestSessionGenerator(vnet) - defer sesgen.Close() + ig := testinstance.NewTestInstanceGenerator(vnet) + defer ig.Close() bgen := blocksutil.NewBlockGenerator() - inst := sesgen.Instances(11) + inst := ig.Instances(11) blks := bgen.Blocks(100) for i := 0; i < 10; i++ { @@ -163,11 +163,11 @@ func TestFetchNotConnected(t *testing.T) { bssession.SetProviderSearchDelay(10 * time.Millisecond) vnet := getVirtualNetwork() - sesgen := testinstance.NewTestSessionGenerator(vnet) - defer sesgen.Close() + ig := testinstance.NewTestInstanceGenerator(vnet) + defer ig.Close() bgen := blocksutil.NewBlockGenerator() - other := sesgen.Next() + other := ig.Next() blks := bgen.Blocks(10) for _, block := range blks { @@ -181,7 +181,7 @@ func TestFetchNotConnected(t *testing.T) { cids = append(cids, blk.Cid()) } - thisNode := sesgen.Next() + thisNode := ig.Next() ses := thisNode.Exchange.NewSession(ctx).(*bssession.Session) ses.SetBaseTickDelay(time.Millisecond * 10) @@ -203,12 +203,12 @@ func TestInterestCacheOverflow(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - sesgen := 
testinstance.NewTestSessionGenerator(vnet) - defer sesgen.Close() + ig := testinstance.NewTestInstanceGenerator(vnet) + defer ig.Close() bgen := blocksutil.NewBlockGenerator() blks := bgen.Blocks(2049) - inst := sesgen.Instances(2) + inst := ig.Instances(2) a := inst[0] b := inst[1] @@ -255,12 +255,12 @@ func TestPutAfterSessionCacheEvict(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - sesgen := testinstance.NewTestSessionGenerator(vnet) - defer sesgen.Close() + ig := testinstance.NewTestInstanceGenerator(vnet) + defer ig.Close() bgen := blocksutil.NewBlockGenerator() blks := bgen.Blocks(2500) - inst := sesgen.Instances(1) + inst := ig.Instances(1) a := inst[0] @@ -295,12 +295,12 @@ func TestMultipleSessions(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - sesgen := testinstance.NewTestSessionGenerator(vnet) - defer sesgen.Close() + ig := testinstance.NewTestInstanceGenerator(vnet) + defer ig.Close() bgen := blocksutil.NewBlockGenerator() blk := bgen.Blocks(1)[0] - inst := sesgen.Instances(2) + inst := ig.Instances(2) a := inst[0] b := inst[1] @@ -338,8 +338,8 @@ func TestWantlistClearsOnCancel(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - sesgen := testinstance.NewTestSessionGenerator(vnet) - defer sesgen.Close() + ig := testinstance.NewTestInstanceGenerator(vnet) + defer ig.Close() bgen := blocksutil.NewBlockGenerator() blks := bgen.Blocks(10) @@ -348,7 +348,7 @@ func TestWantlistClearsOnCancel(t *testing.T) { cids = append(cids, blk.Cid()) } - inst := sesgen.Instances(1) + inst := ig.Instances(1) a := inst[0] diff --git a/bitswap/testinstance/testinstance.go b/bitswap/testinstance/testinstance.go index f677c9493..f459065fc 100644 --- a/bitswap/testinstance/testinstance.go +++ b/bitswap/testinstance/testinstance.go @@ -17,12 +17,12 @@ import ( testutil "github.com/libp2p/go-testutil" ) -// NewTestSessionGenerator generates a new SessionGenerator for the given +// NewTestInstanceGenerator generates a new InstanceGenerator 
for the given // testnet -func NewTestSessionGenerator( - net tn.Network) SessionGenerator { +func NewTestInstanceGenerator( + net tn.Network) InstanceGenerator { ctx, cancel := context.WithCancel(context.Background()) - return SessionGenerator{ + return InstanceGenerator{ net: net, seq: 0, ctx: ctx, // TODO take ctx as param to Next, Instances @@ -30,8 +30,8 @@ func NewTestSessionGenerator( } } -// SessionGenerator generates new test instances of bitswap+dependencies -type SessionGenerator struct { +// InstanceGenerator generates new test instances of bitswap+dependencies +type InstanceGenerator struct { seq int net tn.Network ctx context.Context @@ -39,23 +39,23 @@ type SessionGenerator struct { } // Close closes the clobal context, shutting down all test instances -func (g *SessionGenerator) Close() error { +func (g *InstanceGenerator) Close() error { g.cancel() return nil // for Closer interface } // Next generates a new instance of bitswap + dependencies -func (g *SessionGenerator) Next() Instance { +func (g *InstanceGenerator) Next() Instance { g.seq++ p, err := p2ptestutil.RandTestBogusIdentity() if err != nil { panic("FIXME") // TODO change signature } - return MkSession(g.ctx, g.net, p) + return NewInstance(g.ctx, g.net, p) } // Instances creates N test instances of bitswap + dependencies -func (g *SessionGenerator) Instances(n int) []Instance { +func (g *InstanceGenerator) Instances(n int) []Instance { var instances []Instance for j := 0; j < n; j++ { inst := g.Next() @@ -90,12 +90,12 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { return i.blockstoreDelay.Set(t) } -// MkSession creates a test bitswap instance. +// NewInstance creates a test bitswap instance. // // NB: It's easy make mistakes by providing the same peer ID to two different -// sessions. To safeguard, use the SessionGenerator to generate sessions. It's +// instances. To safeguard, use the InstanceGenerator to generate instances. It's // just a much better idea. 
-func MkSession(ctx context.Context, net tn.Network, p testutil.Identity) Instance { +func NewInstance(ctx context.Context, net tn.Network, p testutil.Identity) Instance { bsdelay := delay.Fixed(0) adapter := net.Adapter(p) From 1b2cb26e2d46cd70df7b169b47b8a3a77bdab74d Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 9 May 2019 19:08:46 -0700 Subject: [PATCH 0755/1035] refactor(decision): use external peer task queue Uses shared external package peer task queue in place of peer request queue. Shared by graphsync. This commit was moved from ipfs/go-bitswap@81e6fc27f63d9a5e243ca2fa2f3b0bb82c8b123c --- bitswap/decision/bench_test.go | 30 -- bitswap/decision/engine.go | 38 ++- bitswap/decision/peer_request_queue.go | 356 -------------------- bitswap/decision/peer_request_queue_test.go | 162 --------- 4 files changed, 22 insertions(+), 564 deletions(-) delete mode 100644 bitswap/decision/bench_test.go delete mode 100644 bitswap/decision/peer_request_queue.go delete mode 100644 bitswap/decision/peer_request_queue_test.go diff --git a/bitswap/decision/bench_test.go b/bitswap/decision/bench_test.go deleted file mode 100644 index 4ef862a36..000000000 --- a/bitswap/decision/bench_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package decision - -import ( - "fmt" - "math" - "testing" - - "github.com/ipfs/go-bitswap/wantlist" - cid "github.com/ipfs/go-cid" - u "github.com/ipfs/go-ipfs-util" - "github.com/libp2p/go-libp2p-peer" - "github.com/libp2p/go-testutil" -) - -// FWIW: At the time of this commit, including a timestamp in task increases -// time cost of Push by 3%. 
-func BenchmarkTaskQueuePush(b *testing.B) { - q := newPRQ() - peers := []peer.ID{ - testutil.RandPeerIDFatal(b), - testutil.RandPeerIDFatal(b), - testutil.RandPeerIDFatal(b), - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - c := cid.NewCidV0(u.Hash([]byte(fmt.Sprint(i)))) - - q.Push(peers[i%len(peers)], wantlist.Entry{Cid: c, Priority: math.MaxInt32}) - } -} diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index c2de9299c..a79015677 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -8,6 +8,9 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" wl "github.com/ipfs/go-bitswap/wantlist" + cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-peertaskqueue" + "github.com/ipfs/go-peertaskqueue/peertask" blocks "github.com/ipfs/go-block-format" bstore "github.com/ipfs/go-ipfs-blockstore" @@ -73,7 +76,7 @@ type Engine struct { // peerRequestQueue is a priority queue of requests received from peers. // Requests are popped from the queue, packaged up, and placed in the // outbox. - peerRequestQueue *prq + peerRequestQueue *peertaskqueue.PeerTaskQueue // FIXME it's a bit odd for the client and the worker to both share memory // (both modify the peerRequestQueue) and also to communicate over the @@ -100,7 +103,7 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { e := &Engine{ ledgerMap: make(map[peer.ID]*ledger), bs: bs, - peerRequestQueue: newPRQ(), + peerRequestQueue: peertaskqueue.New(), outbox: make(chan (<-chan *Envelope), outboxChanBuffer), workSignal: make(chan struct{}, 1), ticker: time.NewTicker(time.Millisecond * 100), @@ -159,23 +162,23 @@ func (e *Engine) taskWorker(ctx context.Context) { // context is cancelled before the next Envelope can be created. 
func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { for { - nextTask := e.peerRequestQueue.Pop() + nextTask := e.peerRequestQueue.PopBlock() for nextTask == nil { select { case <-ctx.Done(): return nil, ctx.Err() case <-e.workSignal: - nextTask = e.peerRequestQueue.Pop() + nextTask = e.peerRequestQueue.PopBlock() case <-e.ticker.C: - e.peerRequestQueue.thawRound() - nextTask = e.peerRequestQueue.Pop() + e.peerRequestQueue.ThawRound() + nextTask = e.peerRequestQueue.PopBlock() } } // with a task in hand, we're ready to prepare the envelope... msg := bsmsg.New(true) - for _, entry := range nextTask.Entries { - block, err := e.bs.Get(entry.Cid) + for _, entry := range nextTask.Tasks { + block, err := e.bs.Get(entry.Identifier.(cid.Cid)) if err != nil { log.Errorf("tried to execute a task and errored fetching block: %s", err) continue @@ -186,7 +189,7 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { if msg.Empty() { // If we don't have the block, don't hold that against the peer // make sure to update that the task has been 'completed' - nextTask.Done(nextTask.Entries) + nextTask.Done(nextTask.Tasks) continue } @@ -194,7 +197,7 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { Peer: nextTask.Target, Message: msg, Sent: func() { - nextTask.Done(nextTask.Entries) + nextTask.Done(nextTask.Tasks) select { case e.workSignal <- struct{}{}: // work completing may mean that our queue will provide new @@ -246,7 +249,7 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) { } var msgSize int - var activeEntries []wl.Entry + var activeEntries []peertask.Task for _, entry := range m.Wantlist() { if entry.Cancel { log.Debugf("%s cancel %s", p, entry.Cid) @@ -265,17 +268,17 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) { // we have the block newWorkExists = true if msgSize+blockSize > maxMessageSize { - e.peerRequestQueue.Push(p, activeEntries...) 
- activeEntries = []wl.Entry{} + e.peerRequestQueue.PushBlock(p, activeEntries...) + activeEntries = []peertask.Task{} msgSize = 0 } - activeEntries = append(activeEntries, entry.Entry) + activeEntries = append(activeEntries, peertask.Task{Identifier: entry.Cid, Priority: entry.Priority}) msgSize += blockSize } } } if len(activeEntries) > 0 { - e.peerRequestQueue.Push(p, activeEntries...) + e.peerRequestQueue.PushBlock(p, activeEntries...) } for _, block := range m.Blocks() { log.Debugf("got block %s %d bytes", block, len(block.RawData())) @@ -289,7 +292,10 @@ func (e *Engine) addBlock(block blocks.Block) { for _, l := range e.ledgerMap { l.lk.Lock() if entry, ok := l.WantListContains(block.Cid()); ok { - e.peerRequestQueue.Push(l.Partner, entry) + e.peerRequestQueue.PushBlock(l.Partner, peertask.Task{ + Identifier: entry.Cid, + Priority: entry.Priority, + }) work = true } l.lk.Unlock() diff --git a/bitswap/decision/peer_request_queue.go b/bitswap/decision/peer_request_queue.go deleted file mode 100644 index 5cb95c782..000000000 --- a/bitswap/decision/peer_request_queue.go +++ /dev/null @@ -1,356 +0,0 @@ -package decision - -import ( - "sync" - "time" - - wantlist "github.com/ipfs/go-bitswap/wantlist" - - cid "github.com/ipfs/go-cid" - pq "github.com/ipfs/go-ipfs-pq" - peer "github.com/libp2p/go-libp2p-peer" -) - -type peerRequestQueue interface { - // Pop returns the next peerRequestTask. Returns nil if the peerRequestQueue is empty. - Pop() *peerRequestTask - Push(to peer.ID, entries ...wantlist.Entry) - Remove(k cid.Cid, p peer.ID) - - // NB: cannot expose simply expose taskQueue.Len because trashed elements - // may exist. These trashed elements should not contribute to the count. 
-} - -func newPRQ() *prq { - return &prq{ - taskMap: make(map[taskEntryKey]*peerRequestTask), - partners: make(map[peer.ID]*activePartner), - frozen: make(map[peer.ID]*activePartner), - pQueue: pq.New(partnerCompare), - } -} - -// verify interface implementation -var _ peerRequestQueue = &prq{} - -// TODO: at some point, the strategy needs to plug in here -// to help decide how to sort tasks (on add) and how to select -// tasks (on getnext). For now, we are assuming a dumb/nice strategy. -type prq struct { - lock sync.Mutex - pQueue pq.PQ - taskMap map[taskEntryKey]*peerRequestTask - partners map[peer.ID]*activePartner - - frozen map[peer.ID]*activePartner -} - -// Push currently adds a new peerRequestTask to the end of the list. -func (tl *prq) Push(to peer.ID, entries ...wantlist.Entry) { - tl.lock.Lock() - defer tl.lock.Unlock() - partner, ok := tl.partners[to] - if !ok { - partner = newActivePartner(to) - tl.pQueue.Push(partner) - tl.partners[to] = partner - } - - partner.activelk.Lock() - defer partner.activelk.Unlock() - - var priority int - newEntries := make([]peerRequestTaskEntry, 0, len(entries)) - for _, entry := range entries { - if partner.activeBlocks.Has(entry.Cid) { - continue - } - if task, ok := tl.taskMap[taskEntryKey{to, entry.Cid}]; ok { - if entry.Priority > task.Priority { - task.Priority = entry.Priority - partner.taskQueue.Update(task.index) - } - continue - } - if entry.Priority > priority { - priority = entry.Priority - } - newEntries = append(newEntries, peerRequestTaskEntry{entry, false}) - } - - if len(newEntries) == 0 { - return - } - - task := &peerRequestTask{ - Entries: newEntries, - Target: to, - created: time.Now(), - Done: func(e []peerRequestTaskEntry) { - tl.lock.Lock() - for _, entry := range e { - partner.TaskDone(entry.Cid) - } - tl.pQueue.Update(partner.Index()) - tl.lock.Unlock() - }, - } - task.Priority = priority - partner.taskQueue.Push(task) - for _, entry := range newEntries { - tl.taskMap[taskEntryKey{to, 
entry.Cid}] = task - } - partner.requests += len(newEntries) - tl.pQueue.Update(partner.Index()) -} - -// Pop 'pops' the next task to be performed. Returns nil if no task exists. -func (tl *prq) Pop() *peerRequestTask { - tl.lock.Lock() - defer tl.lock.Unlock() - if tl.pQueue.Len() == 0 { - return nil - } - partner := tl.pQueue.Pop().(*activePartner) - - var out *peerRequestTask - for partner.taskQueue.Len() > 0 && partner.freezeVal == 0 { - out = partner.taskQueue.Pop().(*peerRequestTask) - - newEntries := make([]peerRequestTaskEntry, 0, len(out.Entries)) - for _, entry := range out.Entries { - delete(tl.taskMap, taskEntryKey{out.Target, entry.Cid}) - if entry.trash { - continue - } - partner.requests-- - partner.StartTask(entry.Cid) - newEntries = append(newEntries, entry) - } - if len(newEntries) > 0 { - out.Entries = newEntries - } else { - out = nil // discarding tasks that have been removed - continue - } - break // and return |out| - } - - if partner.IsIdle() { - target := partner.target - delete(tl.partners, target) - delete(tl.frozen, target) - } else { - tl.pQueue.Push(partner) - } - return out -} - -// Remove removes a task from the queue. -func (tl *prq) Remove(k cid.Cid, p peer.ID) { - tl.lock.Lock() - t, ok := tl.taskMap[taskEntryKey{p, k}] - if ok { - for i := range t.Entries { - if t.Entries[i].Cid.Equals(k) { - // remove the task "lazily" - // simply mark it as trash, so it'll be dropped when popped off the - // queue. - t.Entries[i].trash = true - break - } - } - - // having canceled a block, we now account for that in the given partner - partner := tl.partners[p] - partner.requests-- - - // we now also 'freeze' that partner. 
If they sent us a cancel for a - // block we were about to send them, we should wait a short period of time - // to make sure we receive any other in-flight cancels before sending - // them a block they already potentially have - if partner.freezeVal == 0 { - tl.frozen[p] = partner - } - - partner.freezeVal++ - tl.pQueue.Update(partner.index) - } - tl.lock.Unlock() -} - -func (tl *prq) fullThaw() { - tl.lock.Lock() - defer tl.lock.Unlock() - - for id, partner := range tl.frozen { - partner.freezeVal = 0 - delete(tl.frozen, id) - tl.pQueue.Update(partner.index) - } -} - -func (tl *prq) thawRound() { - tl.lock.Lock() - defer tl.lock.Unlock() - - for id, partner := range tl.frozen { - partner.freezeVal -= (partner.freezeVal + 1) / 2 - if partner.freezeVal <= 0 { - delete(tl.frozen, id) - } - tl.pQueue.Update(partner.index) - } -} - -type peerRequestTaskEntry struct { - wantlist.Entry - // trash in a book-keeping field - trash bool -} -type peerRequestTask struct { - Entries []peerRequestTaskEntry - Priority int - Target peer.ID - - // A callback to signal that this task has been completed - Done func([]peerRequestTaskEntry) - - // created marks the time that the task was added to the queue - created time.Time - index int // book-keeping field used by the pq container -} - -// Index implements pq.Elem. -func (t *peerRequestTask) Index() int { - return t.index -} - -// SetIndex implements pq.Elem. -func (t *peerRequestTask) SetIndex(i int) { - t.index = i -} - -// taskEntryKey is a key identifying a task. -type taskEntryKey struct { - p peer.ID - k cid.Cid -} - -// FIFO is a basic task comparator that returns tasks in the order created. -var FIFO = func(a, b *peerRequestTask) bool { - return a.created.Before(b.created) -} - -// V1 respects the target peer's wantlist priority. For tasks involving -// different peers, the oldest task is prioritized. 
-var V1 = func(a, b *peerRequestTask) bool { - if a.Target == b.Target { - return a.Priority > b.Priority - } - return FIFO(a, b) -} - -func wrapCmp(f func(a, b *peerRequestTask) bool) func(a, b pq.Elem) bool { - return func(a, b pq.Elem) bool { - return f(a.(*peerRequestTask), b.(*peerRequestTask)) - } -} - -type activePartner struct { - target peer.ID - // Active is the number of blocks this peer is currently being sent - // active must be locked around as it will be updated externally - activelk sync.Mutex - active int - - activeBlocks *cid.Set - - // requests is the number of blocks this peer is currently requesting - // request need not be locked around as it will only be modified under - // the peerRequestQueue's locks - requests int - - // for the PQ interface - index int - - freezeVal int - - // priority queue of tasks belonging to this peer - taskQueue pq.PQ -} - -func newActivePartner(target peer.ID) *activePartner { - return &activePartner{ - target: target, - taskQueue: pq.New(wrapCmp(V1)), - activeBlocks: cid.NewSet(), - } -} - -// partnerCompare implements pq.ElemComparator -// returns true if peer 'a' has higher priority than peer 'b' -func partnerCompare(a, b pq.Elem) bool { - pa := a.(*activePartner) - pb := b.(*activePartner) - - // having no blocks in their wantlist means lowest priority - // having both of these checks ensures stability of the sort - if pa.requests == 0 { - return false - } - if pb.requests == 0 { - return true - } - - if pa.freezeVal > pb.freezeVal { - return false - } - if pa.freezeVal < pb.freezeVal { - return true - } - - if pa.active == pb.active { - // sorting by taskQueue.Len() aids in cleaning out trash entries faster - // if we sorted instead by requests, one peer could potentially build up - // a huge number of cancelled entries in the queue resulting in a memory leak - return pa.taskQueue.Len() > pb.taskQueue.Len() - } - return pa.active < pb.active -} - -// StartTask signals that a task was started for this partner. 
-func (p *activePartner) StartTask(k cid.Cid) { - p.activelk.Lock() - p.activeBlocks.Add(k) - p.active++ - p.activelk.Unlock() -} - -// TaskDone signals that a task was completed for this partner. -func (p *activePartner) TaskDone(k cid.Cid) { - p.activelk.Lock() - - p.activeBlocks.Remove(k) - p.active-- - if p.active < 0 { - panic("more tasks finished than started!") - } - p.activelk.Unlock() -} - -func (p *activePartner) IsIdle() bool { - p.activelk.Lock() - defer p.activelk.Unlock() - return p.requests == 0 && p.active == 0 -} - -// Index implements pq.Elem. -func (p *activePartner) Index() int { - return p.index -} - -// SetIndex implements pq.Elem. -func (p *activePartner) SetIndex(i int) { - p.index = i -} diff --git a/bitswap/decision/peer_request_queue_test.go b/bitswap/decision/peer_request_queue_test.go deleted file mode 100644 index 33b111a52..000000000 --- a/bitswap/decision/peer_request_queue_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package decision - -import ( - "fmt" - "math" - "math/rand" - "sort" - "strings" - "testing" - - "github.com/ipfs/go-bitswap/wantlist" - cid "github.com/ipfs/go-cid" - u "github.com/ipfs/go-ipfs-util" - "github.com/libp2p/go-testutil" -) - -func TestPushPop(t *testing.T) { - prq := newPRQ() - partner := testutil.RandPeerIDFatal(t) - alphabet := strings.Split("abcdefghijklmnopqrstuvwxyz", "") - vowels := strings.Split("aeiou", "") - consonants := func() []string { - var out []string - for _, letter := range alphabet { - skip := false - for _, vowel := range vowels { - if letter == vowel { - skip = true - } - } - if !skip { - out = append(out, letter) - } - } - return out - }() - sort.Strings(alphabet) - sort.Strings(vowels) - sort.Strings(consonants) - - // add a bunch of blocks. cancel some. drain the queue. 
the queue should only have the kept entries - - for _, index := range rand.Perm(len(alphabet)) { // add blocks for all letters - letter := alphabet[index] - t.Log(partner.String()) - - c := cid.NewCidV0(u.Hash([]byte(letter))) - prq.Push(partner, wantlist.Entry{Cid: c, Priority: math.MaxInt32 - index}) - } - for _, consonant := range consonants { - c := cid.NewCidV0(u.Hash([]byte(consonant))) - prq.Remove(c, partner) - } - - prq.fullThaw() - - var out []string - for { - received := prq.Pop() - if received == nil { - break - } - - for _, entry := range received.Entries { - out = append(out, entry.Cid.String()) - } - } - - // Entries popped should already be in correct order - for i, expected := range vowels { - exp := cid.NewCidV0(u.Hash([]byte(expected))).String() - if out[i] != exp { - t.Fatal("received", out[i], "expected", expected) - } - } -} - -// This test checks that peers wont starve out other peers -func TestPeerRepeats(t *testing.T) { - prq := newPRQ() - a := testutil.RandPeerIDFatal(t) - b := testutil.RandPeerIDFatal(t) - c := testutil.RandPeerIDFatal(t) - d := testutil.RandPeerIDFatal(t) - - // Have each push some blocks - - for i := 0; i < 5; i++ { - elcid := cid.NewCidV0(u.Hash([]byte(fmt.Sprint(i)))) - prq.Push(a, wantlist.Entry{Cid: elcid}) - prq.Push(b, wantlist.Entry{Cid: elcid}) - prq.Push(c, wantlist.Entry{Cid: elcid}) - prq.Push(d, wantlist.Entry{Cid: elcid}) - } - - // now, pop off four entries, there should be one from each - var targets []string - var tasks []*peerRequestTask - for i := 0; i < 4; i++ { - t := prq.Pop() - targets = append(targets, t.Target.Pretty()) - tasks = append(tasks, t) - } - - expected := []string{a.Pretty(), b.Pretty(), c.Pretty(), d.Pretty()} - sort.Strings(expected) - sort.Strings(targets) - - t.Log(targets) - t.Log(expected) - for i, s := range targets { - if expected[i] != s { - t.Fatal("unexpected peer", s, expected[i]) - } - } - - // Now, if one of the tasks gets finished, the next task off the queue should - // 
be for the same peer - for blockI := 0; blockI < 4; blockI++ { - for i := 0; i < 4; i++ { - // its okay to mark the same task done multiple times here (JUST FOR TESTING) - tasks[i].Done(tasks[i].Entries) - - ntask := prq.Pop() - if ntask.Target != tasks[i].Target { - t.Fatal("Expected task from peer with lowest active count") - } - } - } -} - -func TestCleaningUpQueues(t *testing.T) { - partner := testutil.RandPeerIDFatal(t) - var entries []wantlist.Entry - for i := 0; i < 5; i++ { - entries = append(entries, wantlist.Entry{Cid: cid.NewCidV0(u.Hash([]byte(fmt.Sprint(i))))}) - } - - prq := newPRQ() - - // push a block, pop a block, complete everything, should be removed - prq.Push(partner, entries...) - task := prq.Pop() - task.Done(task.Entries) - task = prq.Pop() - - if task != nil || len(prq.partners) > 0 || prq.pQueue.Len() > 0 { - t.Fatal("Partner should have been removed because it's idle") - } - - // push a block, remove each of its entries, should be removed - prq.Push(partner, entries...) 
- for _, entry := range entries { - prq.Remove(entry.Cid, partner) - } - task = prq.Pop() - - if task != nil || len(prq.partners) > 0 || prq.pQueue.Len() > 0 { - t.Fatal("Partner should have been removed because it's idle") - } - -} From 4081de76c46cd7ad82c2c4a94cbbad386c8a3fc6 Mon Sep 17 00:00:00 2001 From: Michael Avila Date: Fri, 17 May 2019 12:37:34 -0700 Subject: [PATCH 0756/1035] Introduce functional option for enabling/disabling provide This commit was moved from ipfs/go-bitswap@0bae16c6cbb946fa35fa215385b31d8a95ec9daa --- bitswap/bitswap.go | 54 ++++++++++++++++++---------- bitswap/bitswap_test.go | 14 ++++---- bitswap/testinstance/testinstance.go | 27 +++++++------- bitswap/workers.go | 2 +- 4 files changed, 56 insertions(+), 41 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 4a407feba..6213627af 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -42,10 +42,6 @@ const ( ) var ( - // ProvideEnabled is a variable that tells Bitswap whether or not - // to handle providing blocks (see experimental provider system) - ProvideEnabled = true - // HasBlockBufferSize is the buffer size of the channel for new blocks // that need to be provided. They should get pulled over by the // provideCollector even before they are actually provided. @@ -58,11 +54,22 @@ var ( metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} ) +// Option defines the functional option type that can be used to configure +// bitswap instances +type Option func(*Bitswap) + +// ProvideEnabled is an option for enabling/disabling provide announcements +func ProvideEnabled(enabled bool) Option { + return func(bs *Bitswap) { + bs.provideEnabled = enabled + } +} + // New initializes a BitSwap instance that communicates over the provided // BitSwapNetwork. This function registers the returned instance as the network // delegate. Runs until context is cancelled or bitswap.Close is called. 
func New(parent context.Context, network bsnet.BitSwapNetwork, - bstore blockstore.Blockstore) exchange.Interface { + bstore blockstore.Blockstore, options ...Option) exchange.Interface { // important to use provided parent context (since it may include important // loggable data). It's probably not a good idea to allow bitswap to be @@ -103,19 +110,25 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, } bs := &Bitswap{ - blockstore: bstore, - engine: decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method - network: network, - process: px, - newBlocks: make(chan cid.Cid, HasBlockBufferSize), - provideKeys: make(chan cid.Cid, provideKeysBufferSize), - wm: wm, - pqm: pqm, - sm: bssm.New(ctx, sessionFactory, sessionPeerManagerFactory, sessionRequestSplitterFactory), - counters: new(counters), - dupMetric: dupHist, - allMetric: allHist, - sentHistogram: sentHistogram, + blockstore: bstore, + engine: decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method + network: network, + process: px, + newBlocks: make(chan cid.Cid, HasBlockBufferSize), + provideKeys: make(chan cid.Cid, provideKeysBufferSize), + wm: wm, + pqm: pqm, + sm: bssm.New(ctx, sessionFactory, sessionPeerManagerFactory, sessionRequestSplitterFactory), + counters: new(counters), + dupMetric: dupHist, + allMetric: allHist, + sentHistogram: sentHistogram, + provideEnabled: true, + } + + // apply functional options before starting and running bitswap + for _, option := range options { + option(bs) } bs.wm.Startup() @@ -174,6 +187,9 @@ type Bitswap struct { // the sessionmanager manages tracking sessions sm *bssm.SessionManager + + // whether or not to make provide announcements + provideEnabled bool } type counters struct { @@ -253,7 +269,7 @@ func (bs *Bitswap) receiveBlockFrom(blk blocks.Block, from peer.ID) error { bs.engine.AddBlock(blk) - if ProvideEnabled { + if bs.provideEnabled { select { case bs.newBlocks <- blk.Cid(): // send block off to be 
reprovided diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index c1d059b4c..ce13ec68d 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -102,27 +102,25 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { } func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { - bitswap.ProvideEnabled = false - defer func() { bitswap.ProvideEnabled = true }() - + bssession.SetProviderSearchDelay(10 * time.Millisecond) net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) block := blocks.NewBlock([]byte("block")) - ig := testinstance.NewTestInstanceGenerator(net) + ig := testinstance.NewTestInstanceGenerator(net, bitswap.ProvideEnabled(false)) defer ig.Close() hasBlock := ig.Next() defer hasBlock.Exchange.Close() + wantsBlock := ig.Next() + defer wantsBlock.Exchange.Close() + if err := hasBlock.Exchange.HasBlock(block); err != nil { t.Fatal(err) } - ctx, cancel := context.WithTimeout(context.Background(), time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) defer cancel() - wantsBlock := ig.Next() - defer wantsBlock.Exchange.Close() - ns := wantsBlock.Exchange.NewSession(ctx).(*bssession.Session) // set find providers delay to less than timeout context of this test ns.SetBaseTickDelay(10 * time.Millisecond) diff --git a/bitswap/testinstance/testinstance.go b/bitswap/testinstance/testinstance.go index f459065fc..bd61b90ed 100644 --- a/bitswap/testinstance/testinstance.go +++ b/bitswap/testinstance/testinstance.go @@ -19,23 +19,24 @@ import ( // NewTestInstanceGenerator generates a new InstanceGenerator for the given // testnet -func NewTestInstanceGenerator( - net tn.Network) InstanceGenerator { +func NewTestInstanceGenerator(net tn.Network, bsOptions ...bitswap.Option) InstanceGenerator { ctx, cancel := context.WithCancel(context.Background()) return InstanceGenerator{ - net: net, - seq: 0, - ctx: ctx, // TODO take ctx as param to Next, Instances - cancel: cancel, + net: 
net, + seq: 0, + ctx: ctx, // TODO take ctx as param to Next, Instances + cancel: cancel, + bsOptions: bsOptions, } } // InstanceGenerator generates new test instances of bitswap+dependencies type InstanceGenerator struct { - seq int - net tn.Network - ctx context.Context - cancel context.CancelFunc + seq int + net tn.Network + ctx context.Context + cancel context.CancelFunc + bsOptions []bitswap.Option } // Close closes the clobal context, shutting down all test instances @@ -51,7 +52,7 @@ func (g *InstanceGenerator) Next() Instance { if err != nil { panic("FIXME") // TODO change signature } - return NewInstance(g.ctx, g.net, p) + return NewInstance(g.ctx, g.net, p, g.bsOptions...) } // Instances creates N test instances of bitswap + dependencies @@ -95,7 +96,7 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // NB: It's easy make mistakes by providing the same peer ID to two different // instances. To safeguard, use the InstanceGenerator to generate instances. It's // just a much better idea. -func NewInstance(ctx context.Context, net tn.Network, p testutil.Identity) Instance { +func NewInstance(ctx context.Context, net tn.Network, p testutil.Identity, options ...bitswap.Option) Instance { bsdelay := delay.Fixed(0) adapter := net.Adapter(p) @@ -108,7 +109,7 @@ func NewInstance(ctx context.Context, net tn.Network, p testutil.Identity) Insta panic(err.Error()) // FIXME perhaps change signature and return error. 
} - bs := bitswap.New(ctx, adapter, bstore).(*bitswap.Bitswap) + bs := bitswap.New(ctx, adapter, bstore, options...).(*bitswap.Bitswap) return Instance{ Adapter: adapter, diff --git a/bitswap/workers.go b/bitswap/workers.go index 4a6e91dd6..fb3dc019f 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -25,7 +25,7 @@ func (bs *Bitswap) startWorkers(ctx context.Context, px process.Process) { }) } - if ProvideEnabled { + if bs.provideEnabled { // Start up a worker to manage sending out provides messages px.Go(func(px process.Process) { bs.provideCollector(ctx) From d287ac26a994df7e6f44c11b481a187a344c9e25 Mon Sep 17 00:00:00 2001 From: Michael Avila Date: Mon, 20 May 2019 11:11:16 -0700 Subject: [PATCH 0757/1035] Fixup timing; Unset ProviderSearchDelay at test exit This commit was moved from ipfs/go-bitswap@94b505a64229ec01b3c6be432a83daac5f955c69 --- bitswap/bitswap_test.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index ce13ec68d..fd3066abc 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -102,7 +102,8 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { } func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { - bssession.SetProviderSearchDelay(10 * time.Millisecond) + bssession.SetProviderSearchDelay(50 * time.Millisecond) + defer bssession.SetProviderSearchDelay(time.Second) net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) block := blocks.NewBlock([]byte("block")) ig := testinstance.NewTestInstanceGenerator(net, bitswap.ProvideEnabled(false)) @@ -118,12 +119,10 @@ func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { t.Fatal(err) } - ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Millisecond) defer cancel() ns := wantsBlock.Exchange.NewSession(ctx).(*bssession.Session) - // set find providers delay to less than 
timeout context of this test - ns.SetBaseTickDelay(10 * time.Millisecond) received, err := ns.GetBlock(ctx, block.Cid()) if received != nil { From 2374b1cccbddaede7215b0e526a1aff58c1ccfa7 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 21 May 2019 21:53:26 -0700 Subject: [PATCH 0758/1035] fix(network): delay binding delay binding of network until a receiver is present. also add test of ipfs host network This commit was moved from ipfs/go-bitswap@f67349e93661f570e432f8fb5aeee0cc9ffeb31d --- bitswap/network/ipfs_impl.go | 12 +-- bitswap/network/ipfs_impl_test.go | 152 ++++++++++++++++++++++++++++++ 2 files changed, 158 insertions(+), 6 deletions(-) create mode 100644 bitswap/network/ipfs_impl_test.go diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index ffb4800d6..33c55d10a 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -31,12 +31,6 @@ func NewFromIpfsHost(host host.Host, r routing.ContentRouting) BitSwapNetwork { host: host, routing: r, } - host.SetStreamHandler(ProtocolBitswap, bitswapNetwork.handleNewStream) - host.SetStreamHandler(ProtocolBitswapOne, bitswapNetwork.handleNewStream) - host.SetStreamHandler(ProtocolBitswapNoVers, bitswapNetwork.handleNewStream) - host.Network().Notify((*netNotifiee)(&bitswapNetwork)) - // TODO: StopNotify. - return &bitswapNetwork } @@ -136,6 +130,12 @@ func (bsnet *impl) SendMessage( func (bsnet *impl) SetDelegate(r Receiver) { bsnet.receiver = r + bsnet.host.SetStreamHandler(ProtocolBitswap, bsnet.handleNewStream) + bsnet.host.SetStreamHandler(ProtocolBitswapOne, bsnet.handleNewStream) + bsnet.host.SetStreamHandler(ProtocolBitswapNoVers, bsnet.handleNewStream) + bsnet.host.Network().Notify((*netNotifiee)(bsnet)) + // TODO: StopNotify. 
+ } func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go new file mode 100644 index 000000000..1cac34f3d --- /dev/null +++ b/bitswap/network/ipfs_impl_test.go @@ -0,0 +1,152 @@ +package network_test + +import ( + "context" + "testing" + "time" + + bsmsg "github.com/ipfs/go-bitswap/message" + tn "github.com/ipfs/go-bitswap/testnet" + blocksutil "github.com/ipfs/go-ipfs-blocksutil" + mockrouting "github.com/ipfs/go-ipfs-routing/mock" + peer "github.com/libp2p/go-libp2p-peer" + mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" + testutil "github.com/libp2p/go-testutil" +) + +// Receiver is an interface for receiving messages from the GraphSyncNetwork. +type receiver struct { + peers map[peer.ID]struct{} + messageReceived chan struct{} + connectionEvent chan struct{} + lastMessage bsmsg.BitSwapMessage + lastSender peer.ID +} + +func (r *receiver) ReceiveMessage( + ctx context.Context, + sender peer.ID, + incoming bsmsg.BitSwapMessage) { + r.lastSender = sender + r.lastMessage = incoming + select { + case <-ctx.Done(): + case r.messageReceived <- struct{}{}: + } +} + +func (r *receiver) ReceiveError(err error) { +} + +func (r *receiver) PeerConnected(p peer.ID) { + r.peers[p] = struct{}{} + select { + case r.connectionEvent <- struct{}{}: + } +} + +func (r *receiver) PeerDisconnected(p peer.ID) { + delete(r.peers, p) + select { + case r.connectionEvent <- struct{}{}: + } +} +func TestMessageSendAndReceive(t *testing.T) { + // create network + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + mn := mocknet.New(ctx) + mr := mockrouting.NewServer() + streamNet, err := tn.StreamNet(ctx, mn, mr) + if err != nil { + t.Fatal("Unable to setup network") + } + p1 := testutil.RandIdentityOrFatal(t) + p2 := testutil.RandIdentityOrFatal(t) + + bsnet1 := streamNet.Adapter(p1) + bsnet2 := streamNet.Adapter(p2) + r1 := &receiver{ + 
peers: make(map[peer.ID]struct{}), + messageReceived: make(chan struct{}), + connectionEvent: make(chan struct{}, 1), + } + r2 := &receiver{ + peers: make(map[peer.ID]struct{}), + messageReceived: make(chan struct{}), + connectionEvent: make(chan struct{}, 1), + } + bsnet1.SetDelegate(r1) + bsnet2.SetDelegate(r2) + + mn.LinkAll() + bsnet1.ConnectTo(ctx, p2.ID()) + select { + case <-ctx.Done(): + t.Fatal("did not connect peer") + case <-r1.connectionEvent: + } + bsnet2.ConnectTo(ctx, p1.ID()) + select { + case <-ctx.Done(): + t.Fatal("did not connect peer") + case <-r2.connectionEvent: + } + if _, ok := r1.peers[p2.ID()]; !ok { + t.Fatal("did to connect to correct peer") + } + if _, ok := r2.peers[p1.ID()]; !ok { + t.Fatal("did to connect to correct peer") + } + blockGenerator := blocksutil.NewBlockGenerator() + block1 := blockGenerator.Next() + block2 := blockGenerator.Next() + sent := bsmsg.New(false) + sent.AddEntry(block1.Cid(), 1) + sent.AddBlock(block2) + + bsnet1.SendMessage(ctx, p2.ID(), sent) + + select { + case <-ctx.Done(): + t.Fatal("did not receive message sent") + case <-r2.messageReceived: + } + + sender := r2.lastSender + if sender != p1.ID() { + t.Fatal("received message from wrong node") + } + + received := r2.lastMessage + + sentWants := sent.Wantlist() + if len(sentWants) != 1 { + t.Fatal("Did not add want to sent message") + } + sentWant := sentWants[0] + receivedWants := received.Wantlist() + if len(receivedWants) != 1 { + t.Fatal("Did not add want to received message") + } + receivedWant := receivedWants[0] + if receivedWant.Cid != sentWant.Cid || + receivedWant.Priority != receivedWant.Priority || + receivedWant.Cancel != receivedWant.Cancel { + t.Fatal("Sent message wants did not match received message wants") + } + sentBlocks := sent.Blocks() + if len(sentBlocks) != 1 { + t.Fatal("Did not add block to sent message") + } + sentBlock := sentBlocks[0] + receivedBlocks := received.Blocks() + if len(receivedBlocks) != 1 { + t.Fatal("Did not add 
response to received message") + } + receivedBlock := receivedBlocks[0] + if receivedBlock.Cid() != sentBlock.Cid() { + t.Fatal("Sent message blocks did not match received message blocks") + } +} From 1bfd0c21d2649f0dead36f86fce3bcf837f0318b Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 22 May 2019 09:09:59 -0700 Subject: [PATCH 0759/1035] feat(engine): tag peers with requests tag peers in connection manager as they have outstanding requests for blocks to serve fix #114 This commit was moved from ipfs/go-bitswap@b711c363356596a962c25de0530272ea6c3fdc11 --- bitswap/bitswap.go | 2 +- bitswap/decision/engine.go | 48 ++++++++++++++----- bitswap/decision/engine_test.go | 84 +++++++++++++++++++++++++++++---- 3 files changed, 114 insertions(+), 20 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 6213627af..757e8be93 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -111,7 +111,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, bs := &Bitswap{ blockstore: bstore, - engine: decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method + engine: decision.NewEngine(ctx, bstore, network.ConnectionManager()), // TODO close the engine with Close() method network: network, process: px, newBlocks: make(chan cid.Cid, HasBlockBufferSize), diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index a79015677..e16544292 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -3,18 +3,19 @@ package decision import ( "context" + "fmt" "sync" "time" + "github.com/google/uuid" bsmsg "github.com/ipfs/go-bitswap/message" wl "github.com/ipfs/go-bitswap/wantlist" - cid "github.com/ipfs/go-cid" - "github.com/ipfs/go-peertaskqueue" - "github.com/ipfs/go-peertaskqueue/peertask" - blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" bstore "github.com/ipfs/go-ipfs-blockstore" logging "github.com/ipfs/go-log" + "github.com/ipfs/go-peertaskqueue" + 
"github.com/ipfs/go-peertaskqueue/peertask" peer "github.com/libp2p/go-libp2p-peer" ) @@ -57,6 +58,11 @@ const ( outboxChanBuffer = 0 // maxMessageSize is the maximum size of the batched payload maxMessageSize = 512 * 1024 + // tagPrefix is the tag given to peers associated an engine + tagPrefix = "bs-engine-%s" + + // tagWeight is the default weight for peers associated with an engine + tagWeight = 5 ) // Envelope contains a message for a Peer. @@ -71,6 +77,13 @@ type Envelope struct { Sent func() } +// PeerTagger covers the methods on the connection manager used by the decision +// engine to tag peers +type PeerTagger interface { + TagPeer(peer.ID, string, int) + UntagPeer(p peer.ID, tag string) +} + // Engine manages sending requested blocks to peers. type Engine struct { // peerRequestQueue is a priority queue of requests received from peers. @@ -91,6 +104,9 @@ type Engine struct { bs bstore.Blockstore + peerTagger PeerTagger + + tag string lock sync.Mutex // protects the fields immediatly below // ledgerMap lists Ledgers by their Partner key. 
ledgerMap map[peer.ID]*ledger @@ -99,19 +115,29 @@ type Engine struct { } // NewEngine creates a new block sending engine for the given block store -func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine { +func NewEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger) *Engine { e := &Engine{ - ledgerMap: make(map[peer.ID]*ledger), - bs: bs, - peerRequestQueue: peertaskqueue.New(), - outbox: make(chan (<-chan *Envelope), outboxChanBuffer), - workSignal: make(chan struct{}, 1), - ticker: time.NewTicker(time.Millisecond * 100), + ledgerMap: make(map[peer.ID]*ledger), + bs: bs, + peerTagger: peerTagger, + outbox: make(chan (<-chan *Envelope), outboxChanBuffer), + workSignal: make(chan struct{}, 1), + ticker: time.NewTicker(time.Millisecond * 100), } + e.tag = fmt.Sprintf(tagPrefix, uuid.New().String()) + e.peerRequestQueue = peertaskqueue.New(peertaskqueue.OnPeerAddedHook(e.onPeerAdded), peertaskqueue.OnPeerRemovedHook(e.onPeerRemoved)) go e.taskWorker(ctx) return e } +func (e *Engine) onPeerAdded(p peer.ID) { + e.peerTagger.TagPeer(p, e.tag, tagWeight) +} + +func (e *Engine) onPeerRemoved(p peer.ID) { + e.peerTagger.UntagPeer(p, e.tag) +} + // WantlistForPeer returns the currently understood want list for a given peer func (e *Engine) WantlistForPeer(p peer.ID) (out []wl.Entry) { partner := e.findOrCreate(p) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 73130ca14..43c48b7eb 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -7,6 +7,7 @@ import ( "strings" "sync" "testing" + "time" message "github.com/ipfs/go-bitswap/message" @@ -18,17 +19,57 @@ import ( testutil "github.com/libp2p/go-testutil" ) -type peerAndEngine struct { - Peer peer.ID - Engine *Engine +type fakePeerTagger struct { + lk sync.Mutex + wait sync.WaitGroup + taggedPeers []peer.ID } -func newEngine(ctx context.Context, idStr string) peerAndEngine { - return peerAndEngine{ +func (fpt *fakePeerTagger) 
TagPeer(p peer.ID, tag string, n int) { + fpt.wait.Add(1) + + fpt.lk.Lock() + defer fpt.lk.Unlock() + fpt.taggedPeers = append(fpt.taggedPeers, p) +} + +func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) { + defer fpt.wait.Done() + + fpt.lk.Lock() + defer fpt.lk.Unlock() + for i := 0; i < len(fpt.taggedPeers); i++ { + if fpt.taggedPeers[i] == p { + fpt.taggedPeers[i] = fpt.taggedPeers[len(fpt.taggedPeers)-1] + fpt.taggedPeers = fpt.taggedPeers[:len(fpt.taggedPeers)-1] + return + } + } +} + +func (fpt *fakePeerTagger) count() int { + fpt.lk.Lock() + defer fpt.lk.Unlock() + return len(fpt.taggedPeers) +} + +type engineSet struct { + PeerTagger *fakePeerTagger + Peer peer.ID + Engine *Engine + Blockstore blockstore.Blockstore +} + +func newEngine(ctx context.Context, idStr string) engineSet { + fpt := &fakePeerTagger{} + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + return engineSet{ Peer: peer.ID(idStr), //Strategy: New(true), + PeerTagger: fpt, + Blockstore: bs, Engine: NewEngine(ctx, - blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))), + bs, fpt), } } @@ -107,7 +148,7 @@ func peerIsPartner(p peer.ID, e *Engine) bool { func TestOutboxClosedWhenEngineClosed(t *testing.T) { t.SkipNow() // TODO implement *Engine.Close - e := NewEngine(context.Background(), blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))) + e := NewEngine(context.Background(), blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}) var wg sync.WaitGroup wg.Add(1) go func() { @@ -164,7 +205,7 @@ func TestPartnerWantsThenCancels(t *testing.T) { for i := 0; i < numRounds; i++ { expected := make([][]string, 0, len(testcases)) - e := NewEngine(context.Background(), bs) + e := NewEngine(context.Background(), bs, &fakePeerTagger{}) for _, testcase := range testcases { set := testcase[0] cancels := testcase[1] @@ -183,6 +224,33 @@ func TestPartnerWantsThenCancels(t *testing.T) { } } +func TestTaggingPeers(t 
*testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + sanfrancisco := newEngine(ctx, "sf") + seattle := newEngine(ctx, "sea") + + keys := []string{"a", "b", "c", "d", "e"} + for _, letter := range keys { + block := blocks.NewBlock([]byte(letter)) + if err := sanfrancisco.Blockstore.Put(block); err != nil { + t.Fatal(err) + } + } + partnerWants(sanfrancisco.Engine, keys, seattle.Peer) + next := <-sanfrancisco.Engine.Outbox() + envelope := <-next + + if sanfrancisco.PeerTagger.count() != 1 { + t.Fatal("Incorrect number of peers tagged") + } + envelope.Sent() + next = <-sanfrancisco.Engine.Outbox() + sanfrancisco.PeerTagger.wait.Wait() + if sanfrancisco.PeerTagger.count() != 0 { + t.Fatal("Peers should be untagged but weren't") + } +} func partnerWants(e *Engine, keys []string, partner peer.ID) { add := message.New(false) for i, letter := range keys { From 9b90033ed59e8f9cc133cd4285b3969e748731bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Kripalani?= Date: Tue, 28 May 2019 17:02:11 +0100 Subject: [PATCH 0760/1035] migrate to go-libp2p-core. 
This commit was moved from ipfs/go-bitswap@8cc0b26240b467dd3c0731d1c4cf031497ae6dfc --- bitswap/bitswap.go | 2 +- bitswap/decision/engine.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/decision/ledger.go | 2 +- bitswap/message/message.go | 5 ++- bitswap/messagequeue/messagequeue.go | 2 +- bitswap/messagequeue/messagequeue_test.go | 2 +- bitswap/network/interface.go | 9 ++-- bitswap/network/ipfs_impl.go | 45 ++++++++++--------- bitswap/network/ipfs_impl_test.go | 9 ++-- bitswap/peermanager/peermanager.go | 2 +- bitswap/peermanager/peermanager_test.go | 2 +- .../providerquerymanager.go | 2 +- .../providerquerymanager_test.go | 2 +- bitswap/session/session.go | 2 +- bitswap/session/session_test.go | 2 +- bitswap/sessionmanager/sessionmanager.go | 2 +- bitswap/sessionmanager/sessionmanager_test.go | 2 +- .../sessionpeermanager/sessionpeermanager.go | 2 +- .../sessionpeermanager_test.go | 2 +- .../sessionrequestsplitter.go | 2 +- bitswap/testinstance/testinstance.go | 2 +- bitswap/testnet/interface.go | 7 +-- bitswap/testnet/network_test.go | 9 ++-- bitswap/testnet/peernet.go | 7 +-- bitswap/testnet/virtual.go | 19 ++++---- bitswap/testutil/testutil.go | 2 +- bitswap/wantmanager/wantmanager.go | 2 +- bitswap/wantmanager/wantmanager_test.go | 2 +- 29 files changed, 80 insertions(+), 72 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 757e8be93..245950a70 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -29,7 +29,7 @@ import ( metrics "github.com/ipfs/go-metrics-interface" process "github.com/jbenet/goprocess" procctx "github.com/jbenet/goprocess/context" - peer "github.com/libp2p/go-libp2p-peer" + peer "github.com/libp2p/go-libp2p-core/peer" ) var log = logging.Logger("bitswap") diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index e16544292..61bb4ca19 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -16,7 +16,7 @@ import ( logging "github.com/ipfs/go-log" 
"github.com/ipfs/go-peertaskqueue" "github.com/ipfs/go-peertaskqueue/peertask" - peer "github.com/libp2p/go-libp2p-peer" + peer "github.com/libp2p/go-libp2p-core/peer" ) // TODO consider taking responsibility for other types of requests. For diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 43c48b7eb..21c59eae8 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -15,7 +15,7 @@ import ( ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" blockstore "github.com/ipfs/go-ipfs-blockstore" - peer "github.com/libp2p/go-libp2p-peer" + peer "github.com/libp2p/go-libp2p-core/peer" testutil "github.com/libp2p/go-testutil" ) diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 37ca57459..12eca63b3 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -7,7 +7,7 @@ import ( wl "github.com/ipfs/go-bitswap/wantlist" cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-peer" + peer "github.com/libp2p/go-libp2p-core/peer" ) func newLedger(p peer.ID) *ledger { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 8bddc509c..df44d1123 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -10,7 +10,8 @@ import ( ggio "github.com/gogo/protobuf/io" cid "github.com/ipfs/go-cid" - inet "github.com/libp2p/go-libp2p-net" + + "github.com/libp2p/go-libp2p-core/network" ) // BitSwapMessage is the basic interface for interacting building, encoding, @@ -169,7 +170,7 @@ func (m *impl) AddBlock(b blocks.Block) { // FromNet generates a new BitswapMessage from incoming data on an io.Reader. 
func FromNet(r io.Reader) (BitSwapMessage, error) { - pbr := ggio.NewDelimitedReader(r, inet.MessageSizeMax) + pbr := ggio.NewDelimitedReader(r, network.MessageSizeMax) return FromPBReader(pbr) } diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go index a71425085..9e4724244 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -9,7 +9,7 @@ import ( bsnet "github.com/ipfs/go-bitswap/network" wantlist "github.com/ipfs/go-bitswap/wantlist" logging "github.com/ipfs/go-log" - peer "github.com/libp2p/go-libp2p-peer" + peer "github.com/libp2p/go-libp2p-core/peer" ) var log = logging.Logger("bitswap") diff --git a/bitswap/messagequeue/messagequeue_test.go b/bitswap/messagequeue/messagequeue_test.go index 146f21124..e9d09b931 100644 --- a/bitswap/messagequeue/messagequeue_test.go +++ b/bitswap/messagequeue/messagequeue_test.go @@ -9,7 +9,7 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" bsnet "github.com/ipfs/go-bitswap/network" - peer "github.com/libp2p/go-libp2p-peer" + peer "github.com/libp2p/go-libp2p-core/peer" ) type fakeMessageNetwork struct { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 1d7cdc744..783e29e9e 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -6,9 +6,10 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" cid "github.com/ipfs/go-cid" - ifconnmgr "github.com/libp2p/go-libp2p-interface-connmgr" - peer "github.com/libp2p/go-libp2p-peer" - protocol "github.com/libp2p/go-libp2p-protocol" + + "github.com/libp2p/go-libp2p-core/connmgr" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/protocol" ) var ( @@ -38,7 +39,7 @@ type BitSwapNetwork interface { NewMessageSender(context.Context, peer.ID) (MessageSender, error) - ConnectionManager() ifconnmgr.ConnManager + ConnectionManager() connmgr.ConnManager Stats() Stats diff --git a/bitswap/network/ipfs_impl.go 
b/bitswap/network/ipfs_impl.go index 33c55d10a..2cfbbcbf3 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,16 +8,17 @@ import ( "time" bsmsg "github.com/ipfs/go-bitswap/message" + "github.com/libp2p/go-libp2p-core/helpers" ggio "github.com/gogo/protobuf/io" cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" - host "github.com/libp2p/go-libp2p-host" - ifconnmgr "github.com/libp2p/go-libp2p-interface-connmgr" - inet "github.com/libp2p/go-libp2p-net" - peer "github.com/libp2p/go-libp2p-peer" - pstore "github.com/libp2p/go-libp2p-peerstore" - routing "github.com/libp2p/go-libp2p-routing" + "github.com/libp2p/go-libp2p-core/connmgr" + "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + peerstore "github.com/libp2p/go-libp2p-core/peerstore" + "github.com/libp2p/go-libp2p-core/routing" ma "github.com/multiformats/go-multiaddr" ) @@ -47,11 +48,11 @@ type impl struct { } type streamMessageSender struct { - s inet.Stream + s network.Stream } func (s *streamMessageSender) Close() error { - return inet.FullClose(s.s) + return helpers.FullClose(s.s) } func (s *streamMessageSender) Reset() error { @@ -62,7 +63,7 @@ func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMess return msgToStream(ctx, s.s, msg) } -func msgToStream(ctx context.Context, s inet.Stream, msg bsmsg.BitSwapMessage) error { +func msgToStream(ctx context.Context, s network.Stream, msg bsmsg.BitSwapMessage) error { deadline := time.Now().Add(sendMessageTimeout) if dl, ok := ctx.Deadline(); ok { deadline = dl @@ -102,7 +103,7 @@ func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID) (MessageSend return &streamMessageSender{s: s}, nil } -func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (inet.Stream, error) { +func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (network.Stream, error) { return bsnet.host.NewStream(ctx, p, 
ProtocolBitswap, ProtocolBitswapOne, ProtocolBitswapNoVers) } @@ -123,7 +124,7 @@ func (bsnet *impl) SendMessage( atomic.AddUint64(&bsnet.stats.MessagesSent, 1) // TODO(https://github.com/libp2p/go-libp2p-net/issues/28): Avoid this goroutine. - go inet.AwaitEOF(s) + go helpers.AwaitEOF(s) return s.Close() } @@ -139,7 +140,7 @@ func (bsnet *impl) SetDelegate(r Receiver) { } func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { - return bsnet.host.Connect(ctx, pstore.PeerInfo{ID: p}) + return bsnet.host.Connect(ctx, peer.AddrInfo{ID: p}) } // FindProvidersAsync returns a channel of providers for the given key. @@ -152,7 +153,7 @@ func (bsnet *impl) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) < if info.ID == bsnet.host.ID() { continue // ignore self as provider } - bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, pstore.TempAddrTTL) + bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.TempAddrTTL) select { case <-ctx.Done(): return @@ -169,7 +170,7 @@ func (bsnet *impl) Provide(ctx context.Context, k cid.Cid) error { } // handleNewStream receives a new stream from the network. 
-func (bsnet *impl) handleNewStream(s inet.Stream) { +func (bsnet *impl) handleNewStream(s network.Stream) { defer s.Close() if bsnet.receiver == nil { @@ -177,7 +178,7 @@ func (bsnet *impl) handleNewStream(s inet.Stream) { return } - reader := ggio.NewDelimitedReader(s, inet.MessageSizeMax) + reader := ggio.NewDelimitedReader(s, network.MessageSizeMax) for { received, err := bsmsg.FromPBReader(reader) if err != nil { @@ -197,7 +198,7 @@ func (bsnet *impl) handleNewStream(s inet.Stream) { } } -func (bsnet *impl) ConnectionManager() ifconnmgr.ConnManager { +func (bsnet *impl) ConnectionManager() connmgr.ConnManager { return bsnet.host.ConnManager() } @@ -214,15 +215,15 @@ func (nn *netNotifiee) impl() *impl { return (*impl)(nn) } -func (nn *netNotifiee) Connected(n inet.Network, v inet.Conn) { +func (nn *netNotifiee) Connected(n network.Network, v network.Conn) { nn.impl().receiver.PeerConnected(v.RemotePeer()) } -func (nn *netNotifiee) Disconnected(n inet.Network, v inet.Conn) { +func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) { nn.impl().receiver.PeerDisconnected(v.RemotePeer()) } -func (nn *netNotifiee) OpenedStream(n inet.Network, v inet.Stream) {} -func (nn *netNotifiee) ClosedStream(n inet.Network, v inet.Stream) {} -func (nn *netNotifiee) Listen(n inet.Network, a ma.Multiaddr) {} -func (nn *netNotifiee) ListenClose(n inet.Network, a ma.Multiaddr) {} +func (nn *netNotifiee) OpenedStream(n network.Network, v network.Stream) {} +func (nn *netNotifiee) ClosedStream(n network.Network, v network.Stream) {} +func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr) {} +func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr) {} diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index 1cac34f3d..2a8fab4c4 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -9,9 +9,10 @@ import ( tn "github.com/ipfs/go-bitswap/testnet" blocksutil 
"github.com/ipfs/go-ipfs-blocksutil" mockrouting "github.com/ipfs/go-ipfs-routing/mock" - peer "github.com/libp2p/go-libp2p-peer" + + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-testing/net" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - testutil "github.com/libp2p/go-testutil" ) // Receiver is an interface for receiving messages from the GraphSyncNetwork. @@ -62,8 +63,8 @@ func TestMessageSendAndReceive(t *testing.T) { if err != nil { t.Fatal("Unable to setup network") } - p1 := testutil.RandIdentityOrFatal(t) - p2 := testutil.RandIdentityOrFatal(t) + p1 := tnet.RandIdentityOrFatal(t) + p2 := tnet.RandIdentityOrFatal(t) bsnet1 := streamNet.Adapter(p1) bsnet2 := streamNet.Adapter(p2) diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index 658766d15..3aefbbe6d 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -6,7 +6,7 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" wantlist "github.com/ipfs/go-bitswap/wantlist" - peer "github.com/libp2p/go-libp2p-peer" + peer "github.com/libp2p/go-libp2p-core/peer" ) // PeerQueue provides a queer of messages to be sent for a single peer. 
diff --git a/bitswap/peermanager/peermanager_test.go b/bitswap/peermanager/peermanager_test.go index 0505f973b..cea9ce26b 100644 --- a/bitswap/peermanager/peermanager_test.go +++ b/bitswap/peermanager/peermanager_test.go @@ -10,7 +10,7 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" wantlist "github.com/ipfs/go-bitswap/wantlist" - "github.com/libp2p/go-libp2p-peer" + "github.com/libp2p/go-libp2p-core/peer" ) type messageSent struct { diff --git a/bitswap/providerquerymanager/providerquerymanager.go b/bitswap/providerquerymanager/providerquerymanager.go index a84e1f912..e1f77edf6 100644 --- a/bitswap/providerquerymanager/providerquerymanager.go +++ b/bitswap/providerquerymanager/providerquerymanager.go @@ -8,7 +8,7 @@ import ( "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" - peer "github.com/libp2p/go-libp2p-peer" + peer "github.com/libp2p/go-libp2p-core/peer" ) var log = logging.Logger("bitswap") diff --git a/bitswap/providerquerymanager/providerquerymanager_test.go b/bitswap/providerquerymanager/providerquerymanager_test.go index efdfd14f5..689c5ec2d 100644 --- a/bitswap/providerquerymanager/providerquerymanager_test.go +++ b/bitswap/providerquerymanager/providerquerymanager_test.go @@ -11,7 +11,7 @@ import ( "github.com/ipfs/go-bitswap/testutil" cid "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-peer" + "github.com/libp2p/go-libp2p-core/peer" ) type fakeProviderNetwork struct { diff --git a/bitswap/session/session.go b/bitswap/session/session.go index b57f472e6..b5aab6025 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -10,8 +10,8 @@ import ( blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" + peer "github.com/libp2p/go-libp2p-core/peer" loggables "github.com/libp2p/go-libp2p-loggables" - peer "github.com/libp2p/go-libp2p-peer" bssrs "github.com/ipfs/go-bitswap/sessionrequestsplitter" ) diff --git a/bitswap/session/session_test.go 
b/bitswap/session/session_test.go index 9f6aef549..8ff6ede1f 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -12,7 +12,7 @@ import ( "github.com/ipfs/go-bitswap/testutil" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" - peer "github.com/libp2p/go-libp2p-peer" + peer "github.com/libp2p/go-libp2p-core/peer" ) type wantReq struct { diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index ac1bb700a..1b4431153 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -9,7 +9,7 @@ import ( bssession "github.com/ipfs/go-bitswap/session" exchange "github.com/ipfs/go-ipfs-exchange-interface" - peer "github.com/libp2p/go-libp2p-peer" + peer "github.com/libp2p/go-libp2p-core/peer" ) // Session is a session that is managed by the session manager diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index 1310ac978..ff0ec15db 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -11,7 +11,7 @@ import ( blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-peer" + peer "github.com/libp2p/go-libp2p-core/peer" ) type fakeSession struct { diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index d5382980f..59bfbf497 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -6,7 +6,7 @@ import ( "math/rand" cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-peer" + peer "github.com/libp2p/go-libp2p-core/peer" ) const ( diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index 1cad238ad..2aceeecd3 100644 --- 
a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -10,7 +10,7 @@ import ( "github.com/ipfs/go-bitswap/testutil" cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-peer" + peer "github.com/libp2p/go-libp2p-core/peer" ) type fakePeerProviderFinder struct { diff --git a/bitswap/sessionrequestsplitter/sessionrequestsplitter.go b/bitswap/sessionrequestsplitter/sessionrequestsplitter.go index 1305b73b2..5400fe5c4 100644 --- a/bitswap/sessionrequestsplitter/sessionrequestsplitter.go +++ b/bitswap/sessionrequestsplitter/sessionrequestsplitter.go @@ -4,7 +4,7 @@ import ( "context" "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-peer" + "github.com/libp2p/go-libp2p-core/peer" ) const ( diff --git a/bitswap/testinstance/testinstance.go b/bitswap/testinstance/testinstance.go index bd61b90ed..0a5e20f58 100644 --- a/bitswap/testinstance/testinstance.go +++ b/bitswap/testinstance/testinstance.go @@ -12,8 +12,8 @@ import ( ds_sync "github.com/ipfs/go-datastore/sync" blockstore "github.com/ipfs/go-ipfs-blockstore" delay "github.com/ipfs/go-ipfs-delay" + peer "github.com/libp2p/go-libp2p-core/peer" p2ptestutil "github.com/libp2p/go-libp2p-netutil" - peer "github.com/libp2p/go-libp2p-peer" testutil "github.com/libp2p/go-testutil" ) diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index 3441f69d2..b6616256f 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -2,14 +2,15 @@ package bitswap import ( bsnet "github.com/ipfs/go-bitswap/network" - peer "github.com/libp2p/go-libp2p-peer" - "github.com/libp2p/go-testutil" + + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-testing/net" ) // Network is an interface for generating bitswap network interfaces // based on a test network. 
type Network interface { - Adapter(testutil.Identity) bsnet.BitSwapNetwork + Adapter(tnet.Identity) bsnet.BitSwapNetwork HasPeer(peer.ID) bool } diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 988c33ef1..d0b55ed55 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -11,14 +11,15 @@ import ( blocks "github.com/ipfs/go-block-format" delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" - peer "github.com/libp2p/go-libp2p-peer" - testutil "github.com/libp2p/go-testutil" + + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-testing/net" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { net := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) - responderPeer := testutil.RandIdentityOrFatal(t) - waiter := net.Adapter(testutil.RandIdentityOrFatal(t)) + responderPeer := tnet.RandIdentityOrFatal(t) + waiter := net.Adapter(tnet.RandIdentityOrFatal(t)) responder := net.Adapter(responderPeer) var wg sync.WaitGroup diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index cea4b7278..ffbe10264 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -7,9 +7,10 @@ import ( ds "github.com/ipfs/go-datastore" mockrouting "github.com/ipfs/go-ipfs-routing/mock" - peer "github.com/libp2p/go-libp2p-peer" + + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-testing/net" mockpeernet "github.com/libp2p/go-libp2p/p2p/net/mock" - testutil "github.com/libp2p/go-testutil" ) type peernet struct { @@ -22,7 +23,7 @@ func StreamNet(ctx context.Context, net mockpeernet.Mocknet, rs mockrouting.Serv return &peernet{net, rs}, nil } -func (pn *peernet) Adapter(p testutil.Identity) bsnet.BitSwapNetwork { +func (pn *peernet) Adapter(p tnet.Identity) bsnet.BitSwapNetwork { client, err := pn.Mocknet.AddPeer(p.PrivateKey(), p.Address()) if err != nil { panic(err.Error()) diff --git 
a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 19cc47d3d..8421c2db9 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -15,11 +15,12 @@ import ( delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" logging "github.com/ipfs/go-log" - ifconnmgr "github.com/libp2p/go-libp2p-interface-connmgr" - peer "github.com/libp2p/go-libp2p-peer" - routing "github.com/libp2p/go-libp2p-routing" + + "github.com/libp2p/go-libp2p-core/connmgr" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/routing" + "github.com/libp2p/go-libp2p-testing/net" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - testutil "github.com/libp2p/go-testutil" ) var log = logging.Logger("bstestnet") @@ -86,7 +87,7 @@ type receiverQueue struct { lk sync.Mutex } -func (n *network) Adapter(p testutil.Identity) bsnet.BitSwapNetwork { +func (n *network) Adapter(p tnet.Identity) bsnet.BitSwapNetwork { n.mu.Lock() defer n.mu.Unlock() @@ -172,7 +173,7 @@ type networkClient struct { local peer.ID bsnet.Receiver network *network - routing routing.IpfsRouting + routing routing.Routing stats bsnet.Stats } @@ -197,7 +198,7 @@ func (nc *networkClient) Stats() bsnet.Stats { // FindProvidersAsync returns a channel of providers for the given key. func (nc *networkClient) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { - // NB: this function duplicates the PeerInfo -> ID transformation in the + // NB: this function duplicates the AddrInfo -> ID transformation in the // bitswap network adapter. Not to worry. This network client will be // deprecated once the ipfsnet.Mock is added. The code below is only // temporary. 
@@ -216,8 +217,8 @@ func (nc *networkClient) FindProvidersAsync(ctx context.Context, k cid.Cid, max return out } -func (nc *networkClient) ConnectionManager() ifconnmgr.ConnManager { - return &ifconnmgr.NullConnMgr{} +func (nc *networkClient) ConnectionManager() connmgr.ConnManager { + return &connmgr.NullConnMgr{} } type messagePasser struct { diff --git a/bitswap/testutil/testutil.go b/bitswap/testutil/testutil.go index 6f82fede6..e47401eef 100644 --- a/bitswap/testutil/testutil.go +++ b/bitswap/testutil/testutil.go @@ -8,7 +8,7 @@ import ( blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" - peer "github.com/libp2p/go-libp2p-peer" + peer "github.com/libp2p/go-libp2p-core/peer" ) var blockGenerator = blocksutil.NewBlockGenerator() diff --git a/bitswap/wantmanager/wantmanager.go b/bitswap/wantmanager/wantmanager.go index 5f1129451..4203d14f4 100644 --- a/bitswap/wantmanager/wantmanager.go +++ b/bitswap/wantmanager/wantmanager.go @@ -10,7 +10,7 @@ import ( cid "github.com/ipfs/go-cid" metrics "github.com/ipfs/go-metrics-interface" - peer "github.com/libp2p/go-libp2p-peer" + peer "github.com/libp2p/go-libp2p-core/peer" ) var log = logging.Logger("bitswap") diff --git a/bitswap/wantmanager/wantmanager_test.go b/bitswap/wantmanager/wantmanager_test.go index 036908205..a721e24ab 100644 --- a/bitswap/wantmanager/wantmanager_test.go +++ b/bitswap/wantmanager/wantmanager_test.go @@ -11,7 +11,7 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-peer" + "github.com/libp2p/go-libp2p-core/peer" ) type fakePeerHandler struct { From 80a661c8c3b5e0975d0533d3e17d04d0358dea83 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 31 May 2019 18:46:33 -0700 Subject: [PATCH 0761/1035] dep: remove dep on libp2p/go-testutil This commit was moved from ipfs/go-bitswap@da10fb8ead49d6e841eeaf1b20807025ab578d92 --- bitswap/bitswap_test.go | 4 ++-- 
bitswap/bitswap_with_sessions_test.go | 2 +- bitswap/decision/engine_test.go | 2 +- bitswap/testinstance/testinstance.go | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index fd3066abc..ed4b31a6b 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -22,8 +22,8 @@ import ( delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" p2ptestutil "github.com/libp2p/go-libp2p-netutil" - tu "github.com/libp2p/go-testutil" - travis "github.com/libp2p/go-testutil/ci/travis" + travis "github.com/libp2p/go-libp2p-testing/ci/travis" + tu "github.com/libp2p/go-libp2p-testing/etc" ) // FIXME the tests are really sensitive to the network delay. fix them to work diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/bitswap_with_sessions_test.go index 50be52caf..85d936c4e 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -11,7 +11,7 @@ import ( blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" - tu "github.com/libp2p/go-testutil" + tu "github.com/libp2p/go-libp2p-testing/etc" ) func TestBasicSessions(t *testing.T) { diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 21c59eae8..d654c191c 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -16,7 +16,7 @@ import ( dssync "github.com/ipfs/go-datastore/sync" blockstore "github.com/ipfs/go-ipfs-blockstore" peer "github.com/libp2p/go-libp2p-core/peer" - testutil "github.com/libp2p/go-testutil" + testutil "github.com/libp2p/go-libp2p-core/test" ) type fakePeerTagger struct { diff --git a/bitswap/testinstance/testinstance.go b/bitswap/testinstance/testinstance.go index 0a5e20f58..65d25f135 100644 --- a/bitswap/testinstance/testinstance.go +++ b/bitswap/testinstance/testinstance.go @@ -14,7 +14,7 @@ import ( delay 
"github.com/ipfs/go-ipfs-delay" peer "github.com/libp2p/go-libp2p-core/peer" p2ptestutil "github.com/libp2p/go-libp2p-netutil" - testutil "github.com/libp2p/go-testutil" + tnet "github.com/libp2p/go-libp2p-testing/net" ) // NewTestInstanceGenerator generates a new InstanceGenerator for the given @@ -96,7 +96,7 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // NB: It's easy make mistakes by providing the same peer ID to two different // instances. To safeguard, use the InstanceGenerator to generate instances. It's // just a much better idea. -func NewInstance(ctx context.Context, net tn.Network, p testutil.Identity, options ...bitswap.Option) Instance { +func NewInstance(ctx context.Context, net tn.Network, p tnet.Identity, options ...bitswap.Option) Instance { bsdelay := delay.Fixed(0) adapter := net.Adapter(p) From 82925fb4e5a73a7774f8dd063a5ca4b0eac3697c Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 3 Jun 2019 10:22:21 -0700 Subject: [PATCH 0762/1035] testutil: fix block generator This commit was moved from ipfs/go-bitswap@1298633e4460aeb5a6b75f2d1e6d04c5ec4badb8 --- bitswap/testutil/testutil.go | 2 +- bitswap/testutil/testutil_test.go | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 bitswap/testutil/testutil_test.go diff --git a/bitswap/testutil/testutil.go b/bitswap/testutil/testutil.go index e47401eef..96d4241c5 100644 --- a/bitswap/testutil/testutil.go +++ b/bitswap/testutil/testutil.go @@ -17,9 +17,9 @@ var prioritySeq int // GenerateBlocksOfSize generates a series of blocks of the given byte size func GenerateBlocksOfSize(n int, size int64) []blocks.Block { generatedBlocks := make([]blocks.Block, 0, n) - buf := make([]byte, size) for i := 0; i < n; i++ { // rand.Read never errors + buf := make([]byte, size) rand.Read(buf) b := blocks.NewBlock(buf) generatedBlocks = append(generatedBlocks, b) diff --git a/bitswap/testutil/testutil_test.go b/bitswap/testutil/testutil_test.go new 
file mode 100644 index 000000000..c4dc1af15 --- /dev/null +++ b/bitswap/testutil/testutil_test.go @@ -0,0 +1,16 @@ +package testutil + +import ( + "testing" + + blocks "github.com/ipfs/go-block-format" +) + +func TestGenerateBlocksOfSize(t *testing.T) { + for _, b1 := range GenerateBlocksOfSize(10, 100) { + b2 := blocks.NewBlock(b1.RawData()) + if b2.Cid() != b1.Cid() { + t.Fatal("block CIDs mismatch") + } + } +} From 5d495296405193e802ebf7a47be2987bbf3e4be1 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 29 May 2019 16:29:48 -0700 Subject: [PATCH 0763/1035] feat(sessions): add rebroadcasting, search backoff on a tick, do not keep searching for providers for the same block. instead rely on a periodic search for more providers. (which will run no matter what, even w/o ticks, to optimize found providers). also backoff tick time to reduce broadcasts. fix #95, fix #107 This commit was moved from ipfs/go-bitswap@49a96fbef948888aa00ab6be3836220ba2009025 --- bitswap/session/session.go | 110 +++++++++++++++++++------- bitswap/session/session_test.go | 135 ++++++++++++++++++++++++++++++-- 2 files changed, 212 insertions(+), 33 deletions(-) diff --git a/bitswap/session/session.go b/bitswap/session/session.go index b5aab6025..0e335f901 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -2,6 +2,8 @@ package session import ( "context" + "fmt" + "math/rand" "time" lru "github.com/hashicorp/golang-lru" @@ -9,6 +11,7 @@ import ( notifications "github.com/ipfs/go-bitswap/notifications" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" + delay "github.com/ipfs/go-ipfs-delay" logging "github.com/ipfs/go-log" peer "github.com/libp2p/go-libp2p-core/peer" loggables "github.com/libp2p/go-libp2p-loggables" @@ -75,14 +78,17 @@ type Session struct { tickDelayReqs chan time.Duration // do not touch outside run loop - tofetch *cidQueue - interest *lru.Cache - pastWants *cidQueue - liveWants map[cid.Cid]time.Time - tick *time.Timer - 
baseTickDelay time.Duration - latTotal time.Duration - fetchcnt int + tofetch *cidQueue + interest *lru.Cache + pastWants *cidQueue + liveWants map[cid.Cid]time.Time + tick *time.Timer + rebroadcast *time.Timer + baseTickDelay time.Duration + latTotal time.Duration + fetchcnt int + consecutiveTicks int + lastFetchCount int // identifiers notif notifications.PubSub uuid logging.Loggable @@ -93,23 +99,24 @@ type Session struct { // given context. func New(ctx context.Context, id uint64, wm WantManager, pm PeerManager, srs RequestSplitter) *Session { s := &Session{ - liveWants: make(map[cid.Cid]time.Time), - newReqs: make(chan []cid.Cid), - cancelKeys: make(chan []cid.Cid), - tofetch: newCidQueue(), - pastWants: newCidQueue(), - interestReqs: make(chan interestReq), - latencyReqs: make(chan chan time.Duration), - tickDelayReqs: make(chan time.Duration), - ctx: ctx, - wm: wm, - pm: pm, - srs: srs, - incoming: make(chan blkRecv), - notif: notifications.New(), - uuid: loggables.Uuid("GetBlockRequest"), - baseTickDelay: time.Millisecond * 500, - id: id, + liveWants: make(map[cid.Cid]time.Time), + newReqs: make(chan []cid.Cid), + cancelKeys: make(chan []cid.Cid), + tofetch: newCidQueue(), + pastWants: newCidQueue(), + interestReqs: make(chan interestReq), + latencyReqs: make(chan chan time.Duration), + tickDelayReqs: make(chan time.Duration), + ctx: ctx, + wm: wm, + pm: pm, + srs: srs, + incoming: make(chan blkRecv), + notif: notifications.New(), + uuid: loggables.Uuid("GetBlockRequest"), + baseTickDelay: time.Millisecond * 500, + lastFetchCount: -1, + id: id, } cache, _ := lru.New(2048) @@ -223,16 +230,23 @@ func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { } var provSearchDelay = time.Second +var rebroadcastDelay = delay.Fixed(time.Minute) // SetProviderSearchDelay overwrites the global provider search delay func SetProviderSearchDelay(newProvSearchDelay time.Duration) { provSearchDelay = newProvSearchDelay } +// SetRebroadcastDelay overwrites the global 
provider rebroadcast delay +func SetRebroadcastDelay(newRebroadcastDelay delay.D) { + rebroadcastDelay = newRebroadcastDelay +} + // Session run loop -- everything function below here should not be called // of this loop func (s *Session) run(ctx context.Context) { s.tick = time.NewTimer(provSearchDelay) + s.rebroadcast = time.NewTimer(rebroadcastDelay.Get()) for { select { case blk := <-s.incoming: @@ -247,6 +261,8 @@ func (s *Session) run(ctx context.Context) { s.handleCancel(keys) case <-s.tick.C: s.handleTick(ctx) + case <-s.rebroadcast.C: + s.handleRebroadcast(ctx) case lwchk := <-s.interestReqs: lwchk.resp <- s.cidIsWanted(lwchk.c) case resp := <-s.latencyReqs: @@ -299,6 +315,12 @@ func (s *Session) handleCancel(keys []cid.Cid) { func (s *Session) handleTick(ctx context.Context) { + if s.fetchcnt == s.lastFetchCount { + s.consecutiveTicks++ + } else { + s.lastFetchCount = s.fetchcnt + } + live := make([]cid.Cid, 0, len(s.liveWants)) now := time.Now() for c := range s.liveWants { @@ -310,12 +332,39 @@ func (s *Session) handleTick(ctx context.Context) { s.pm.RecordPeerRequests(nil, live) s.wm.WantBlocks(ctx, live, nil, s.id) - if len(live) > 0 { + // do no find providers on consecutive ticks + // -- just rely on periodic rebroadcast + if len(live) > 0 && (s.consecutiveTicks == 0) { s.pm.FindMorePeers(ctx, live[0]) } s.resetTick() } +func (s *Session) handleRebroadcast(ctx context.Context) { + fmt.Println("Rebroadcast") + + if len(s.liveWants) == 0 { + return + } + + // TODO: come up with a better strategy for determining when to search + // for new providers for blocks. 
+ s.pm.FindMorePeers(ctx, s.randomLiveWant()) + + s.rebroadcast.Reset(rebroadcastDelay.Get()) +} + +func (s *Session) randomLiveWant() cid.Cid { + i := rand.Intn(len(s.liveWants)) + // picking a random live want + for k := range s.liveWants { + if i == 0 { + return k + } + i-- + } + return cid.Cid{} +} func (s *Session) handleShutdown() { s.tick.Stop() s.notif.Shutdown() @@ -347,6 +396,8 @@ func (s *Session) receiveBlock(ctx context.Context, blk blocks.Block) { s.tofetch.Remove(c) } s.fetchcnt++ + // we've received new wanted blocks, so future ticks are not consecutive + s.consecutiveTicks = 0 s.notif.Publish(blk) toAdd := s.wantBudget() @@ -395,12 +446,15 @@ func (s *Session) averageLatency() time.Duration { } func (s *Session) resetTick() { + var tickDelay time.Duration if s.latTotal == 0 { - s.tick.Reset(provSearchDelay) + tickDelay = provSearchDelay } else { avLat := s.averageLatency() - s.tick.Reset(s.baseTickDelay + (3 * avLat)) + tickDelay = s.baseTickDelay + (3 * avLat) } + tickDelay = tickDelay * time.Duration(1+s.consecutiveTicks) + s.tick.Reset(tickDelay) } func (s *Session) wantBudget() int { diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index 8ff6ede1f..065b459a7 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -6,12 +6,12 @@ import ( "testing" "time" - "github.com/ipfs/go-block-format" - bssrs "github.com/ipfs/go-bitswap/sessionrequestsplitter" "github.com/ipfs/go-bitswap/testutil" + blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" + delay "github.com/ipfs/go-ipfs-delay" peer "github.com/libp2p/go-libp2p-core/peer" ) @@ -42,12 +42,12 @@ func (fwm *fakeWantManager) CancelWants(ctx context.Context, cids []cid.Cid, pee type fakePeerManager struct { lk sync.RWMutex peers []peer.ID - findMorePeersRequested chan struct{} + findMorePeersRequested chan cid.Cid } func (fpm *fakePeerManager) FindMorePeers(ctx 
context.Context, k cid.Cid) { select { - case fpm.findMorePeersRequested <- struct{}{}: + case fpm.findMorePeersRequested <- k: case <-ctx.Done(): } } @@ -193,7 +193,7 @@ func TestSessionFindMorePeers(t *testing.T) { wantReqs := make(chan wantReq, 1) cancelReqs := make(chan wantReq, 1) fwm := &fakeWantManager{wantReqs, cancelReqs} - fpm := &fakePeerManager{findMorePeersRequested: make(chan struct{}, 1)} + fpm := &fakePeerManager{findMorePeersRequested: make(chan cid.Cid, 1)} frs := &fakeRequestSplitter{} id := testutil.GenerateSessionID() session := New(ctx, id, fwm, fpm, frs) @@ -258,3 +258,128 @@ func TestSessionFindMorePeers(t *testing.T) { t.Fatal("Did not find more peers") } } + +func TestSessionFailingToGetFirstBlock(t *testing.T) { + + ctx, cancel := context.WithTimeout(context.Background(), 900*time.Millisecond) + defer cancel() + wantReqs := make(chan wantReq, 1) + cancelReqs := make(chan wantReq, 1) + fwm := &fakeWantManager{wantReqs, cancelReqs} + fpm := &fakePeerManager{findMorePeersRequested: make(chan cid.Cid, 1)} + frs := &fakeRequestSplitter{} + id := testutil.GenerateSessionID() + SetProviderSearchDelay(10 * time.Millisecond) + defer SetProviderSearchDelay(1 * time.Second) + SetRebroadcastDelay(delay.Fixed(100 * time.Millisecond)) + defer SetRebroadcastDelay(delay.Fixed(1 * time.Minute)) + session := New(ctx, id, fwm, fpm, frs) + blockGenerator := blocksutil.NewBlockGenerator() + blks := blockGenerator.Blocks(4) + var cids []cid.Cid + for _, block := range blks { + cids = append(cids, block.Cid()) + } + startTick := time.Now() + _, err := session.GetBlocks(ctx, cids) + if err != nil { + t.Fatal("error getting blocks") + } + + // clear the initial block of wants + select { + case <-wantReqs: + case <-ctx.Done(): + t.Fatal("Did not make first want request ") + } + + // verify a broadcast is made + select { + case receivedWantReq := <-wantReqs: + if len(receivedWantReq.cids) < len(cids) { + t.Fatal("did not rebroadcast whole live list") + } + if 
receivedWantReq.peers != nil { + t.Fatal("did not make a broadcast") + } + case <-ctx.Done(): + t.Fatal("Never rebroadcast want list") + } + + // wait for a request to get more peers to occur + select { + case k := <-fpm.findMorePeersRequested: + if testutil.IndexOf(blks, k) == -1 { + t.Fatal("did not rebroadcast an active want") + } + case <-ctx.Done(): + t.Fatal("Did not find more peers") + } + firstTickLength := time.Since(startTick) + + // wait for another broadcast to occur + select { + case receivedWantReq := <-wantReqs: + if len(receivedWantReq.cids) < len(cids) { + t.Fatal("did not rebroadcast whole live list") + } + if receivedWantReq.peers != nil { + t.Fatal("did not make a broadcast") + } + case <-ctx.Done(): + t.Fatal("Never rebroadcast want list") + } + startTick = time.Now() + // wait for another broadcast to occur + select { + case receivedWantReq := <-wantReqs: + if len(receivedWantReq.cids) < len(cids) { + t.Fatal("did not rebroadcast whole live list") + } + if receivedWantReq.peers != nil { + t.Fatal("did not make a broadcast") + } + case <-ctx.Done(): + t.Fatal("Never rebroadcast want list") + } + consecutiveTickLength := time.Since(startTick) + // tick should take longer + if firstTickLength > consecutiveTickLength { + t.Fatal("Should have increased tick length after first consecutive tick") + } + startTick = time.Now() + // wait for another broadcast to occur + select { + case receivedWantReq := <-wantReqs: + if len(receivedWantReq.cids) < len(cids) { + t.Fatal("did not rebroadcast whole live list") + } + if receivedWantReq.peers != nil { + t.Fatal("did not make a broadcast") + } + case <-ctx.Done(): + t.Fatal("Never rebroadcast want list") + } + secondConsecutiveTickLength := time.Since(startTick) + // tick should take longer + if consecutiveTickLength > secondConsecutiveTickLength { + t.Fatal("Should have increased tick length after first consecutive tick") + } + + // should not have looked for peers on consecutive ticks + select { + case 
<-fpm.findMorePeersRequested: + t.Fatal("Should not have looked for peers on consecutive tick") + default: + } + + // wait for rebroadcast to occur + select { + case k := <-fpm.findMorePeersRequested: + if testutil.IndexOf(blks, k) == -1 { + t.Fatal("did not rebroadcast an active want") + } + case <-ctx.Done(): + t.Fatal("Did not rebroadcast to find more peers") + } +} From 995eab118bb4792f1916b49b939a547b74876ba3 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 29 May 2019 16:51:19 -0700 Subject: [PATCH 0764/1035] fix(sessions): fix data race in test This commit was moved from ipfs/go-bitswap@3104b2da5da56fa1d22c82318b82c66385c78799 --- bitswap/session/session.go | 2 -- bitswap/session/session_test.go | 11 ++++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 0e335f901..26949543c 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -2,7 +2,6 @@ package session import ( "context" - "fmt" "math/rand" "time" @@ -341,7 +340,6 @@ func (s *Session) handleTick(ctx context.Context) { } func (s *Session) handleRebroadcast(ctx context.Context) { - fmt.Println("Rebroadcast") if len(s.liveWants) == 0 { return diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index 065b459a7..b6f7f4084 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -260,8 +260,12 @@ func TestSessionFindMorePeers(t *testing.T) { } func TestSessionFailingToGetFirstBlock(t *testing.T) { + SetProviderSearchDelay(10 * time.Millisecond) + defer SetProviderSearchDelay(1 * time.Second) + SetRebroadcastDelay(delay.Fixed(100 * time.Millisecond)) + defer SetRebroadcastDelay(delay.Fixed(1 * time.Minute)) - ctx, cancel := context.WithTimeout(context.Background(), 900*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() wantReqs := make(chan wantReq, 1) cancelReqs := make(chan wantReq, 1) @@ 
-269,10 +273,7 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { fpm := &fakePeerManager{findMorePeersRequested: make(chan cid.Cid, 1)} frs := &fakeRequestSplitter{} id := testutil.GenerateSessionID() - SetProviderSearchDelay(10 * time.Millisecond) - defer SetProviderSearchDelay(1 * time.Second) - SetRebroadcastDelay(delay.Fixed(100 * time.Millisecond)) - defer SetRebroadcastDelay(delay.Fixed(1 * time.Minute)) + session := New(ctx, id, fwm, fpm, frs) blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(4) From 1250287b0838b659ec9d7052d09eff2720656f8b Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 29 May 2019 17:03:17 -0700 Subject: [PATCH 0765/1035] fix(sessions): consecutive ticks only when wants present Don't count consecutive ticks if there are no active wants This commit was moved from ipfs/go-bitswap@d9488272b78540b6a6c4ce8f3fafced01b4b1b4f --- bitswap/session/session.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 26949543c..060a387d5 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -314,10 +314,12 @@ func (s *Session) handleCancel(keys []cid.Cid) { func (s *Session) handleTick(ctx context.Context) { - if s.fetchcnt == s.lastFetchCount { - s.consecutiveTicks++ - } else { - s.lastFetchCount = s.fetchcnt + if len(s.liveWants) > 0 { + if s.fetchcnt == s.lastFetchCount { + s.consecutiveTicks++ + } else { + s.lastFetchCount = s.fetchcnt + } } live := make([]cid.Cid, 0, len(s.liveWants)) From 3d4f4c66b4cf34d215b3dd407229a974ebc1f179 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Fri, 31 May 2019 14:55:50 -0700 Subject: [PATCH 0766/1035] feat(session): minor code clean-up This commit was moved from ipfs/go-bitswap@e2e33435c76360af154049cbd148c859c1cc8fe2 --- bitswap/session/session.go | 48 +++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 27 deletions(-) diff --git 
a/bitswap/session/session.go b/bitswap/session/session.go index 060a387d5..4afbc6ec7 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -87,7 +87,6 @@ type Session struct { latTotal time.Duration fetchcnt int consecutiveTicks int - lastFetchCount int // identifiers notif notifications.PubSub uuid logging.Loggable @@ -98,24 +97,23 @@ type Session struct { // given context. func New(ctx context.Context, id uint64, wm WantManager, pm PeerManager, srs RequestSplitter) *Session { s := &Session{ - liveWants: make(map[cid.Cid]time.Time), - newReqs: make(chan []cid.Cid), - cancelKeys: make(chan []cid.Cid), - tofetch: newCidQueue(), - pastWants: newCidQueue(), - interestReqs: make(chan interestReq), - latencyReqs: make(chan chan time.Duration), - tickDelayReqs: make(chan time.Duration), - ctx: ctx, - wm: wm, - pm: pm, - srs: srs, - incoming: make(chan blkRecv), - notif: notifications.New(), - uuid: loggables.Uuid("GetBlockRequest"), - baseTickDelay: time.Millisecond * 500, - lastFetchCount: -1, - id: id, + liveWants: make(map[cid.Cid]time.Time), + newReqs: make(chan []cid.Cid), + cancelKeys: make(chan []cid.Cid), + tofetch: newCidQueue(), + pastWants: newCidQueue(), + interestReqs: make(chan interestReq), + latencyReqs: make(chan chan time.Duration), + tickDelayReqs: make(chan time.Duration), + ctx: ctx, + wm: wm, + pm: pm, + srs: srs, + incoming: make(chan blkRecv), + notif: notifications.New(), + uuid: loggables.Uuid("GetBlockRequest"), + baseTickDelay: time.Millisecond * 500, + id: id, } cache, _ := lru.New(2048) @@ -314,14 +312,6 @@ func (s *Session) handleCancel(keys []cid.Cid) { func (s *Session) handleTick(ctx context.Context) { - if len(s.liveWants) > 0 { - if s.fetchcnt == s.lastFetchCount { - s.consecutiveTicks++ - } else { - s.lastFetchCount = s.fetchcnt - } - } - live := make([]cid.Cid, 0, len(s.liveWants)) now := time.Now() for c := range s.liveWants { @@ -339,6 +329,10 @@ func (s *Session) handleTick(ctx context.Context) { 
s.pm.FindMorePeers(ctx, live[0]) } s.resetTick() + + if len(s.liveWants) > 0 { + s.consecutiveTicks++ + } } func (s *Session) handleRebroadcast(ctx context.Context) { From 7c9e882c88c39d824296d65ad6fc9fe8eb40d801 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Mon, 3 Jun 2019 17:38:08 -0700 Subject: [PATCH 0767/1035] feat(session): allow configuring delays per instance Re-setup provider search delay and rebroadcast delay on a per bitswap instance basis This commit was moved from ipfs/go-bitswap@92a82791fb60a3df6b9961bb398aee1c1ad6129b --- bitswap/bitswap.go | 64 ++++++++++++------ bitswap/bitswap_test.go | 4 +- bitswap/bitswap_with_sessions_test.go | 4 +- bitswap/session/session.go | 67 +++++++++---------- bitswap/session/session_test.go | 11 +-- bitswap/sessionmanager/sessionmanager.go | 10 ++- bitswap/sessionmanager/sessionmanager_test.go | 32 +++++---- 7 files changed, 109 insertions(+), 83 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 245950a70..ec89982ff 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -9,6 +9,7 @@ import ( "time" bssrs "github.com/ipfs/go-bitswap/sessionrequestsplitter" + delay "github.com/ipfs/go-ipfs-delay" decision "github.com/ipfs/go-bitswap/decision" bsgetter "github.com/ipfs/go-bitswap/getter" @@ -38,7 +39,8 @@ var _ exchange.SessionExchange = (*Bitswap)(nil) const ( // these requests take at _least_ two minutes at the moment. 
- provideTimeout = time.Minute * 3 + provideTimeout = time.Minute * 3 + defaultProvSearchDelay = time.Second ) var ( @@ -65,6 +67,20 @@ func ProvideEnabled(enabled bool) Option { } } +// ProviderSearchDelay overwrites the global provider search delay +func ProviderSearchDelay(newProvSearchDelay time.Duration) Option { + return func(bs *Bitswap) { + bs.provSearchDelay = newProvSearchDelay + } +} + +// RebroadcastDelay overwrites the global provider rebroadcast delay +func RebroadcastDelay(newRebroadcastDelay delay.D) Option { + return func(bs *Bitswap) { + bs.rebroadcastDelay = newRebroadcastDelay + } +} + // New initializes a BitSwap instance that communicates over the provided // BitSwapNetwork. This function registers the returned instance as the network // delegate. Runs until context is cancelled or bitswap.Close is called. @@ -99,8 +115,10 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, wm := bswm.New(ctx, bspm.New(ctx, peerQueueFactory)) pqm := bspqm.New(ctx, network) - sessionFactory := func(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter) bssm.Session { - return bssession.New(ctx, id, wm, pm, srs) + sessionFactory := func(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter, + provSearchDelay time.Duration, + rebroadcastDelay delay.D) bssm.Session { + return bssession.New(ctx, id, wm, pm, srs, provSearchDelay, rebroadcastDelay) } sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.PeerManager { return bsspm.New(ctx, id, network.ConnectionManager(), pqm) @@ -110,20 +128,22 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, } bs := &Bitswap{ - blockstore: bstore, - engine: decision.NewEngine(ctx, bstore, network.ConnectionManager()), // TODO close the engine with Close() method - network: network, - process: px, - newBlocks: make(chan cid.Cid, HasBlockBufferSize), - provideKeys: make(chan cid.Cid, provideKeysBufferSize), - wm: 
wm, - pqm: pqm, - sm: bssm.New(ctx, sessionFactory, sessionPeerManagerFactory, sessionRequestSplitterFactory), - counters: new(counters), - dupMetric: dupHist, - allMetric: allHist, - sentHistogram: sentHistogram, - provideEnabled: true, + blockstore: bstore, + engine: decision.NewEngine(ctx, bstore, network.ConnectionManager()), // TODO close the engine with Close() method + network: network, + process: px, + newBlocks: make(chan cid.Cid, HasBlockBufferSize), + provideKeys: make(chan cid.Cid, provideKeysBufferSize), + wm: wm, + pqm: pqm, + sm: bssm.New(ctx, sessionFactory, sessionPeerManagerFactory, sessionRequestSplitterFactory), + counters: new(counters), + dupMetric: dupHist, + allMetric: allHist, + sentHistogram: sentHistogram, + provideEnabled: true, + provSearchDelay: defaultProvSearchDelay, + rebroadcastDelay: delay.Fixed(time.Minute), } // apply functional options before starting and running bitswap @@ -190,6 +210,12 @@ type Bitswap struct { // whether or not to make provide announcements provideEnabled bool + + // how long to wait before looking for providers in a session + provSearchDelay time.Duration + + // how often to rebroadcast providing requests to find more optimized providers + rebroadcastDelay delay.D } type counters struct { @@ -232,7 +258,7 @@ func (bs *Bitswap) LedgerForPeer(p peer.ID) *decision.Receipt { // resources, provide a context with a reasonably short deadline (ie. not one // that lasts throughout the lifetime of the server) func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { - session := bs.sm.NewSession(ctx) + session := bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay) return session.GetBlocks(ctx, keys) } @@ -398,5 +424,5 @@ func (bs *Bitswap) IsOnline() bool { // be more efficient in its requests to peers. If you are using a session // from go-blockservice, it will create a bitswap session automatically. 
func (bs *Bitswap) NewSession(ctx context.Context) exchange.Fetcher { - return bs.sm.NewSession(ctx) + return bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay) } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index ed4b31a6b..777e2b46f 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -102,11 +102,9 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { } func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { - bssession.SetProviderSearchDelay(50 * time.Millisecond) - defer bssession.SetProviderSearchDelay(time.Second) net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) block := blocks.NewBlock([]byte("block")) - ig := testinstance.NewTestInstanceGenerator(net, bitswap.ProvideEnabled(false)) + ig := testinstance.NewTestInstanceGenerator(net, bitswap.ProvideEnabled(false), bitswap.ProviderSearchDelay(50*time.Millisecond)) defer ig.Close() hasBlock := ig.Next() diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/bitswap_with_sessions_test.go index 85d936c4e..db7255c80 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + bitswap "github.com/ipfs/go-bitswap" bssession "github.com/ipfs/go-bitswap/session" testinstance "github.com/ipfs/go-bitswap/testinstance" blocks "github.com/ipfs/go-block-format" @@ -161,9 +162,8 @@ func TestFetchNotConnected(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - bssession.SetProviderSearchDelay(10 * time.Millisecond) vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet) + ig := testinstance.NewTestInstanceGenerator(vnet, bitswap.ProviderSearchDelay(10*time.Millisecond)) defer ig.Close() bgen := blocksutil.NewBlockGenerator() diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 4afbc6ec7..6ac47470a 100644 --- a/bitswap/session/session.go +++ 
b/bitswap/session/session.go @@ -87,6 +87,8 @@ type Session struct { latTotal time.Duration fetchcnt int consecutiveTicks int + provSearchDelay time.Duration + rebroadcastDelay delay.D // identifiers notif notifications.PubSub uuid logging.Loggable @@ -95,25 +97,33 @@ type Session struct { // New creates a new bitswap session whose lifetime is bounded by the // given context. -func New(ctx context.Context, id uint64, wm WantManager, pm PeerManager, srs RequestSplitter) *Session { +func New(ctx context.Context, + id uint64, + wm WantManager, + pm PeerManager, + srs RequestSplitter, + provSearchDelay time.Duration, + rebroadcastDelay delay.D) *Session { s := &Session{ - liveWants: make(map[cid.Cid]time.Time), - newReqs: make(chan []cid.Cid), - cancelKeys: make(chan []cid.Cid), - tofetch: newCidQueue(), - pastWants: newCidQueue(), - interestReqs: make(chan interestReq), - latencyReqs: make(chan chan time.Duration), - tickDelayReqs: make(chan time.Duration), - ctx: ctx, - wm: wm, - pm: pm, - srs: srs, - incoming: make(chan blkRecv), - notif: notifications.New(), - uuid: loggables.Uuid("GetBlockRequest"), - baseTickDelay: time.Millisecond * 500, - id: id, + liveWants: make(map[cid.Cid]time.Time), + newReqs: make(chan []cid.Cid), + cancelKeys: make(chan []cid.Cid), + tofetch: newCidQueue(), + pastWants: newCidQueue(), + interestReqs: make(chan interestReq), + latencyReqs: make(chan chan time.Duration), + tickDelayReqs: make(chan time.Duration), + ctx: ctx, + wm: wm, + pm: pm, + srs: srs, + incoming: make(chan blkRecv), + notif: notifications.New(), + uuid: loggables.Uuid("GetBlockRequest"), + baseTickDelay: time.Millisecond * 500, + id: id, + provSearchDelay: provSearchDelay, + rebroadcastDelay: rebroadcastDelay, } cache, _ := lru.New(2048) @@ -226,24 +236,11 @@ func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { } } -var provSearchDelay = time.Second -var rebroadcastDelay = delay.Fixed(time.Minute) - -// SetProviderSearchDelay overwrites the global 
provider search delay -func SetProviderSearchDelay(newProvSearchDelay time.Duration) { - provSearchDelay = newProvSearchDelay -} - -// SetRebroadcastDelay overwrites the global provider rebroadcast delay -func SetRebroadcastDelay(newRebroadcastDelay delay.D) { - rebroadcastDelay = newRebroadcastDelay -} - // Session run loop -- everything function below here should not be called // of this loop func (s *Session) run(ctx context.Context) { - s.tick = time.NewTimer(provSearchDelay) - s.rebroadcast = time.NewTimer(rebroadcastDelay.Get()) + s.tick = time.NewTimer(s.provSearchDelay) + s.rebroadcast = time.NewTimer(s.rebroadcastDelay.Get()) for { select { case blk := <-s.incoming: @@ -345,7 +342,7 @@ func (s *Session) handleRebroadcast(ctx context.Context) { // for new providers for blocks. s.pm.FindMorePeers(ctx, s.randomLiveWant()) - s.rebroadcast.Reset(rebroadcastDelay.Get()) + s.rebroadcast.Reset(s.rebroadcastDelay.Get()) } func (s *Session) randomLiveWant() cid.Cid { @@ -442,7 +439,7 @@ func (s *Session) averageLatency() time.Duration { func (s *Session) resetTick() { var tickDelay time.Duration if s.latTotal == 0 { - tickDelay = provSearchDelay + tickDelay = s.provSearchDelay } else { avLat := s.averageLatency() tickDelay = s.baseTickDelay + (3 * avLat) diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index b6f7f4084..751f9f0cd 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -84,7 +84,7 @@ func TestSessionGetBlocks(t *testing.T) { fpm := &fakePeerManager{} frs := &fakeRequestSplitter{} id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, frs) + session := New(ctx, id, fwm, fpm, frs, time.Second, delay.Fixed(time.Minute)) blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) var cids []cid.Cid @@ -196,7 +196,7 @@ func TestSessionFindMorePeers(t *testing.T) { fpm := &fakePeerManager{findMorePeersRequested: make(chan cid.Cid, 1)} 
frs := &fakeRequestSplitter{} id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, frs) + session := New(ctx, id, fwm, fpm, frs, time.Second, delay.Fixed(time.Minute)) session.SetBaseTickDelay(200 * time.Microsecond) blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) @@ -260,11 +260,6 @@ func TestSessionFindMorePeers(t *testing.T) { } func TestSessionFailingToGetFirstBlock(t *testing.T) { - SetProviderSearchDelay(10 * time.Millisecond) - defer SetProviderSearchDelay(1 * time.Second) - SetRebroadcastDelay(delay.Fixed(100 * time.Millisecond)) - defer SetRebroadcastDelay(delay.Fixed(1 * time.Minute)) - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() wantReqs := make(chan wantReq, 1) @@ -274,7 +269,7 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { frs := &fakeRequestSplitter{} id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, frs) + session := New(ctx, id, fwm, fpm, frs, 10*time.Millisecond, delay.Fixed(100*time.Millisecond)) blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(4) var cids []cid.Cid diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index 1b4431153..a2617073b 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -3,9 +3,11 @@ package sessionmanager import ( "context" "sync" + "time" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" + delay "github.com/ipfs/go-ipfs-delay" bssession "github.com/ipfs/go-bitswap/session" exchange "github.com/ipfs/go-ipfs-exchange-interface" @@ -27,7 +29,7 @@ type sesTrk struct { } // SessionFactory generates a new session for the SessionManager to track. 
-type SessionFactory func(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter) Session +type SessionFactory func(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter, provSearchDelay time.Duration, rebroadcastDelay delay.D) Session // RequestSplitterFactory generates a new request splitter for a session. type RequestSplitterFactory func(ctx context.Context) bssession.RequestSplitter @@ -64,13 +66,15 @@ func New(ctx context.Context, sessionFactory SessionFactory, peerManagerFactory // NewSession initializes a session with the given context, and adds to the // session manager. -func (sm *SessionManager) NewSession(ctx context.Context) exchange.Fetcher { +func (sm *SessionManager) NewSession(ctx context.Context, + provSearchDelay time.Duration, + rebroadcastDelay delay.D) exchange.Fetcher { id := sm.GetNextSessionID() sessionctx, cancel := context.WithCancel(ctx) pm := sm.peerManagerFactory(sessionctx, id) srs := sm.requestSplitterFactory(sessionctx) - session := sm.sessionFactory(sessionctx, id, pm, srs) + session := sm.sessionFactory(sessionctx, id, pm, srs, provSearchDelay, rebroadcastDelay) tracked := sesTrk{session, pm, srs} sm.sessLk.Lock() sm.sessions = append(sm.sessions, tracked) diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index ff0ec15db..b858f7dd7 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -6,6 +6,7 @@ import ( "time" bssrs "github.com/ipfs/go-bitswap/sessionrequestsplitter" + delay "github.com/ipfs/go-ipfs-delay" bssession "github.com/ipfs/go-bitswap/session" @@ -53,7 +54,12 @@ func (frs *fakeRequestSplitter) RecordUniqueBlock() {} var nextInterestedIn bool -func sessionFactory(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter) Session { +func sessionFactory(ctx context.Context, + id uint64, + pm 
bssession.PeerManager, + srs bssession.RequestSplitter, + provSearchDelay time.Duration, + rebroadcastDelay delay.D) Session { return &fakeSession{ interested: nextInterestedIn, receivedBlock: false, @@ -83,18 +89,18 @@ func TestAddingSessions(t *testing.T) { nextInterestedIn = true currentID := sm.GetNextSessionID() - firstSession := sm.NewSession(ctx).(*fakeSession) + firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) if firstSession.id != firstSession.pm.id || firstSession.id != currentID+1 { t.Fatal("session does not have correct id set") } - secondSession := sm.NewSession(ctx).(*fakeSession) + secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) if secondSession.id != secondSession.pm.id || secondSession.id != firstSession.id+1 { t.Fatal("session does not have correct id set") } sm.GetNextSessionID() - thirdSession := sm.NewSession(ctx).(*fakeSession) + thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) if thirdSession.id != thirdSession.pm.id || thirdSession.id != secondSession.id+2 { t.Fatal("session does not have correct id set") @@ -117,11 +123,11 @@ func TestReceivingBlocksWhenNotInterested(t *testing.T) { block := blocks.NewBlock([]byte("block")) // we'll be interested in all blocks for this test nextInterestedIn = false - firstSession := sm.NewSession(ctx).(*fakeSession) + firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) nextInterestedIn = true - secondSession := sm.NewSession(ctx).(*fakeSession) + secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) nextInterestedIn = false - thirdSession := sm.NewSession(ctx).(*fakeSession) + thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) sm.ReceiveBlockFrom(p, block) if firstSession.receivedBlock || @@ -140,9 +146,9 @@ func TestRemovingPeersWhenManagerContextCancelled(t *testing.T) { block 
:= blocks.NewBlock([]byte("block")) // we'll be interested in all blocks for this test nextInterestedIn = true - firstSession := sm.NewSession(ctx).(*fakeSession) - secondSession := sm.NewSession(ctx).(*fakeSession) - thirdSession := sm.NewSession(ctx).(*fakeSession) + firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) cancel() // wait for sessions to get removed @@ -165,10 +171,10 @@ func TestRemovingPeersWhenSessionContextCancelled(t *testing.T) { block := blocks.NewBlock([]byte("block")) // we'll be interested in all blocks for this test nextInterestedIn = true - firstSession := sm.NewSession(ctx).(*fakeSession) + firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) sessionCtx, sessionCancel := context.WithCancel(ctx) - secondSession := sm.NewSession(sessionCtx).(*fakeSession) - thirdSession := sm.NewSession(ctx).(*fakeSession) + secondSession := sm.NewSession(sessionCtx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) sessionCancel() // wait for sessions to get removed From 36f90a1ee48be0ea2a6f8c6b1d2b4ee51fc994ac Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Mon, 10 Jun 2019 20:18:40 +0200 Subject: [PATCH 0768/1035] Enchanced logging for bitswap License: MIT Signed-off-by: Jakub Sztandera This commit was moved from ipfs/go-bitswap@4c5fb600b81998a47fe0ba3ff96b7473d515ccf4 --- bitswap/bitswap.go | 3 ++- bitswap/wantmanager/wantmanager.go | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index ec89982ff..a05c4ca6b 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -334,10 +334,11 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p 
peer.ID, incoming bsmsg bs.updateReceiveCounters(b) bs.sm.UpdateReceiveCounters(b) - log.Debugf("got block %s from %s", b, p) + log.Debugf("[recv] block; cid=%s, peer=%s", b.Cid(), p) // skip received blocks that are not in the wantlist if !bs.wm.IsWanted(b.Cid()) { + log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), p) return } diff --git a/bitswap/wantmanager/wantmanager.go b/bitswap/wantmanager/wantmanager.go index 4203d14f4..2ed7082e4 100644 --- a/bitswap/wantmanager/wantmanager.go +++ b/bitswap/wantmanager/wantmanager.go @@ -70,12 +70,13 @@ func New(ctx context.Context, peerHandler PeerHandler) *WantManager { // WantBlocks adds the given cids to the wantlist, tracked by the given session. func (wm *WantManager) WantBlocks(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) { - log.Infof("want blocks: %s", ks) + log.Debugf("[wantlist] want blocks; cids=%s, peers=%s, ses=%d", ks, peers, ses) wm.addEntries(ctx, ks, peers, false, ses) } // CancelWants removes the given cids from the wantlist, tracked by the given session. 
func (wm *WantManager) CancelWants(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) { + log.Debugf("[wantlist] unwant blocks; cids=%s, peers=%s, ses=%d", ks, peers, ses) wm.addEntries(context.Background(), ks, peers, true, ses) } From 8b357ad7d2c7ff9cc164a1b8372d52d76ea553c5 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 11 Jun 2019 17:04:50 -0700 Subject: [PATCH 0769/1035] fix(session): obey delay function when searching for more providers This commit was moved from ipfs/go-bitswap@c783e018cd986b3773e8d7d2ede9d2be5e9495fa --- bitswap/session/session.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 6ac47470a..1db2abc3c 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -240,7 +240,7 @@ func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { // of this loop func (s *Session) run(ctx context.Context) { s.tick = time.NewTimer(s.provSearchDelay) - s.rebroadcast = time.NewTimer(s.rebroadcastDelay.Get()) + s.rebroadcast = time.NewTimer(s.rebroadcastDelay.NextWaitTime()) for { select { case blk := <-s.incoming: @@ -342,7 +342,7 @@ func (s *Session) handleRebroadcast(ctx context.Context) { // for new providers for blocks. 
s.pm.FindMorePeers(ctx, s.randomLiveWant()) - s.rebroadcast.Reset(s.rebroadcastDelay.Get()) + s.rebroadcast.Reset(s.rebroadcastDelay.NextWaitTime()) } func (s *Session) randomLiveWant() cid.Cid { From 8ed5f63e82b4431f04903b6fb950e7dc1587c1df Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 11 Jun 2019 17:11:06 -0700 Subject: [PATCH 0770/1035] nit(session): improve naming This commit was moved from ipfs/go-bitswap@2a00256b53fa695b161431a7a4502e08cf627cf7 --- bitswap/session/session.go | 100 ++++++++++++++++++------------------- 1 file changed, 50 insertions(+), 50 deletions(-) diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 1db2abc3c..04fd2bbdb 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -77,18 +77,18 @@ type Session struct { tickDelayReqs chan time.Duration // do not touch outside run loop - tofetch *cidQueue - interest *lru.Cache - pastWants *cidQueue - liveWants map[cid.Cid]time.Time - tick *time.Timer - rebroadcast *time.Timer - baseTickDelay time.Duration - latTotal time.Duration - fetchcnt int - consecutiveTicks int - provSearchDelay time.Duration - rebroadcastDelay delay.D + tofetch *cidQueue + interest *lru.Cache + pastWants *cidQueue + liveWants map[cid.Cid]time.Time + idleTick *time.Timer + periodicSearchTimer *time.Timer + baseTickDelay time.Duration + latTotal time.Duration + fetchcnt int + consecutiveTicks int + initialSearchDelay time.Duration + periodicSearchDelay delay.D // identifiers notif notifications.PubSub uuid logging.Loggable @@ -102,28 +102,28 @@ func New(ctx context.Context, wm WantManager, pm PeerManager, srs RequestSplitter, - provSearchDelay time.Duration, - rebroadcastDelay delay.D) *Session { + initialSearchDelay time.Duration, + periodicSearchDelay delay.D) *Session { s := &Session{ - liveWants: make(map[cid.Cid]time.Time), - newReqs: make(chan []cid.Cid), - cancelKeys: make(chan []cid.Cid), - tofetch: newCidQueue(), - pastWants: newCidQueue(), - interestReqs: make(chan 
interestReq), - latencyReqs: make(chan chan time.Duration), - tickDelayReqs: make(chan time.Duration), - ctx: ctx, - wm: wm, - pm: pm, - srs: srs, - incoming: make(chan blkRecv), - notif: notifications.New(), - uuid: loggables.Uuid("GetBlockRequest"), - baseTickDelay: time.Millisecond * 500, - id: id, - provSearchDelay: provSearchDelay, - rebroadcastDelay: rebroadcastDelay, + liveWants: make(map[cid.Cid]time.Time), + newReqs: make(chan []cid.Cid), + cancelKeys: make(chan []cid.Cid), + tofetch: newCidQueue(), + pastWants: newCidQueue(), + interestReqs: make(chan interestReq), + latencyReqs: make(chan chan time.Duration), + tickDelayReqs: make(chan time.Duration), + ctx: ctx, + wm: wm, + pm: pm, + srs: srs, + incoming: make(chan blkRecv), + notif: notifications.New(), + uuid: loggables.Uuid("GetBlockRequest"), + baseTickDelay: time.Millisecond * 500, + id: id, + initialSearchDelay: initialSearchDelay, + periodicSearchDelay: periodicSearchDelay, } cache, _ := lru.New(2048) @@ -239,8 +239,8 @@ func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { // Session run loop -- everything function below here should not be called // of this loop func (s *Session) run(ctx context.Context) { - s.tick = time.NewTimer(s.provSearchDelay) - s.rebroadcast = time.NewTimer(s.rebroadcastDelay.NextWaitTime()) + s.idleTick = time.NewTimer(s.initialSearchDelay) + s.periodicSearchTimer = time.NewTimer(s.periodicSearchDelay.NextWaitTime()) for { select { case blk := <-s.incoming: @@ -253,10 +253,10 @@ func (s *Session) run(ctx context.Context) { s.handleNewRequest(ctx, keys) case keys := <-s.cancelKeys: s.handleCancel(keys) - case <-s.tick.C: - s.handleTick(ctx) - case <-s.rebroadcast.C: - s.handleRebroadcast(ctx) + case <-s.idleTick.C: + s.handleIdleTick(ctx) + case <-s.periodicSearchTimer.C: + s.handlePeriodicSearch(ctx) case lwchk := <-s.interestReqs: lwchk.resp <- s.cidIsWanted(lwchk.c) case resp := <-s.latencyReqs: @@ -271,7 +271,7 @@ func (s *Session) run(ctx context.Context) 
{ } func (s *Session) handleIncomingBlock(ctx context.Context, blk blkRecv) { - s.tick.Stop() + s.idleTick.Stop() if blk.from != "" { s.pm.RecordPeerResponse(blk.from, blk.blk.Cid()) @@ -279,7 +279,7 @@ func (s *Session) handleIncomingBlock(ctx context.Context, blk blkRecv) { s.receiveBlock(ctx, blk.blk) - s.resetTick() + s.resetIdleTick() } func (s *Session) handleNewRequest(ctx context.Context, keys []cid.Cid) { @@ -307,7 +307,7 @@ func (s *Session) handleCancel(keys []cid.Cid) { } } -func (s *Session) handleTick(ctx context.Context) { +func (s *Session) handleIdleTick(ctx context.Context) { live := make([]cid.Cid, 0, len(s.liveWants)) now := time.Now() @@ -321,18 +321,18 @@ func (s *Session) handleTick(ctx context.Context) { s.wm.WantBlocks(ctx, live, nil, s.id) // do no find providers on consecutive ticks - // -- just rely on periodic rebroadcast + // -- just rely on periodic search widening if len(live) > 0 && (s.consecutiveTicks == 0) { s.pm.FindMorePeers(ctx, live[0]) } - s.resetTick() + s.resetIdleTick() if len(s.liveWants) > 0 { s.consecutiveTicks++ } } -func (s *Session) handleRebroadcast(ctx context.Context) { +func (s *Session) handlePeriodicSearch(ctx context.Context) { if len(s.liveWants) == 0 { return @@ -342,7 +342,7 @@ func (s *Session) handleRebroadcast(ctx context.Context) { // for new providers for blocks. 
s.pm.FindMorePeers(ctx, s.randomLiveWant()) - s.rebroadcast.Reset(s.rebroadcastDelay.NextWaitTime()) + s.periodicSearchTimer.Reset(s.periodicSearchDelay.NextWaitTime()) } func (s *Session) randomLiveWant() cid.Cid { @@ -357,7 +357,7 @@ func (s *Session) randomLiveWant() cid.Cid { return cid.Cid{} } func (s *Session) handleShutdown() { - s.tick.Stop() + s.idleTick.Stop() s.notif.Shutdown() live := make([]cid.Cid, 0, len(s.liveWants)) @@ -436,16 +436,16 @@ func (s *Session) averageLatency() time.Duration { return s.latTotal / time.Duration(s.fetchcnt) } -func (s *Session) resetTick() { +func (s *Session) resetIdleTick() { var tickDelay time.Duration if s.latTotal == 0 { - tickDelay = s.provSearchDelay + tickDelay = s.initialSearchDelay } else { avLat := s.averageLatency() tickDelay = s.baseTickDelay + (3 * avLat) } tickDelay = tickDelay * time.Duration(1+s.consecutiveTicks) - s.tick.Reset(tickDelay) + s.idleTick.Reset(tickDelay) } func (s *Session) wantBudget() int { From 108a398a8e0e3a53db7c5b3c61846534ca4adcf1 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 11 Jun 2019 17:18:02 -0700 Subject: [PATCH 0771/1035] feat(session): when periodically searching, broadcast want to connected peers This fixes the case where: 1. I start downloading something. 2. A friend jumps on our WiFi. 3. Our IPFS daemons connect via local discovery. 4. I never notice that they have the file I'm looking for because I'm already downloading it from a peer. 5. The peer I'm downloading from is _really_ slow. 
This commit was moved from ipfs/go-bitswap@eb28a2e1cb5a345f7be48329b7a18fcb1702183a --- bitswap/session/session.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 04fd2bbdb..f10d9605c 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -333,14 +333,15 @@ func (s *Session) handleIdleTick(ctx context.Context) { } func (s *Session) handlePeriodicSearch(ctx context.Context) { - - if len(s.liveWants) == 0 { + randomWant := s.randomLiveWant() + if !randomWant.Defined() { return } // TODO: come up with a better strategy for determining when to search // for new providers for blocks. - s.pm.FindMorePeers(ctx, s.randomLiveWant()) + s.pm.FindMorePeers(ctx, randomWant) + s.wm.WantBlocks(ctx, []cid.Cid{randomWant}, nil, s.id) s.periodicSearchTimer.Reset(s.periodicSearchDelay.NextWaitTime()) } From 600cf8506ec5216a5b0f8e375f38f5f2075aa38e Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 14 Jun 2019 02:02:30 -0700 Subject: [PATCH 0772/1035] aggressively free memory This ensures we don't keep large buffers allocated. 
This commit was moved from ipfs/go-bitswap@70fd0fd93e76c95fab000a2aa6447ff2697261f8 --- bitswap/message/message.go | 42 +++++++++++++++++++++++++----------- bitswap/network/ipfs_impl.go | 6 +++--- 2 files changed, 32 insertions(+), 16 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index df44d1123..a16046197 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -1,6 +1,7 @@ package message import ( + "encoding/binary" "fmt" "io" @@ -8,8 +9,9 @@ import ( wantlist "github.com/ipfs/go-bitswap/wantlist" blocks "github.com/ipfs/go-block-format" - ggio "github.com/gogo/protobuf/io" cid "github.com/ipfs/go-cid" + pool "github.com/libp2p/go-buffer-pool" + msgio "github.com/libp2p/go-msgio" "github.com/libp2p/go-libp2p-core/network" ) @@ -170,18 +172,22 @@ func (m *impl) AddBlock(b blocks.Block) { // FromNet generates a new BitswapMessage from incoming data on an io.Reader. func FromNet(r io.Reader) (BitSwapMessage, error) { - pbr := ggio.NewDelimitedReader(r, network.MessageSizeMax) - return FromPBReader(pbr) + reader := msgio.NewVarintReaderSize(r, network.MessageSizeMax) + return FromMsgReader(reader) } // FromPBReader generates a new Bitswap message from a gogo-protobuf reader -func FromPBReader(pbr ggio.Reader) (BitSwapMessage, error) { - pb := new(pb.Message) - if err := pbr.ReadMsg(pb); err != nil { +func FromMsgReader(r msgio.Reader) (BitSwapMessage, error) { + msg, err := r.ReadMsg() + if err != nil { return nil, err } - - return newMessageFromProto(*pb) + var pb pb.Message + if err := pb.Unmarshal(msg); err != nil { + return nil, err + } + r.ReleaseMsg(msg) + return newMessageFromProto(pb) } func (m *impl) ToProtoV0() *pb.Message { @@ -228,15 +234,25 @@ func (m *impl) ToProtoV1() *pb.Message { } func (m *impl) ToNetV0(w io.Writer) error { - pbw := ggio.NewDelimitedWriter(w) - - return pbw.WriteMsg(m.ToProtoV0()) + return write(w, m.ToProtoV0()) } func (m *impl) ToNetV1(w io.Writer) error { - pbw := 
ggio.NewDelimitedWriter(w) + return write(w, m.ToProtoV1()) +} - return pbw.WriteMsg(m.ToProtoV1()) +func write(w io.Writer, m *pb.Message) error { + size := m.Size() + buf := pool.Get(size + binary.MaxVarintLen64) + defer pool.Put(buf) + n := binary.PutUvarint(buf, uint64(size)) + if written, err := m.MarshalTo(buf[n:]); err != nil { + return err + } else { + n += written + } + _, err := w.Write(buf[:n]) + return err } func (m *impl) Loggable() map[string]interface{} { diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 2cfbbcbf3..52ee64c67 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -10,7 +10,6 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" "github.com/libp2p/go-libp2p-core/helpers" - ggio "github.com/gogo/protobuf/io" cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" "github.com/libp2p/go-libp2p-core/connmgr" @@ -19,6 +18,7 @@ import ( "github.com/libp2p/go-libp2p-core/peer" peerstore "github.com/libp2p/go-libp2p-core/peerstore" "github.com/libp2p/go-libp2p-core/routing" + msgio "github.com/libp2p/go-msgio" ma "github.com/multiformats/go-multiaddr" ) @@ -178,9 +178,9 @@ func (bsnet *impl) handleNewStream(s network.Stream) { return } - reader := ggio.NewDelimitedReader(s, network.MessageSizeMax) + reader := msgio.NewVarintReaderSize(s, network.MessageSizeMax) for { - received, err := bsmsg.FromPBReader(reader) + received, err := bsmsg.FromMsgReader(reader) if err != nil { if err != io.EOF { s.Reset() From 4e49464830d0bf2b0dfa2cd0eadb17f341b81eb7 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 14 Jun 2019 10:19:51 -0700 Subject: [PATCH 0773/1035] fix: rand.Intn(0) panics This commit was moved from ipfs/go-bitswap@9f3ffaf6ed0ddd53b1a15c68639afb6e17dd57a5 --- bitswap/session/session.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bitswap/session/session.go b/bitswap/session/session.go index f10d9605c..0757ab11e 100644 --- a/bitswap/session/session.go +++ 
b/bitswap/session/session.go @@ -347,6 +347,9 @@ func (s *Session) handlePeriodicSearch(ctx context.Context) { } func (s *Session) randomLiveWant() cid.Cid { + if len(s.liveWants) == 0 { + return cid.Cid{} + } i := rand.Intn(len(s.liveWants)) // picking a random live want for k := range s.liveWants { From 381e82defc41e6f1ebd6454961b7f8234c683008 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 19 Jun 2019 11:27:50 +0200 Subject: [PATCH 0774/1035] chore: whitespace This commit was moved from ipfs/go-bitswap@9bf38f7e8f6a74b7f2715dee3fff6cddfbca2479 --- bitswap/message/message.go | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index a16046197..08c85ea6f 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -182,11 +182,14 @@ func FromMsgReader(r msgio.Reader) (BitSwapMessage, error) { if err != nil { return nil, err } + var pb pb.Message - if err := pb.Unmarshal(msg); err != nil { + err = pb.Unmarshal(msg) + r.ReleaseMsg(msg) + if err != nil { return nil, err } - r.ReleaseMsg(msg) + return newMessageFromProto(pb) } @@ -243,15 +246,19 @@ func (m *impl) ToNetV1(w io.Writer) error { func write(w io.Writer, m *pb.Message) error { size := m.Size() + buf := pool.Get(size + binary.MaxVarintLen64) defer pool.Put(buf) + n := binary.PutUvarint(buf, uint64(size)) - if written, err := m.MarshalTo(buf[n:]); err != nil { + + written, err := m.MarshalTo(buf[n:]) + if err != nil { return err - } else { - n += written } - _, err := w.Write(buf[:n]) + n += written + + _, err = w.Write(buf[:n]) return err } From bef10ee64cb14de020bf972f20bac0f2ff1bff32 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Sat, 22 Jun 2019 14:16:59 +0200 Subject: [PATCH 0775/1035] fix(benchmark): make benchmarks non-failing This commit was moved from ipfs/go-bitswap@0d419f75ce584aa39081f661698311005cddf545 --- bitswap/benchmarks_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 
deletions(-) diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index dbe05889d..8fd65a2a0 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -201,9 +201,9 @@ func runDistribution(b *testing.B, instances []testinstance.Instance, blocks []b } benchmarkLog = append(benchmarkLog, stats) b.Logf("send/recv: %d / %d", nst.MessagesSent, nst.MessagesRecvd) - if st.DupBlksReceived != 0 { - b.Fatalf("got %d duplicate blocks!", st.DupBlksReceived) - } + //if st.DupBlksReceived != 0 { + // b.Fatalf("got %d duplicate blocks!", st.DupBlksReceived) + //} } func allToAll(b *testing.B, provs []testinstance.Instance, blocks []blocks.Block) { From a9d08d390b0d60cee118791d62e1c4949f56c1f5 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Tue, 2 Jul 2019 16:34:59 -0700 Subject: [PATCH 0776/1035] test(benchmarks): minor usage fixes add proper usage of go benchmark timing and environment random seed support for CI This commit was moved from ipfs/go-bitswap@fb007f94fc9fd7d0f64996838d068a0d55271b2e --- bitswap/benchmarks_test.go | 58 ++++++++++++++++++++++---------------- 1 file changed, 34 insertions(+), 24 deletions(-) diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index 8fd65a2a0..4293a9870 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -5,6 +5,8 @@ import ( "encoding/json" "io/ioutil" "math/rand" + "os" + "strconv" "sync" "testing" "time" @@ -115,21 +117,27 @@ const stdBlockSize = 8000 func BenchmarkDupsManyNodesRealWorldNetwork(b *testing.B) { benchmarkLog = nil + benchmarkSeed, err := strconv.ParseInt(os.Getenv("BENCHMARK_SEED"), 10, 64) + var randomGen *rand.Rand = nil + if err == nil { + randomGen = rand.New(rand.NewSource(benchmarkSeed)) + } + fastNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( mediumSpeed-fastSpeed, slowSpeed-fastSpeed, - 0.0, 0.0, distribution, nil) + 0.0, 0.0, distribution, randomGen) fastNetworkDelay := delay.Delay(fastSpeed, fastNetworkDelayGenerator) - 
fastBandwidthGenerator := tn.VariableRateLimitGenerator(fastBandwidth, fastBandwidthDeviation, nil) + fastBandwidthGenerator := tn.VariableRateLimitGenerator(fastBandwidth, fastBandwidthDeviation, randomGen) averageNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( mediumSpeed-fastSpeed, slowSpeed-fastSpeed, - 0.3, 0.3, distribution, nil) + 0.3, 0.3, distribution, randomGen) averageNetworkDelay := delay.Delay(fastSpeed, averageNetworkDelayGenerator) - averageBandwidthGenerator := tn.VariableRateLimitGenerator(mediumBandwidth, mediumBandwidthDeviation, nil) + averageBandwidthGenerator := tn.VariableRateLimitGenerator(mediumBandwidth, mediumBandwidthDeviation, randomGen) slowNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( mediumSpeed-fastSpeed, superSlowSpeed-fastSpeed, - 0.3, 0.3, distribution, nil) + 0.3, 0.3, distribution, randomGen) slowNetworkDelay := delay.Delay(fastSpeed, slowNetworkDelayGenerator) - slowBandwidthGenerator := tn.VariableRateLimitGenerator(slowBandwidth, slowBandwidthDeviation, nil) + slowBandwidthGenerator := tn.VariableRateLimitGenerator(slowBandwidth, slowBandwidthDeviation, randomGen) b.Run("200Nodes-AllToAll-BigBatch-FastNetwork", func(b *testing.B) { subtestDistributeAndFetchRateLimited(b, 300, 200, fastNetworkDelay, fastBandwidthGenerator, stdBlockSize, allToAll, batchFetchAll) @@ -145,30 +153,35 @@ func BenchmarkDupsManyNodesRealWorldNetwork(b *testing.B) { } func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, df distFunc, ff fetchFunc) { - start := time.Now() - net := tn.VirtualNetwork(mockrouting.NewServer(), d) + for i := 0; i < b.N; i++ { + start := time.Now() + net := tn.VirtualNetwork(mockrouting.NewServer(), d) - ig := testinstance.NewTestInstanceGenerator(net) - defer ig.Close() + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() - bg := blocksutil.NewBlockGenerator() + bg := blocksutil.NewBlockGenerator() - instances := ig.Instances(numnodes) - blocks := 
bg.Blocks(numblks) - runDistribution(b, instances, blocks, df, ff, start) + instances := ig.Instances(numnodes) + blocks := bg.Blocks(numblks) + runDistribution(b, instances, blocks, df, ff, start) + } } func subtestDistributeAndFetchRateLimited(b *testing.B, numnodes, numblks int, d delay.D, rateLimitGenerator tn.RateLimitGenerator, blockSize int64, df distFunc, ff fetchFunc) { - start := time.Now() - net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) + for i := 0; i < b.N; i++ { - ig := testinstance.NewTestInstanceGenerator(net) - defer ig.Close() + start := time.Now() + net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) - instances := ig.Instances(numnodes) - blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() - runDistribution(b, instances, blocks, df, ff, start) + instances := ig.Instances(numnodes) + blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) + + runDistribution(b, instances, blocks, df, ff, start) + } } func runDistribution(b *testing.B, instances []testinstance.Instance, blocks []blocks.Block, df distFunc, ff fetchFunc, start time.Time) { @@ -201,9 +214,6 @@ func runDistribution(b *testing.B, instances []testinstance.Instance, blocks []b } benchmarkLog = append(benchmarkLog, stats) b.Logf("send/recv: %d / %d", nst.MessagesSent, nst.MessagesRecvd) - //if st.DupBlksReceived != 0 { - // b.Fatalf("got %d duplicate blocks!", st.DupBlksReceived) - //} } func allToAll(b *testing.B, provs []testinstance.Instance, blocks []blocks.Block) { From 224211923023f2921a9033d1b50533698d144a85 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Mon, 17 Dec 2018 14:34:33 -0800 Subject: [PATCH 0777/1035] feat(sessions): track real latency per peer Return optimized peers in real latency order, weighted toward recent requests This commit was moved from ipfs/go-bitswap@98f01e7f26ab6cf49ed9412e7ba579710900ea19 --- 
bitswap/sessionpeermanager/latencytracker.go | 65 +++++++++++++ bitswap/sessionpeermanager/peerdata.go | 41 ++++++++ .../sessionpeermanager/sessionpeermanager.go | 93 +++++++++++++------ .../sessionpeermanager_test.go | 21 +++-- 4 files changed, 183 insertions(+), 37 deletions(-) create mode 100644 bitswap/sessionpeermanager/latencytracker.go create mode 100644 bitswap/sessionpeermanager/peerdata.go diff --git a/bitswap/sessionpeermanager/latencytracker.go b/bitswap/sessionpeermanager/latencytracker.go new file mode 100644 index 000000000..ca756a037 --- /dev/null +++ b/bitswap/sessionpeermanager/latencytracker.go @@ -0,0 +1,65 @@ +package sessionpeermanager + +import ( + "time" + + "github.com/ipfs/go-cid" +) + +const ( + timeoutDuration = 5 * time.Second +) + +type requestData struct { + startedAt time.Time + timeoutFunc *time.Timer +} + +type latencyTracker struct { + requests map[cid.Cid]*requestData +} + +func newLatencyTracker() *latencyTracker { + return &latencyTracker{requests: make(map[cid.Cid]*requestData)} +} + +type afterTimeoutFunc func(cid.Cid) + +func (lt *latencyTracker) SetupRequests(keys []cid.Cid, afterTimeout afterTimeoutFunc) { + startedAt := time.Now() + for _, k := range keys { + if _, ok := lt.requests[k]; !ok { + lt.requests[k] = &requestData{startedAt, time.AfterFunc(timeoutDuration, makeAfterTimeout(afterTimeout, k))} + } + } +} + +func makeAfterTimeout(afterTimeout afterTimeoutFunc, k cid.Cid) func() { + return func() { afterTimeout(k) } +} + +func (lt *latencyTracker) CheckDuration(key cid.Cid) (time.Duration, bool) { + request, ok := lt.requests[key] + var latency time.Duration + if ok { + latency = time.Now().Sub(request.startedAt) + } + return latency, ok +} + +func (lt *latencyTracker) RecordResponse(key cid.Cid) (time.Duration, bool) { + request, ok := lt.requests[key] + var latency time.Duration + if ok { + latency = time.Now().Sub(request.startedAt) + request.timeoutFunc.Stop() + delete(lt.requests, key) + } + return latency, ok 
+} + +func (lt *latencyTracker) Shutdown() { + for _, request := range lt.requests { + request.timeoutFunc.Stop() + } +} diff --git a/bitswap/sessionpeermanager/peerdata.go b/bitswap/sessionpeermanager/peerdata.go new file mode 100644 index 000000000..02ea833fc --- /dev/null +++ b/bitswap/sessionpeermanager/peerdata.go @@ -0,0 +1,41 @@ +package sessionpeermanager + +import ( + "time" + + "github.com/ipfs/go-cid" +) + +const ( + newLatencyWeight = 0.5 +) + +type peerData struct { + hasLatency bool + latency time.Duration + lt *latencyTracker +} + +func newPeerData() *peerData { + return &peerData{ + hasLatency: false, + lt: newLatencyTracker(), + latency: 0, + } +} + +func (pd *peerData) AdjustLatency(k cid.Cid, hasFallbackLatency bool, fallbackLatency time.Duration) { + + latency, hasLatency := pd.lt.RecordResponse(k) + if !hasLatency { + latency, hasLatency = fallbackLatency, hasFallbackLatency + } + if hasLatency { + if pd.hasLatency { + pd.latency = time.Duration(float64(pd.latency)*(1.0-newLatencyWeight) + float64(latency)*newLatencyWeight) + } else { + pd.latency = latency + pd.hasLatency = true + } + } +} diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index 59bfbf497..82967c57c 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "math/rand" + "sort" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" @@ -11,7 +12,6 @@ import ( const ( maxOptimizedPeers = 32 - reservePeers = 2 unoptimizedTagValue = 5 // tag value for "unoptimized" session peers. optimizedTagValue = 10 // tag value for "optimized" session peers. 
) @@ -43,20 +43,21 @@ type SessionPeerManager struct { peerMessages chan peerMessage // do not touch outside of run loop - activePeers map[peer.ID]bool + activePeers map[peer.ID]*peerData unoptimizedPeersArr []peer.ID optimizedPeersArr []peer.ID + broadcastLatency *latencyTracker } // New creates a new SessionPeerManager func New(ctx context.Context, id uint64, tagger PeerTagger, providerFinder PeerProviderFinder) *SessionPeerManager { spm := &SessionPeerManager{ - id: id, - ctx: ctx, + ctx: ctx, tagger: tagger, providerFinder: providerFinder, - peerMessages: make(chan peerMessage, 16), - activePeers: make(map[peer.ID]bool), + peerMessages: make(chan peerMessage, 16), + activePeers: make(map[peer.ID]*peerData), + broadcastLatency: newLatencyTracker(), } spm.tag = fmt.Sprint("bs-ses-", id) @@ -72,7 +73,7 @@ func (spm *SessionPeerManager) RecordPeerResponse(p peer.ID, k cid.Cid) { // at the moment, we're just adding peers here // in the future, we'll actually use this to record metrics select { - case spm.peerMessages <- &peerResponseMessage{p}: + case spm.peerMessages <- &peerResponseMessage{p, k}: case <-spm.ctx.Done(): } } @@ -81,6 +82,10 @@ func (spm *SessionPeerManager) RecordPeerResponse(p peer.ID, k cid.Cid) { func (spm *SessionPeerManager) RecordPeerRequests(p []peer.ID, ks []cid.Cid) { // at the moment, we're not doing anything here // soon we'll use this to track latency by peer + select { + case spm.peerMessages <- &peerRequestMessage{p, ks}: + case <-spm.ctx.Done(): + } } // GetOptimizedPeers returns the best peers available for a session @@ -89,7 +94,7 @@ func (spm *SessionPeerManager) GetOptimizedPeers() []peer.ID { // ordered by optimization, or only a subset resp := make(chan []peer.ID, 1) select { - case spm.peerMessages <- &peerReqMessage{resp}: + case spm.peerMessages <- &getPeersMessage{resp}: case <-spm.ctx.Done(): return nil } @@ -133,14 +138,16 @@ func (spm *SessionPeerManager) tagPeer(p peer.ID, value int) { spm.tagger.TagPeer(p, spm.tag, 
value) } -func (spm *SessionPeerManager) insertOptimizedPeer(p peer.ID) { - if len(spm.optimizedPeersArr) >= (maxOptimizedPeers - reservePeers) { - tailPeer := spm.optimizedPeersArr[len(spm.optimizedPeersArr)-1] - spm.optimizedPeersArr = spm.optimizedPeersArr[:len(spm.optimizedPeersArr)-1] - spm.unoptimizedPeersArr = append(spm.unoptimizedPeersArr, tailPeer) +func (spm *SessionPeerManager) insertPeer(p peer.ID, data *peerData) { + if data.hasLatency { + insertPos := sort.Search(len(spm.optimizedPeersArr), func(i int) bool { + return spm.activePeers[spm.optimizedPeersArr[i]].latency > data.latency + }) + spm.optimizedPeersArr = append(spm.optimizedPeersArr[:insertPos], + append([]peer.ID{p}, spm.optimizedPeersArr[insertPos:]...)...) + } else { + spm.unoptimizedPeersArr = append(spm.unoptimizedPeersArr, p) } - - spm.optimizedPeersArr = append([]peer.ID{p}, spm.optimizedPeersArr...) } func (spm *SessionPeerManager) removeOptimizedPeer(p peer.ID) { @@ -169,38 +176,65 @@ type peerFoundMessage struct { func (pfm *peerFoundMessage) handle(spm *SessionPeerManager) { p := pfm.p if _, ok := spm.activePeers[p]; !ok { - spm.activePeers[p] = false - spm.unoptimizedPeersArr = append(spm.unoptimizedPeersArr, p) + spm.activePeers[p] = newPeerData() + spm.insertPeer(p, spm.activePeers[p]) spm.tagPeer(p, unoptimizedTagValue) } } type peerResponseMessage struct { p peer.ID + k cid.Cid } func (prm *peerResponseMessage) handle(spm *SessionPeerManager) { p := prm.p - isOptimized, ok := spm.activePeers[p] - if isOptimized { - spm.removeOptimizedPeer(p) + k := prm.k + data, ok := spm.activePeers[p] + if !ok { + data = newPeerData() + spm.activePeers[p] = data + spm.tagPeer(p) } else { - spm.activePeers[p] = true - spm.tagPeer(p, optimizedTagValue) - - // transition from unoptimized. 
- if ok { + if data.hasLatency { + spm.removeOptimizedPeer(p) + } else { spm.removeUnoptimizedPeer(p) } } - spm.insertOptimizedPeer(p) + fallbackLatency, hasFallbackLatency := spm.broadcastLatency.CheckDuration(k) + data.AdjustLatency(k, hasFallbackLatency, fallbackLatency) + spm.insertPeer(p, data) +} + +type peerRequestMessage struct { + peers []peer.ID + keys []cid.Cid +} + +func (spm *SessionPeerManager) makeTimeout(p peer.ID) afterTimeoutFunc { + return func(k cid.Cid) { + spm.RecordPeerResponse(p, k) + } +} + +func (prm *peerRequestMessage) handle(spm *SessionPeerManager) { + if prm.peers == nil { + spm.broadcastLatency.SetupRequests(prm.keys, func(k cid.Cid) {}) + } else { + for _, p := range prm.peers { + if data, ok := spm.activePeers[p]; ok { + data.lt.SetupRequests(prm.keys, spm.makeTimeout(p)) + } + } + } } -type peerReqMessage struct { +type getPeersMessage struct { resp chan<- []peer.ID } -func (prm *peerReqMessage) handle(spm *SessionPeerManager) { +func (prm *getPeersMessage) handle(spm *SessionPeerManager) { randomOrder := rand.Perm(len(spm.unoptimizedPeersArr)) maxPeers := len(spm.unoptimizedPeersArr) + len(spm.optimizedPeersArr) if maxPeers > maxOptimizedPeers { @@ -215,7 +249,8 @@ func (prm *peerReqMessage) handle(spm *SessionPeerManager) { } func (spm *SessionPeerManager) handleShutdown() { - for p := range spm.activePeers { + for p, data := range spm.activePeers { spm.tagger.UntagPeer(p, spm.tag) + data.lt.Shutdown() } } diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index 2aceeecd3..a48da2bd6 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -167,7 +167,7 @@ func TestOrderingPeers(t *testing.T) { peer3 := peers[rand.Intn(100)] time.Sleep(1 * time.Millisecond) sessionPeerManager.RecordPeerResponse(peer1, c[0]) - time.Sleep(1 * time.Millisecond) + time.Sleep(5 * time.Millisecond) 
sessionPeerManager.RecordPeerResponse(peer2, c[0]) time.Sleep(1 * time.Millisecond) sessionPeerManager.RecordPeerResponse(peer3, c[0]) @@ -177,13 +177,18 @@ func TestOrderingPeers(t *testing.T) { t.Fatal("Should not return more than the max of optimized peers") } - // should prioritize peers which have received blocks - if (sessionPeers[0] != peer3) || (sessionPeers[1] != peer2) || (sessionPeers[2] != peer1) { + // should prioritize peers which are fastest + if (sessionPeers[0] != peer1) || (sessionPeers[1] != peer2) || (sessionPeers[2] != peer3) { t.Fatal("Did not prioritize peers that received blocks") } - // Receive a second time from same node - sessionPeerManager.RecordPeerResponse(peer3, c[0]) + c2 := testutil.GenerateCids(1) + + // Request again + sessionPeerManager.RecordPeerRequests(nil, c2) + + // Receive a second time + sessionPeerManager.RecordPeerResponse(peer3, c2[0]) // call again nextSessionPeers := sessionPeerManager.GetOptimizedPeers() @@ -191,9 +196,9 @@ func TestOrderingPeers(t *testing.T) { t.Fatal("Should not return more than the max of optimized peers") } - // should not duplicate - if (nextSessionPeers[0] != peer3) || (nextSessionPeers[1] != peer2) || (nextSessionPeers[2] != peer1) { - t.Fatal("Did dedup peers which received multiple blocks") + // should sort by average latency + if (nextSessionPeers[0] != peer1) || (nextSessionPeers[1] != peer3) || (nextSessionPeers[2] != peer2) { + t.Fatal("Did not dedup peers which received multiple blocks") } // should randomize other peers From 12dd49be112f4dac4913af80bfeca4f4cce8ddc6 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Fri, 21 Dec 2018 15:23:06 -0800 Subject: [PATCH 0778/1035] feat(sessions): pass optimization rating When fetching optimized peers from the peer manager, return an optimization rating, and pass on to request splitter BREAKING CHANGE: interface change to GetOptimizedPeers and SplitRequests public package methods This commit was moved from 
ipfs/go-bitswap@8e59a716dbbd51cd6141f8f3cf2efa6c8619c09e --- bitswap/session/session.go | 6 +- bitswap/session/session_test.go | 18 ++++-- bitswap/sessiondata/sessiondata.go | 18 ++++++ bitswap/sessionmanager/sessionmanager_test.go | 6 +- .../sessionpeermanager/sessionpeermanager.go | 60 +++++++++++++------ .../sessionpeermanager_test.go | 38 ++++++++++-- .../sessionrequestsplitter.go | 32 +++++----- .../sessionrequestsplitter_test.go | 22 +++---- bitswap/testutil/testutil.go | 19 ++++++ 9 files changed, 160 insertions(+), 59 deletions(-) create mode 100644 bitswap/sessiondata/sessiondata.go diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 0757ab11e..f4ddc2433 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -8,6 +8,7 @@ import ( lru "github.com/hashicorp/golang-lru" bsgetter "github.com/ipfs/go-bitswap/getter" notifications "github.com/ipfs/go-bitswap/notifications" + bssd "github.com/ipfs/go-bitswap/sessiondata" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" @@ -15,7 +16,6 @@ import ( peer "github.com/libp2p/go-libp2p-core/peer" loggables "github.com/libp2p/go-libp2p-loggables" - bssrs "github.com/ipfs/go-bitswap/sessionrequestsplitter" ) const ( @@ -34,7 +34,7 @@ type WantManager interface { // requesting more when neccesary. type PeerManager interface { FindMorePeers(context.Context, cid.Cid) - GetOptimizedPeers() []peer.ID + GetOptimizedPeers() []bssd.OptimizedPeer RecordPeerRequests([]peer.ID, []cid.Cid) RecordPeerResponse(peer.ID, cid.Cid) } @@ -42,7 +42,7 @@ type PeerManager interface { // RequestSplitter provides an interface for splitting // a request for Cids up among peers. 
type RequestSplitter interface { - SplitRequest([]peer.ID, []cid.Cid) []*bssrs.PartialRequest + SplitRequest([]bssd.OptimizedPeer, []cid.Cid) []bssd.PartialRequest RecordDuplicateBlock() RecordUniqueBlock() } diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index 751f9f0cd..6a9cc0aa4 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - bssrs "github.com/ipfs/go-bitswap/sessionrequestsplitter" + bssd "github.com/ipfs/go-bitswap/sessiondata" "github.com/ipfs/go-bitswap/testutil" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" @@ -52,10 +52,14 @@ func (fpm *fakePeerManager) FindMorePeers(ctx context.Context, k cid.Cid) { } } -func (fpm *fakePeerManager) GetOptimizedPeers() []peer.ID { +func (fpm *fakePeerManager) GetOptimizedPeers() []bssd.OptimizedPeer { fpm.lk.Lock() defer fpm.lk.Unlock() - return fpm.peers + optimizedPeers := make([]bssd.OptimizedPeer, 0, len(fpm.peers)) + for _, peer := range fpm.peers { + optimizedPeers = append(optimizedPeers, bssd.OptimizedPeer{Peer: peer, OptimizationRating: 1.0}) + } + return optimizedPeers } func (fpm *fakePeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} @@ -68,8 +72,12 @@ func (fpm *fakePeerManager) RecordPeerResponse(p peer.ID, c cid.Cid) { type fakeRequestSplitter struct { } -func (frs *fakeRequestSplitter) SplitRequest(peers []peer.ID, keys []cid.Cid) []*bssrs.PartialRequest { - return []*bssrs.PartialRequest{&bssrs.PartialRequest{Peers: peers, Keys: keys}} +func (frs *fakeRequestSplitter) SplitRequest(optimizedPeers []bssd.OptimizedPeer, keys []cid.Cid) []bssd.PartialRequest { + peers := make([]peer.ID, len(optimizedPeers)) + for i, optimizedPeer := range optimizedPeers { + peers[i] = optimizedPeer.Peer + } + return []bssd.PartialRequest{bssd.PartialRequest{Peers: peers, Keys: keys}} } func (frs *fakeRequestSplitter) RecordDuplicateBlock() {} diff --git 
a/bitswap/sessiondata/sessiondata.go b/bitswap/sessiondata/sessiondata.go new file mode 100644 index 000000000..a56f93be5 --- /dev/null +++ b/bitswap/sessiondata/sessiondata.go @@ -0,0 +1,18 @@ +package sessiondata + +import ( + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +// OptimizedPeer describes a peer and its level of optimization from 0 to 1. +type OptimizedPeer struct { + Peer peer.ID + OptimizationRating float64 +} + +// PartialRequest represents one slice of an over request split among peers +type PartialRequest struct { + Peers []peer.ID + Keys []cid.Cid +} diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index b858f7dd7..467d07ea9 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" - bssrs "github.com/ipfs/go-bitswap/sessionrequestsplitter" delay "github.com/ipfs/go-ipfs-delay" bssession "github.com/ipfs/go-bitswap/session" + bssd "github.com/ipfs/go-bitswap/sessiondata" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" @@ -39,14 +39,14 @@ type fakePeerManager struct { } func (*fakePeerManager) FindMorePeers(context.Context, cid.Cid) {} -func (*fakePeerManager) GetOptimizedPeers() []peer.ID { return nil } +func (*fakePeerManager) GetOptimizedPeers() []bssd.OptimizedPeer { return nil } func (*fakePeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} func (*fakePeerManager) RecordPeerResponse(peer.ID, cid.Cid) {} type fakeRequestSplitter struct { } -func (frs *fakeRequestSplitter) SplitRequest(peers []peer.ID, keys []cid.Cid) []*bssrs.PartialRequest { +func (frs *fakeRequestSplitter) SplitRequest(optimizedPeers []bssd.OptimizedPeer, keys []cid.Cid) []bssd.PartialRequest { return nil } func (frs *fakeRequestSplitter) RecordDuplicateBlock() {} diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go 
b/bitswap/sessionpeermanager/sessionpeermanager.go index 82967c57c..cd65c9634 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -6,6 +6,8 @@ import ( "math/rand" "sort" + bssd "github.com/ipfs/go-bitswap/sessiondata" + cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" ) @@ -78,7 +80,7 @@ func (spm *SessionPeerManager) RecordPeerResponse(p peer.ID, k cid.Cid) { } } -// RecordPeerRequests records that a given set of peers requested the given cids +// RecordPeerRequests records that a given set of peers requested the given cids. func (spm *SessionPeerManager) RecordPeerRequests(p []peer.ID, ks []cid.Cid) { // at the moment, we're not doing anything here // soon we'll use this to track latency by peer @@ -88,11 +90,12 @@ func (spm *SessionPeerManager) RecordPeerRequests(p []peer.ID, ks []cid.Cid) { } } -// GetOptimizedPeers returns the best peers available for a session -func (spm *SessionPeerManager) GetOptimizedPeers() []peer.ID { +// GetOptimizedPeers returns the best peers available for a session, along with +// a rating for how good they are, in comparison to the best peer. 
+func (spm *SessionPeerManager) GetOptimizedPeers() []bssd.OptimizedPeer { // right now this just returns all peers, but soon we might return peers // ordered by optimization, or only a subset - resp := make(chan []peer.ID, 1) + resp := make(chan []bssd.OptimizedPeer, 1) select { case spm.peerMessages <- &getPeersMessage{resp}: case <-spm.ctx.Done(): @@ -191,19 +194,28 @@ func (prm *peerResponseMessage) handle(spm *SessionPeerManager) { p := prm.p k := prm.k data, ok := spm.activePeers[p] - if !ok { - data = newPeerData() - spm.activePeers[p] = data - spm.tagPeer(p) + wasOptimized := ok && data.hasLatency + if wasOptimized { + spm.removeOptimizedPeer(p) } else { - if data.hasLatency { - spm.removeOptimizedPeer(p) - } else { + if ok { spm.removeUnoptimizedPeer(p) + } else { + data = newPeerData() + spm.activePeers[p] = data } } fallbackLatency, hasFallbackLatency := spm.broadcastLatency.CheckDuration(k) data.AdjustLatency(k, hasFallbackLatency, fallbackLatency) + var tagValue int + if data.hasLatency { + tagValue = optimizedTagValue + } else { + tagValue = unoptimizedTagValue + } + if !ok || wasOptimized != data.hasLatency { + spm.tagPeer(p, tagValue) + } spm.insertPeer(p, data) } @@ -231,7 +243,7 @@ func (prm *peerRequestMessage) handle(spm *SessionPeerManager) { } type getPeersMessage struct { - resp chan<- []peer.ID + resp chan<- []bssd.OptimizedPeer } func (prm *getPeersMessage) handle(spm *SessionPeerManager) { @@ -240,12 +252,26 @@ func (prm *getPeersMessage) handle(spm *SessionPeerManager) { if maxPeers > maxOptimizedPeers { maxPeers = maxOptimizedPeers } - - extraPeers := make([]peer.ID, maxPeers-len(spm.optimizedPeersArr)) - for i := range extraPeers { - extraPeers[i] = spm.unoptimizedPeersArr[randomOrder[i]] + var bestPeerLatency float64 + if len(spm.optimizedPeersArr) > 0 { + bestPeerLatency = float64(spm.activePeers[spm.optimizedPeersArr[0]].latency) + } else { + bestPeerLatency = 0 + } + optimizedPeers := make([]bssd.OptimizedPeer, 0, maxPeers) + for i 
:= 0; i < maxPeers; i++ { + if i < len(spm.optimizedPeersArr) { + p := spm.optimizedPeersArr[i] + optimizedPeers = append(optimizedPeers, bssd.OptimizedPeer{ + Peer: p, + OptimizationRating: bestPeerLatency / float64(spm.activePeers[p].latency), + }) + } else { + p := spm.unoptimizedPeersArr[randomOrder[i-len(spm.optimizedPeersArr)]] + optimizedPeers = append(optimizedPeers, bssd.OptimizedPeer{Peer: p, OptimizationRating: 0.0}) + } } - prm.resp <- append(spm.optimizedPeersArr, extraPeers...) + prm.resp <- optimizedPeers } func (spm *SessionPeerManager) handleShutdown() { diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index a48da2bd6..bfbe878b2 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -74,6 +74,15 @@ func (fpt *fakePeerTagger) count() int { return len(fpt.taggedPeers) } +func getPeers(sessionPeerManager *SessionPeerManager) []peer.ID { + optimizedPeers := sessionPeerManager.GetOptimizedPeers() + var peers []peer.ID + for _, optimizedPeer := range optimizedPeers { + peers = append(peers, optimizedPeer.Peer) + } + return peers +} + func TestFindingMorePeers(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) @@ -98,7 +107,7 @@ func TestFindingMorePeers(t *testing.T) { } time.Sleep(2 * time.Millisecond) - sessionPeers := sessionPeerManager.GetOptimizedPeers() + sessionPeers := getPeers(sessionPeerManager) if len(sessionPeers) != len(peers) { t.Fatal("incorrect number of peers found") } @@ -125,7 +134,7 @@ func TestRecordingReceivedBlocks(t *testing.T) { sessionPeerManager := New(ctx, id, fpt, fppf) sessionPeerManager.RecordPeerResponse(p, c) time.Sleep(10 * time.Millisecond) - sessionPeers := sessionPeerManager.GetOptimizedPeers() + sessionPeers := getPeers(sessionPeerManager) if len(sessionPeers) != 1 { t.Fatal("did not add peer on receive") } @@ -178,10 +187,28 @@ func 
TestOrderingPeers(t *testing.T) { } // should prioritize peers which are fastest - if (sessionPeers[0] != peer1) || (sessionPeers[1] != peer2) || (sessionPeers[2] != peer3) { + if (sessionPeers[0].Peer != peer1) || (sessionPeers[1].Peer != peer2) || (sessionPeers[2].Peer != peer3) { t.Fatal("Did not prioritize peers that received blocks") } + // should give first peer rating of 1 + if sessionPeers[0].OptimizationRating < 1.0 { + t.Fatal("Did not assign rating to best peer correctly") + } + + // should give other optimized peers ratings between 0 & 1 + if (sessionPeers[1].OptimizationRating >= 1.0) || (sessionPeers[1].OptimizationRating <= 0.0) || + (sessionPeers[2].OptimizationRating >= 1.0) || (sessionPeers[2].OptimizationRating <= 0.0) { + t.Fatal("Did not assign rating to other optimized peers correctly") + } + + // should give other peers a rating of zero + for i := 3; i < maxOptimizedPeers; i++ { + if sessionPeers[i].OptimizationRating != 0.0 { + t.Fatal("Did not assign rating to unoptimized peer correctly") + } + } + c2 := testutil.GenerateCids(1) // Request again @@ -197,14 +224,15 @@ func TestOrderingPeers(t *testing.T) { } // should sort by average latency - if (nextSessionPeers[0] != peer1) || (nextSessionPeers[1] != peer3) || (nextSessionPeers[2] != peer2) { + if (nextSessionPeers[0].Peer != peer1) || (nextSessionPeers[1].Peer != peer3) || + (nextSessionPeers[2].Peer != peer2) { t.Fatal("Did not dedup peers which received multiple blocks") } // should randomize other peers totalSame := 0 for i := 3; i < maxOptimizedPeers; i++ { - if sessionPeers[i] == nextSessionPeers[i] { + if sessionPeers[i].Peer == nextSessionPeers[i].Peer { totalSame++ } } diff --git a/bitswap/sessionrequestsplitter/sessionrequestsplitter.go b/bitswap/sessionrequestsplitter/sessionrequestsplitter.go index 5400fe5c4..46998244b 100644 --- a/bitswap/sessionrequestsplitter/sessionrequestsplitter.go +++ b/bitswap/sessionrequestsplitter/sessionrequestsplitter.go @@ -3,6 +3,8 @@ package 
sessionrequestsplitter import ( "context" + bssd "github.com/ipfs/go-bitswap/sessiondata" + "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" ) @@ -15,12 +17,6 @@ const ( initialSplit = 2 ) -// PartialRequest is represents one slice of an over request split among peers -type PartialRequest struct { - Peers []peer.ID - Keys []cid.Cid -} - type srsMessage interface { handle(srs *SessionRequestSplitter) } @@ -50,11 +46,11 @@ func New(ctx context.Context) *SessionRequestSplitter { // SplitRequest splits a request for the given cids one or more times among the // given peers. -func (srs *SessionRequestSplitter) SplitRequest(peers []peer.ID, ks []cid.Cid) []*PartialRequest { - resp := make(chan []*PartialRequest, 1) +func (srs *SessionRequestSplitter) SplitRequest(optimizedPeers []bssd.OptimizedPeer, ks []cid.Cid) []bssd.PartialRequest { + resp := make(chan []bssd.PartialRequest, 1) select { - case srs.messages <- &splitRequestMessage{peers, ks, resp}: + case srs.messages <- &splitRequestMessage{optimizedPeers, ks, resp}: case <-srs.ctx.Done(): return nil } @@ -101,14 +97,18 @@ func (srs *SessionRequestSplitter) duplicateRatio() float64 { } type splitRequestMessage struct { - peers []peer.ID - ks []cid.Cid - resp chan []*PartialRequest + optimizedPeers []bssd.OptimizedPeer + ks []cid.Cid + resp chan []bssd.PartialRequest } func (s *splitRequestMessage) handle(srs *SessionRequestSplitter) { split := srs.split - peers := s.peers + // first iteration ignore optimization ratings + peers := make([]peer.ID, len(s.optimizedPeers)) + for i, optimizedPeer := range s.optimizedPeers { + peers[i] = optimizedPeer.Peer + } ks := s.ks if len(peers) < split { split = len(peers) @@ -118,9 +118,9 @@ func (s *splitRequestMessage) handle(srs *SessionRequestSplitter) { split = len(ks) } keySplits := splitKeys(ks, split) - splitRequests := make([]*PartialRequest, len(keySplits)) - for i := range splitRequests { - splitRequests[i] = &PartialRequest{peerSplits[i], keySplits[i]} + 
splitRequests := make([]bssd.PartialRequest, 0, len(keySplits)) + for i, keySplit := range keySplits { + splitRequests = append(splitRequests, bssd.PartialRequest{Peers: peerSplits[i], Keys: keySplit}) } s.resp <- splitRequests } diff --git a/bitswap/sessionrequestsplitter/sessionrequestsplitter_test.go b/bitswap/sessionrequestsplitter/sessionrequestsplitter_test.go index 35c5fe2a4..10ed64ead 100644 --- a/bitswap/sessionrequestsplitter/sessionrequestsplitter_test.go +++ b/bitswap/sessionrequestsplitter/sessionrequestsplitter_test.go @@ -7,14 +7,16 @@ import ( "github.com/ipfs/go-bitswap/testutil" ) +func quadEaseOut(t float64) float64 { return t * t } + func TestSplittingRequests(t *testing.T) { ctx := context.Background() - peers := testutil.GeneratePeers(10) + optimizedPeers := testutil.GenerateOptimizedPeers(10, 5, quadEaseOut) keys := testutil.GenerateCids(6) srs := New(ctx) - partialRequests := srs.SplitRequest(peers, keys) + partialRequests := srs.SplitRequest(optimizedPeers, keys) if len(partialRequests) != 2 { t.Fatal("Did not generate right number of partial requests") } @@ -27,12 +29,12 @@ func TestSplittingRequests(t *testing.T) { func TestSplittingRequestsTooFewKeys(t *testing.T) { ctx := context.Background() - peers := testutil.GeneratePeers(10) + optimizedPeers := testutil.GenerateOptimizedPeers(10, 5, quadEaseOut) keys := testutil.GenerateCids(1) srs := New(ctx) - partialRequests := srs.SplitRequest(peers, keys) + partialRequests := srs.SplitRequest(optimizedPeers, keys) if len(partialRequests) != 1 { t.Fatal("Should only generate as many requests as keys") } @@ -45,12 +47,12 @@ func TestSplittingRequestsTooFewKeys(t *testing.T) { func TestSplittingRequestsTooFewPeers(t *testing.T) { ctx := context.Background() - peers := testutil.GeneratePeers(1) + optimizedPeers := testutil.GenerateOptimizedPeers(1, 1, quadEaseOut) keys := testutil.GenerateCids(6) srs := New(ctx) - partialRequests := srs.SplitRequest(peers, keys) + partialRequests := 
srs.SplitRequest(optimizedPeers, keys) if len(partialRequests) != 1 { t.Fatal("Should only generate as many requests as peers") } @@ -63,7 +65,7 @@ func TestSplittingRequestsTooFewPeers(t *testing.T) { func TestSplittingRequestsIncreasingSplitDueToDupes(t *testing.T) { ctx := context.Background() - peers := testutil.GeneratePeers(maxSplit) + optimizedPeers := testutil.GenerateOptimizedPeers(maxSplit, maxSplit, quadEaseOut) keys := testutil.GenerateCids(maxSplit) srs := New(ctx) @@ -72,7 +74,7 @@ func TestSplittingRequestsIncreasingSplitDueToDupes(t *testing.T) { srs.RecordDuplicateBlock() } - partialRequests := srs.SplitRequest(peers, keys) + partialRequests := srs.SplitRequest(optimizedPeers, keys) if len(partialRequests) != maxSplit { t.Fatal("Did not adjust split up as duplicates came in") } @@ -80,7 +82,7 @@ func TestSplittingRequestsIncreasingSplitDueToDupes(t *testing.T) { func TestSplittingRequestsDecreasingSplitDueToNoDupes(t *testing.T) { ctx := context.Background() - peers := testutil.GeneratePeers(maxSplit) + optimizedPeers := testutil.GenerateOptimizedPeers(maxSplit, maxSplit, quadEaseOut) keys := testutil.GenerateCids(maxSplit) srs := New(ctx) @@ -89,7 +91,7 @@ func TestSplittingRequestsDecreasingSplitDueToNoDupes(t *testing.T) { srs.RecordUniqueBlock() } - partialRequests := srs.SplitRequest(peers, keys) + partialRequests := srs.SplitRequest(optimizedPeers, keys) if len(partialRequests) != 1 { t.Fatal("Did not adjust split down as unique blocks came in") } diff --git a/bitswap/testutil/testutil.go b/bitswap/testutil/testutil.go index 96d4241c5..de6777ff3 100644 --- a/bitswap/testutil/testutil.go +++ b/bitswap/testutil/testutil.go @@ -4,6 +4,7 @@ import ( "math/rand" bsmsg "github.com/ipfs/go-bitswap/message" + bssd "github.com/ipfs/go-bitswap/sessiondata" "github.com/ipfs/go-bitswap/wantlist" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" @@ -76,6 +77,24 @@ func GeneratePeers(n int) []peer.ID { return peerIds } +// 
GenerateOptimizedPeers creates n peer ids, +// with optimization fall off up to optCount, curveFunc to scale it +func GenerateOptimizedPeers(n int, optCount int, curveFunc func(float64) float64) []bssd.OptimizedPeer { + peers := GeneratePeers(n) + optimizedPeers := make([]bssd.OptimizedPeer, 0, n) + for i, peer := range peers { + var optimizationRating float64 + if i <= optCount { + optimizationRating = 1.0 - float64(i)/float64(optCount) + } else { + optimizationRating = 0.0 + } + optimizationRating = curveFunc(optimizationRating) + optimizedPeers = append(optimizedPeers, bssd.OptimizedPeer{Peer: peer, OptimizationRating: optimizationRating}) + } + return optimizedPeers +} + var nextSession uint64 // GenerateSessionID make a unit session identifier. From 54341e1746e4cc251d1ab9637b816c042bc9b16e Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 4 Jul 2019 09:31:19 -0700 Subject: [PATCH 0779/1035] feat(sessionpeermanager): track cancels Better estimate latency per peer by tracking cancellations This commit was moved from ipfs/go-bitswap@1bf9ed3144e6fdb0195b5a29fa4fdadaf4c940e4 --- bitswap/session/session.go | 4 +- bitswap/session/session_test.go | 1 + bitswap/sessionmanager/sessionmanager_test.go | 1 + bitswap/sessionpeermanager/latencytracker.go | 34 ++-- bitswap/sessionpeermanager/peerdata.go | 4 +- .../sessionpeermanager/sessionpeermanager.go | 146 +++++++++++++----- .../sessionpeermanager_test.go | 109 +++++++++++++ 7 files changed, 244 insertions(+), 55 deletions(-) diff --git a/bitswap/session/session.go b/bitswap/session/session.go index f4ddc2433..e847bf43d 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -15,7 +15,6 @@ import ( logging "github.com/ipfs/go-log" peer "github.com/libp2p/go-libp2p-core/peer" loggables "github.com/libp2p/go-libp2p-loggables" - ) const ( @@ -37,6 +36,7 @@ type PeerManager interface { GetOptimizedPeers() []bssd.OptimizedPeer RecordPeerRequests([]peer.ID, []cid.Cid) RecordPeerResponse(peer.ID, 
cid.Cid) + RecordCancel(cid.Cid) } // RequestSplitter provides an interface for splitting @@ -141,8 +141,8 @@ func (s *Session) ReceiveBlockFrom(from peer.ID, blk blocks.Block) { case <-s.ctx.Done(): } ks := []cid.Cid{blk.Cid()} + s.pm.RecordCancel(blk.Cid()) s.wm.CancelWants(s.ctx, ks, nil, s.id) - } // UpdateReceiveCounters updates receive counters for a block, diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index 6a9cc0aa4..ade9e6425 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -68,6 +68,7 @@ func (fpm *fakePeerManager) RecordPeerResponse(p peer.ID, c cid.Cid) { fpm.peers = append(fpm.peers, p) fpm.lk.Unlock() } +func (fpm *fakePeerManager) RecordCancel(c cid.Cid) {} type fakeRequestSplitter struct { } diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index 467d07ea9..ef1293b35 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -42,6 +42,7 @@ func (*fakePeerManager) FindMorePeers(context.Context, cid.Cid) {} func (*fakePeerManager) GetOptimizedPeers() []bssd.OptimizedPeer { return nil } func (*fakePeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} func (*fakePeerManager) RecordPeerResponse(peer.ID, cid.Cid) {} +func (*fakePeerManager) RecordCancel(c cid.Cid) {} type fakeRequestSplitter struct { } diff --git a/bitswap/sessionpeermanager/latencytracker.go b/bitswap/sessionpeermanager/latencytracker.go index ca756a037..5ace5c8fc 100644 --- a/bitswap/sessionpeermanager/latencytracker.go +++ b/bitswap/sessionpeermanager/latencytracker.go @@ -6,13 +6,10 @@ import ( "github.com/ipfs/go-cid" ) -const ( - timeoutDuration = 5 * time.Second -) - type requestData struct { - startedAt time.Time - timeoutFunc *time.Timer + startedAt time.Time + wasCancelled bool + timeoutFunc *time.Timer } type latencyTracker struct { @@ -25,11 +22,15 @@ func newLatencyTracker() *latencyTracker { 
type afterTimeoutFunc func(cid.Cid) -func (lt *latencyTracker) SetupRequests(keys []cid.Cid, afterTimeout afterTimeoutFunc) { +func (lt *latencyTracker) SetupRequests(keys []cid.Cid, timeoutDuration time.Duration, afterTimeout afterTimeoutFunc) { startedAt := time.Now() for _, k := range keys { if _, ok := lt.requests[k]; !ok { - lt.requests[k] = &requestData{startedAt, time.AfterFunc(timeoutDuration, makeAfterTimeout(afterTimeout, k))} + lt.requests[k] = &requestData{ + startedAt, + false, + time.AfterFunc(timeoutDuration, makeAfterTimeout(afterTimeout, k)), + } } } } @@ -47,15 +48,24 @@ func (lt *latencyTracker) CheckDuration(key cid.Cid) (time.Duration, bool) { return latency, ok } -func (lt *latencyTracker) RecordResponse(key cid.Cid) (time.Duration, bool) { +func (lt *latencyTracker) RemoveRequest(key cid.Cid) { request, ok := lt.requests[key] - var latency time.Duration if ok { - latency = time.Now().Sub(request.startedAt) request.timeoutFunc.Stop() delete(lt.requests, key) } - return latency, ok +} + +func (lt *latencyTracker) RecordCancel(key cid.Cid) { + request, ok := lt.requests[key] + if ok { + request.wasCancelled = true + } +} + +func (lt *latencyTracker) WasCancelled(key cid.Cid) bool { + request, ok := lt.requests[key] + return ok && request.wasCancelled } func (lt *latencyTracker) Shutdown() { diff --git a/bitswap/sessionpeermanager/peerdata.go b/bitswap/sessionpeermanager/peerdata.go index 02ea833fc..a06198588 100644 --- a/bitswap/sessionpeermanager/peerdata.go +++ b/bitswap/sessionpeermanager/peerdata.go @@ -25,8 +25,8 @@ func newPeerData() *peerData { } func (pd *peerData) AdjustLatency(k cid.Cid, hasFallbackLatency bool, fallbackLatency time.Duration) { - - latency, hasLatency := pd.lt.RecordResponse(k) + latency, hasLatency := pd.lt.CheckDuration(k) + pd.lt.RemoveRequest(k) if !hasLatency { latency, hasLatency = fallbackLatency, hasFallbackLatency } diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go 
b/bitswap/sessionpeermanager/sessionpeermanager.go index cd65c9634..471e982e7 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -5,6 +5,7 @@ import ( "fmt" "math/rand" "sort" + "time" bssd "github.com/ipfs/go-bitswap/sessiondata" @@ -13,9 +14,10 @@ import ( ) const ( - maxOptimizedPeers = 32 - unoptimizedTagValue = 5 // tag value for "unoptimized" session peers. - optimizedTagValue = 10 // tag value for "optimized" session peers. + defaultTimeoutDuration = 5 * time.Second + maxOptimizedPeers = 32 + unoptimizedTagValue = 5 // tag value for "unoptimized" session peers. + optimizedTagValue = 10 // tag value for "optimized" session peers. ) // PeerTagger is an interface for tagging peers with metadata @@ -49,17 +51,19 @@ type SessionPeerManager struct { unoptimizedPeersArr []peer.ID optimizedPeersArr []peer.ID broadcastLatency *latencyTracker + timeoutDuration time.Duration } // New creates a new SessionPeerManager func New(ctx context.Context, id uint64, tagger PeerTagger, providerFinder PeerProviderFinder) *SessionPeerManager { spm := &SessionPeerManager{ ctx: ctx, - tagger: tagger, - providerFinder: providerFinder, + tagger: tagger, + providerFinder: providerFinder, peerMessages: make(chan peerMessage, 16), activePeers: make(map[peer.ID]*peerData), broadcastLatency: newLatencyTracker(), + timeoutDuration: defaultTimeoutDuration, } spm.tag = fmt.Sprint("bs-ses-", id) @@ -72,18 +76,25 @@ func New(ctx context.Context, id uint64, tagger PeerTagger, providerFinder PeerP // the list of peers if it wasn't already added func (spm *SessionPeerManager) RecordPeerResponse(p peer.ID, k cid.Cid) { + select { + case spm.peerMessages <- &peerResponseMessage{p, k}: + case <-spm.ctx.Done(): + } +} + +// RecordCancel records the fact that cancellations were sent to peers, +// so if no blocks come in, don't let it affect peers timeout // at the moment, we're 
just adding peers here // in the future, we'll actually use this to record metrics select { - case spm.peerMessages <- &peerResponseMessage{p, k}: + case spm.peerMessages <- &cancelMessage{k}: case <-spm.ctx.Done(): } } // RecordPeerRequests records that a given set of peers requested the given cids. func (spm *SessionPeerManager) RecordPeerRequests(p []peer.ID, ks []cid.Cid) { - // at the moment, we're not doing anything here - // soon we'll use this to track latency by peer select { case spm.peerMessages <- &peerRequestMessage{p, ks}: case <-spm.ctx.Done(): @@ -125,6 +136,15 @@ func (spm *SessionPeerManager) FindMorePeers(ctx context.Context, c cid.Cid) { }(c) } +// SetTimeoutDuration changes the length of time used to timeout recording of +// requests +func (spm *SessionPeerManager) SetTimeoutDuration(timeoutDuration time.Duration) { + select { + case spm.peerMessages <- &setTimeoutMessage{timeoutDuration}: + case <-spm.ctx.Done(): + } +} + func (spm *SessionPeerManager) run(ctx context.Context) { for { select { @@ -137,7 +157,13 @@ func (spm *SessionPeerManager) run(ctx context.Context) { } } -func (spm *SessionPeerManager) tagPeer(p peer.ID, value int) { +func (spm *SessionPeerManager) tagPeer(p peer.ID, data *peerData) { + var value int + if data.hasLatency { + value = optimizedTagValue + } else { + value = unoptimizedTagValue + } spm.tagger.TagPeer(p, spm.tag, value) } @@ -172,6 +198,27 @@ func (spm *SessionPeerManager) removeUnoptimizedPeer(p peer.ID) { } } +func (spm *SessionPeerManager) recordResponse(p peer.ID, k cid.Cid) { + data, ok := spm.activePeers[p] + wasOptimized := ok && data.hasLatency + if wasOptimized { + spm.removeOptimizedPeer(p) + } else { + if ok { + spm.removeUnoptimizedPeer(p) + } else { + data = newPeerData() + spm.activePeers[p] = data + } + } + fallbackLatency, hasFallbackLatency := spm.broadcastLatency.CheckDuration(k) + data.AdjustLatency(k, hasFallbackLatency, fallbackLatency) + if !ok || wasOptimized != data.hasLatency { + 
spm.tagPeer(p, data) + } + spm.insertPeer(p, data) +} + type peerFoundMessage struct { p peer.ID } @@ -181,7 +228,7 @@ func (pfm *peerFoundMessage) handle(spm *SessionPeerManager) { if _, ok := spm.activePeers[p]; !ok { spm.activePeers[p] = newPeerData() spm.insertPeer(p, spm.activePeers[p]) - spm.tagPeer(p, unoptimizedTagValue) + spm.tagPeer(p, spm.activePeers[p]) } } @@ -191,32 +238,7 @@ type peerResponseMessage struct { } func (prm *peerResponseMessage) handle(spm *SessionPeerManager) { - p := prm.p - k := prm.k - data, ok := spm.activePeers[p] - wasOptimized := ok && data.hasLatency - if wasOptimized { - spm.removeOptimizedPeer(p) - } else { - if ok { - spm.removeUnoptimizedPeer(p) - } else { - data = newPeerData() - spm.activePeers[p] = data - } - } - fallbackLatency, hasFallbackLatency := spm.broadcastLatency.CheckDuration(k) - data.AdjustLatency(k, hasFallbackLatency, fallbackLatency) - var tagValue int - if data.hasLatency { - tagValue = optimizedTagValue - } else { - tagValue = unoptimizedTagValue - } - if !ok || wasOptimized != data.hasLatency { - spm.tagPeer(p, tagValue) - } - spm.insertPeer(p, data) + spm.recordResponse(prm.p, prm.k) } type peerRequestMessage struct { @@ -226,17 +248,25 @@ type peerRequestMessage struct { func (spm *SessionPeerManager) makeTimeout(p peer.ID) afterTimeoutFunc { return func(k cid.Cid) { - spm.RecordPeerResponse(p, k) + select { + case spm.peerMessages <- &peerTimeoutMessage{p, k}: + case <-spm.ctx.Done(): + } } } func (prm *peerRequestMessage) handle(spm *SessionPeerManager) { if prm.peers == nil { - spm.broadcastLatency.SetupRequests(prm.keys, func(k cid.Cid) {}) + spm.broadcastLatency.SetupRequests(prm.keys, spm.timeoutDuration, func(k cid.Cid) { + select { + case spm.peerMessages <- &broadcastTimeoutMessage{k}: + case <-spm.ctx.Done(): + } + }) } else { for _, p := range prm.peers { if data, ok := spm.activePeers[p]; ok { - data.lt.SetupRequests(prm.keys, spm.makeTimeout(p)) + data.lt.SetupRequests(prm.keys, 
spm.timeoutDuration, spm.makeTimeout(p)) } } } @@ -274,9 +304,47 @@ func (prm *getPeersMessage) handle(spm *SessionPeerManager) { prm.resp <- optimizedPeers } +type cancelMessage struct { + k cid.Cid +} + +func (cm *cancelMessage) handle(spm *SessionPeerManager) { + for _, data := range spm.activePeers { + data.lt.RecordCancel(cm.k) + } +} + func (spm *SessionPeerManager) handleShutdown() { for p, data := range spm.activePeers { spm.tagger.UntagPeer(p, spm.tag) data.lt.Shutdown() } } + +type peerTimeoutMessage struct { + p peer.ID + k cid.Cid +} + +func (ptm *peerTimeoutMessage) handle(spm *SessionPeerManager) { + data, ok := spm.activePeers[ptm.p] + if !ok || !data.lt.WasCancelled(ptm.k) { + spm.recordResponse(ptm.p, ptm.k) + } +} + +type broadcastTimeoutMessage struct { + k cid.Cid +} + +func (btm *broadcastTimeoutMessage) handle(spm *SessionPeerManager) { + spm.broadcastLatency.RemoveRequest(btm.k) +} + +type setTimeoutMessage struct { + timeoutDuration time.Duration +} + +func (stm *setTimeoutMessage) handle(spm *SessionPeerManager) { + spm.timeoutDuration = stm.timeoutDuration +} diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index bfbe878b2..c0d6512b4 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -241,6 +241,115 @@ func TestOrderingPeers(t *testing.T) { } } +func TestTimeoutsAndCancels(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + peers := testutil.GeneratePeers(3) + completed := make(chan struct{}) + fpt := &fakePeerTagger{} + fppf := &fakePeerProviderFinder{peers, completed} + c := testutil.GenerateCids(1) + id := testutil.GenerateSessionID() + sessionPeerManager := New(ctx, id, fpt, fppf) + + // add all peers to session + sessionPeerManager.FindMorePeers(ctx, c[0]) + select { + case <-completed: + case <-ctx.Done(): + 
t.Fatal("Did not finish finding providers") + } + time.Sleep(2 * time.Millisecond) + + sessionPeerManager.SetTimeoutDuration(20 * time.Millisecond) + + // record broadcast + sessionPeerManager.RecordPeerRequests(nil, c) + + // record receives + peer1 := peers[0] + peer2 := peers[1] + peer3 := peers[2] + time.Sleep(1 * time.Millisecond) + sessionPeerManager.RecordPeerResponse(peer1, c[0]) + time.Sleep(2 * time.Millisecond) + sessionPeerManager.RecordPeerResponse(peer2, c[0]) + time.Sleep(40 * time.Millisecond) + sessionPeerManager.RecordPeerResponse(peer3, c[0]) + + sessionPeers := sessionPeerManager.GetOptimizedPeers() + + // should prioritize peers which are fastest + if (sessionPeers[0].Peer != peer1) || (sessionPeers[1].Peer != peer2) || (sessionPeers[2].Peer != peer3) { + t.Fatal("Did not prioritize peers that received blocks") + } + + // should give first peer rating of 1 + if sessionPeers[0].OptimizationRating < 1.0 { + t.Fatal("Did not assign rating to best peer correctly") + } + + // should give other optimized peers ratings between 0 & 1 + if (sessionPeers[1].OptimizationRating >= 1.0) || (sessionPeers[1].OptimizationRating <= 0.0) { + t.Fatal("Did not assign rating to other optimized peers correctly") + } + + // should not record a response for a broadcast return that arrived AFTER the timeout period + // leaving peer unoptimized + if sessionPeers[2].OptimizationRating != 0 { + t.Fatal("should not have recorded broadcast response for peer that arrived after timeout period") + } + + // now we make a targeted request, which SHOULD affect peer + // rating if it times out + c2 := testutil.GenerateCids(1) + + // Request again + sessionPeerManager.RecordPeerRequests([]peer.ID{peer2}, c2) + // wait for a timeout + time.Sleep(40 * time.Millisecond) + + // call again + nextSessionPeers := sessionPeerManager.GetOptimizedPeers() + if sessionPeers[1].OptimizationRating <= nextSessionPeers[1].OptimizationRating { + t.Fatal("Timeout should have affected optimization 
rating but did not") + } + + // now we make a targeted request, but later cancel it + // timing out should not affect rating + c3 := testutil.GenerateCids(1) + + // Request again + sessionPeerManager.RecordPeerRequests([]peer.ID{peer2}, c3) + sessionPeerManager.RecordCancel(c3[0]) + // wait for a timeout + time.Sleep(40 * time.Millisecond) + + // call again + thirdSessionPeers := sessionPeerManager.GetOptimizedPeers() + if nextSessionPeers[1].OptimizationRating != thirdSessionPeers[1].OptimizationRating { + t.Fatal("Timeout should not have affected optimization rating but did") + } + + // if we make a targeted request that is then cancelled, but we still + // receive the block before the timeout, it's worth recording and affecting latency + + c4 := testutil.GenerateCids(1) + + // Request again + sessionPeerManager.RecordPeerRequests([]peer.ID{peer2}, c4) + sessionPeerManager.RecordCancel(c4[0]) + time.Sleep(2 * time.Millisecond) + sessionPeerManager.RecordPeerResponse(peer2, c4[0]) + + // call again + fourthSessionPeers := sessionPeerManager.GetOptimizedPeers() + if thirdSessionPeers[1].OptimizationRating >= fourthSessionPeers[1].OptimizationRating { + t.Fatal("Timeout should have affected optimization rating but did not") + } +} + func TestUntaggingPeers(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) From ba98db8ace0866ace9e517030f15fd514276e5ee Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 4 Jul 2019 11:06:47 -0700 Subject: [PATCH 0780/1035] feat(sessions): record duplicate responses send duplicate responses to the session peer manager to track latencies This commit was moved from ipfs/go-bitswap@0d8b75d72dc9894e3600746126801821f44592f3 --- bitswap/bitswap.go | 3 +-- bitswap/session/session.go | 8 +++++--- bitswap/sessionmanager/sessionmanager.go | 6 +++--- bitswap/sessionmanager/sessionmanager_test.go | 6 +++--- 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/bitswap/bitswap.go 
b/bitswap/bitswap.go index a05c4ca6b..1056cd69b 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -333,9 +333,8 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg defer wg.Done() bs.updateReceiveCounters(b) - bs.sm.UpdateReceiveCounters(b) + bs.sm.UpdateReceiveCounters(p, b) log.Debugf("[recv] block; cid=%s, peer=%s", b.Cid(), p) - // skip received blocks that are not in the wantlist if !bs.wm.IsWanted(b.Cid()) { log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), p) diff --git a/bitswap/session/session.go b/bitswap/session/session.go index e847bf43d..8a77baa22 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -147,9 +147,9 @@ func (s *Session) ReceiveBlockFrom(from peer.ID, blk blocks.Block) { // UpdateReceiveCounters updates receive counters for a block, // which may be a duplicate and adjusts the split factor based on that. -func (s *Session) UpdateReceiveCounters(blk blocks.Block) { +func (s *Session) UpdateReceiveCounters(from peer.ID, blk blocks.Block) { select { - case s.incoming <- blkRecv{from: "", blk: blk, counterMessage: true}: + case s.incoming <- blkRecv{from: from, blk: blk, counterMessage: true}: case <-s.ctx.Done(): } } @@ -308,7 +308,6 @@ func (s *Session) handleCancel(keys []cid.Cid) { } func (s *Session) handleIdleTick(ctx context.Context) { - live := make([]cid.Cid, 0, len(s.liveWants)) now := time.Now() for c := range s.liveWants { @@ -415,6 +414,9 @@ func (s *Session) updateReceiveCounters(ctx context.Context, blk blkRecv) { ks := blk.blk.Cid() if s.pastWants.Has(ks) { s.srs.RecordDuplicateBlock() + if blk.from != "" { + s.pm.RecordPeerResponse(blk.from, ks) + } } } diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index a2617073b..5a7c7d9c3 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -19,7 +19,7 @@ type Session interface { exchange.Fetcher 
InterestedIn(cid.Cid) bool ReceiveBlockFrom(peer.ID, blocks.Block) - UpdateReceiveCounters(blocks.Block) + UpdateReceiveCounters(peer.ID, blocks.Block) } type sesTrk struct { @@ -128,11 +128,11 @@ func (sm *SessionManager) ReceiveBlockFrom(from peer.ID, blk blocks.Block) { // UpdateReceiveCounters records the fact that a block was received, allowing // sessions to track duplicates -func (sm *SessionManager) UpdateReceiveCounters(blk blocks.Block) { +func (sm *SessionManager) UpdateReceiveCounters(from peer.ID, blk blocks.Block) { sm.sessLk.Lock() defer sm.sessLk.Unlock() for _, s := range sm.sessions { - s.session.UpdateReceiveCounters(blk) + s.session.UpdateReceiveCounters(from, blk) } } diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index ef1293b35..19f50e335 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -30,9 +30,9 @@ func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { func (*fakeSession) GetBlocks(context.Context, []cid.Cid) (<-chan blocks.Block, error) { return nil, nil } -func (fs *fakeSession) InterestedIn(cid.Cid) bool { return fs.interested } -func (fs *fakeSession) ReceiveBlockFrom(peer.ID, blocks.Block) { fs.receivedBlock = true } -func (fs *fakeSession) UpdateReceiveCounters(blocks.Block) { fs.updateReceiveCounters = true } +func (fs *fakeSession) InterestedIn(cid.Cid) bool { return fs.interested } +func (fs *fakeSession) ReceiveBlockFrom(peer.ID, blocks.Block) { fs.receivedBlock = true } +func (fs *fakeSession) UpdateReceiveCounters(peer.ID, blocks.Block) { fs.updateReceiveCounters = true } type fakePeerManager struct { id uint64 From 9eef1f3e177437533b12ed05673abaa609b2b3fa Mon Sep 17 00:00:00 2001 From: ZenGround0 Date: Tue, 23 Jul 2019 15:13:57 -0400 Subject: [PATCH 0781/1035] Fix typo This commit was moved from ipfs/go-bitswap@fdd54d5ef171531d4e2868af6dbdfc11d5d8a48d --- 
bitswap/peermanager/peermanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index 3aefbbe6d..18fc56b7d 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -9,7 +9,7 @@ import ( peer "github.com/libp2p/go-libp2p-core/peer" ) -// PeerQueue provides a queer of messages to be sent for a single peer. +// PeerQueue provides a queue of messages to be sent for a single peer. type PeerQueue interface { AddMessage(entries []bsmsg.Entry, ses uint64) Startup() From 82293e78e4f52cc1e5854b85c46ce59d6f1ffa3d Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 31 Jul 2019 11:34:49 -0400 Subject: [PATCH 0782/1035] fix: memory leak in latency tracker on timeout after cancel This commit was moved from ipfs/go-bitswap@213edd7c6930f64df4706849fbdd87f86ee124d1 --- bitswap/sessionpeermanager/sessionpeermanager.go | 7 ++++++- bitswap/sessionpeermanager/sessionpeermanager_test.go | 6 ++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index 471e982e7..b6fafe090 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -328,7 +328,12 @@ type peerTimeoutMessage struct { func (ptm *peerTimeoutMessage) handle(spm *SessionPeerManager) { data, ok := spm.activePeers[ptm.p] - if !ok || !data.lt.WasCancelled(ptm.k) { + // If the request was cancelled, make sure we clean up the request tracker + if ok && data.lt.WasCancelled(ptm.k) { + data.lt.RemoveRequest(ptm.k) + } else { + // If the request was not cancelled, record the latency. Note that we + // do this even if we didn't previously know about this peer. 
spm.recordResponse(ptm.p, ptm.k) } } diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index c0d6512b4..c743cfb7f 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -342,12 +342,18 @@ func TestTimeoutsAndCancels(t *testing.T) { sessionPeerManager.RecordCancel(c4[0]) time.Sleep(2 * time.Millisecond) sessionPeerManager.RecordPeerResponse(peer2, c4[0]) + time.Sleep(2 * time.Millisecond) // call again fourthSessionPeers := sessionPeerManager.GetOptimizedPeers() if thirdSessionPeers[1].OptimizationRating >= fourthSessionPeers[1].OptimizationRating { t.Fatal("Timeout should have affected optimization rating but did not") } + + // ensure all peer latency tracking has been cleaned up + if len(sessionPeerManager.activePeers[peer2].lt.requests) > 0 { + t.Fatal("Latency request tracking should have been cleaned up but was not") + } } func TestUntaggingPeers(t *testing.T) { From 0cc315bbbfe720af136d48aaa1a0af3a8863f3c0 Mon Sep 17 00:00:00 2001 From: Hannah Howard Date: Thu, 1 Aug 2019 07:36:26 -0700 Subject: [PATCH 0783/1035] docs(README): provide detail on setup, usage, and implementation (#161) * docs(README): provide detail on setup, usage, and implementation Greatly fills out the Bitswap README to provide a good intro to the library, how to set it up, how to use it, and how it works. 
This commit was moved from ipfs/go-bitswap@1137add2c75f05c8df86cff2c81f8a386851e27e --- bitswap/README.md | 179 +++++++++++++++++++++++++++++------ bitswap/docs/go-bitswap.png | Bin 0 -> 47568 bytes bitswap/docs/go-bitswap.puml | 46 +++++++++ 3 files changed, 198 insertions(+), 27 deletions(-) create mode 100644 bitswap/docs/go-bitswap.png create mode 100644 bitswap/docs/go-bitswap.puml diff --git a/bitswap/README.md b/bitswap/README.md index 62bbd9b39..3f0ae6f08 100644 --- a/bitswap/README.md +++ b/bitswap/README.md @@ -12,47 +12,172 @@ go-bitswap ## Table of Contents +- [Background](#background) - [Install](#install) -- [Protocol](#protocol) +- [Usage](#usage) - [Implementation](#implementation) - [Contribute](#contribute) - [License](#license) -## Protocol -Bitswap is the data trading module for ipfs, it manages requesting and sending -blocks to and from other peers in the network. Bitswap has two main jobs, the -first is to acquire blocks requested by the client from the network. The second -is to judiciously send blocks in its possession to other peers who want them. -Bitswap is a message based protocol, as opposed to response-reply. All messages -contain wantlists, or blocks. Upon receiving a wantlist, a node should consider -sending out wanted blocks if they have them. Upon receiving blocks, the node -should send out a notification called a 'Cancel' signifying that they no longer -want the block. At a protocol level, bitswap is very simple. +## Background + +Bitswap is the data trading module for ipfs. It manages requesting and sending +blocks to and from other peers in the network. Bitswap has two main jobs: +- to acquire blocks requested by the client from the network +- to judiciously send blocks in its possession to other peers who want them + +Bitswap is a message based protocol, as opposed to request-response. All messages +contain wantlists or blocks. + +A node sends a wantlist to tell peers which blocks it wants. 
When a node receives +a wantlist it should check which blocks it has from the wantlist, and consider +sending the matching blocks to the requestor. + +When a node receives blocks that it asked for, the node should send out a +notification called a 'Cancel' to tell its peers that the node no longer +wants those blocks. + +`go-bitswap` provides an implementation of the Bitswap protocol in go. + +## Install + +`go-bitswap` requires Go >= 1.11 and can be installed using Go modules + +## Usage + +### Initializing a Bitswap Exchange + +```golang +import ( + "context" + bitswap "github.com/ipfs/go-bitswap" + bsnet "github.com/ipfs/go-graphsync/network" + blockstore "github.com/ipfs/go-ipfs-blockstore" + "github.com/libp2p/go-libp2p-core/routing" + "github.com/libp2p/go-libp2p-core/host" +) + +var ctx context.Context +var host host.Host +var router routing.ContentRouting +var bstore blockstore.Blockstore + +network := bsnet.NewFromIPFSHost(host, router) +exchange := bitswap.New(ctx, network, bstore) +``` + +Parameter Notes: + +1. `ctx` is just the parent context for all of Bitswap +2. `network` is a network abstraction provided to Bitswap on top +of libp2p & content routing. +3. `bstore` is an IPFS blockstore + +### Get A Block Synchronously + +```golang +var c cid.Cid +var ctx context.Context +var exchange bitswap.Bitswap + +block, err := exchange.GetBlock(ctx, c) +``` + +Parameter Notes: + +1. `ctx` is the context for this request, which can be cancelled to cancel the request +2. `c` is the content ID of the block you're requesting + +### Get Several Blocks Asynchronously + +```golang +var cids []cid.Cid +var ctx context.Context +var exchange bitswap.Bitswap + +blockChannel, err := exchange.GetBlocks(ctx, cids) +``` + +Parameter Notes: + +1. `ctx` is the context for this request, which can be cancelled to cancel the request +2. 
`cids` is an slice of content IDs for the blocks you're requesting + +### Get Related Blocks Faster With Sessions + +In IPFS, content blocks are often connected to each other through a MerkleDAG. If you know ahead of time that block requests are related, Bitswap can make several optimizations internally in how it requests those blocks in order to get them faster. Bitswap provides a mechanism called a Bitswap session to manage a series of block requests as part of a single higher level operation. You should initialize a bitswap session any time you intend to make a series of block requests that are related -- and whose responses are likely to come from the same peers. + +```golang +var ctx context.Context +var cids []cids.cid +var exchange bitswap.Bitswap + +session := exchange.NewSession(ctx) +blocksChannel, err := session.GetBlocks(ctx, cids) +// later +var relatedCids []cids.cid +relatedBlocksChannel, err := session.GetBlocks(ctx, relatedCids) +``` + +Note that new session returns an interface with a GetBlock and GetBlocks method that have the same signature as the overall Bitswap exchange. + +### Tell bitswap a new block was added to the local datastore + +```golang +var blk blocks.Block +var exchange bitswap.Bitswap + +err := exchange.HasBlock(blk) +``` ## Implementation + +The following diagram outlines the major tasks Bitswap handles, and their consituent components: + +![Bitswap Components](./docs/go-bitswap.png) + +### Sending Blocks + Internally, when a message with a wantlist is received, it is sent to the -decision engine to be considered, and blocks that we have that are wanted are -placed into the peer request queue. Any block we possess that is wanted by -another peer has a task in the peer request queue created for it. The peer -request queue is a priority queue that sorts available tasks by some metric, -currently, that metric is very simple and aims to fairly address the tasks -of each other peer. 
More advanced decision logic will be implemented in the -future. Task workers pull tasks to be done off of the queue, retrieve the block -to be sent, and send it off. The number of task workers is limited by a constant -factor. - -Client requests for new blocks are handled by the want manager, for every new -block (or set of blocks) wanted, the 'WantBlocks' method is invoked. The want -manager then ensures that connected peers are notified of the new block that we -want by sending the new entries to a message queue for each peer. The message -queue will loop while there is work available and do the following: 1) Ensure it -has a connection to its peer, 2) grab the message to be sent, and 3) send it. +decision engine to be considered. The decision engine checks the CID for +each block in the wantlist against local storage and creates a task for +each block it finds in the peer request queue. The peer request queue is +a priority queue that sorts available tasks by some metric. Currently, +that metric is very simple and aims to fairly address the tasks of each peer. +More advanced decision logic will be implemented in the future. Task workers +pull tasks to be done off of the queue, retrieve the block to be sent, and +send it off. The number of task workers is limited by a constant factor. + +### Requesting Blocks + +The want manager handles client requests for new blocks. The 'WantBlocks' method +is invoked for each block (or set of blocks) requested. The want manager ensures +that connected peers are notified of the new block that we want by sending the +new entries to a message queue for each peer. The message queue will loop while +there is work available and: +1. Ensure it has a connection to its peer +2. grab the message to be sent +3. Send the message If new messages are added while the loop is in steps 1 or 3, the messages are combined into one to avoid having to keep an actual queue and send multiple messages. 
The same process occurs when the client receives a block and sends a cancel message for it. +### Sessions + +Sessions track related requests for blocks, and attempt to optimize transfer speed and reduce the number of duplicate blocks sent across the network. The basic optimization of sessions is to limit asks for blocks to the peers most likely to have that block and most likely to respond quickly. This is accomplished by tracking who responds to each block request, and how quickly they respond, and then optimizing future requests with that information. Sessions try to distribute requests amongst peers such that there is some duplication of data in the responses from different peers, for redundancy, but not too much. + +### Finding Providers + +When bitswap can't find a connected peer who already has the block it wants, it falls back to querying a content routing system (a DHT in IPFS's case) to try to locate a peer with the block. + +Bitswap routes these requests through the ProviderQueryManager system, which rate-limits these requests and also deduplicates in-process requests. + +### Providing + +As a bitswap client receives blocks, by default it announces them on the provided content routing system (again, a DHT in most cases). This behaviour can be disabled by passing `bitswap.ProvideEnabled(false)` as a parameter when initializing Bitswap. IPFS currently has its own experimental provider system ([go-ipfs-provider](https://github.com/ipfs/go-ipfs-provider)) which will eventually replace Bitswap's system entirely. + ## Contribute PRs are welcome! 
diff --git a/bitswap/docs/go-bitswap.png b/bitswap/docs/go-bitswap.png new file mode 100644 index 0000000000000000000000000000000000000000..2b45b8d9b5a84b02dc83d0aaf33a713b6fc2bdef GIT binary patch literal 47568 zcmaI;bwJeX_XP@L7lMQ+$bgd4NHf$?y1PrHJBKbsLAtvHq=)Vn>7k^hk(BPP_Zbk+ z`M&peFMp^^e4g5S?X}mlx870`f)DTG-A6$|c_=KzCxe1=lN<%*y5QaG;1z1?>`?F* zg$=)wjkdXkgNd%54T_+ynXZ+Vjjqn~cMi`DY-}u`baWObT4pvdQxjTkb5jg@c6{)J z=Z11hHdn8sTmz4>kNYIIZahl==#;kg++s(TA>XnPUVJ7cGOCl(XsVFD_&h8LswmkQ z!5}PgFXjBWt6=;jx4v<6yvj~;okSh4y`;1EX6{F3b7!<6%6Ka8-k96Q^qFaQ$zLEc zgI=kTQpQAM9Spri2~F_()VR=jK1Fh16i-KwJH;eW6WUL7R)j#q*!>(k+>KhKetY3M znZug!BCe4^ZO%lqzr1hIy0JX^0V}We{wFR=!yB}RUhbX$^gMr(e`^@^(MP7+2BlY`+;2L%q?Z6MSl%)e&w3Wyk4-uxa z3u}-OXT9)9n~XP5Io;ytlOu|^R$u%~QfTREK@+1nug|gl>NDsd%zAAc=V7zwy^6i3 zsBd4(>);fR2MlrB)%59UKDc{;2~{_6;^vAsr?19+onT%tzyAxx4`K6>{&BO>W{5J8 zL4j)R)JBjnmCVkIbEAL{I3H^$?{ZZg*<9;#JbevCjNjIl zWf1tpTh)$uj=7hvYH4YAk8`tK8;g#Jh=`8% zhJg<$RajX>-arw{#AkpjdBr&s5fg`ogfQVfL{7xr3I*k<7`xnZ6ub4j=fh{&g@Dy`>gek`+1s~C{gntiH|OaatABAoDmB6{^jS~VM?S!d73=7 zD@qa)0rl1sb#87pHai2^isGeoW#*G-i|L6OvK|}lbaiUg_Q6))pcm%}PkF<~&%e7g zkZm-Og%Gt@Q&)c&>o!^ILOL21^|WtALP0^n3k_0xF#W)2@cGKmf*Eh}1UBabdj89) zpePBlSHPAPJZ?Yy`h`p)?kOG~)TlqBY{IcUpJ#J-NYlx|0m5P1!Q?c%wx*FT5pO({ z8(mJ)(9qBlNLc4`Y%7|1BxAO5dc4nJyYhQ_T1HCh+vbOh4F!*OH~_Q$8m3ZeayV6g zgX(9z%kf^@n)A_OdfK^R0pxITu02v!L^<5mgVl6|MAUV=lNF!tYCpoffR%_+$meNr z5fi7h!cR_4a2fTA4g1nuPmgkQbC()UBk42~xLqoJeMMJCN;qtOe>p3v{)m^4kg@_#0G7-vOV`FN{Y0@bIV4@H1 z-E%nI9nx+3{{2lfv&m0-+%qtY&cR5DF~eB4VxCq1P;aWJuepVA6R7~@vy-8^v+$W) z%NH(|A7Hf=S$D#JU*vixFh+Tlvu(HY2ZY?tP;@`8-ZY zNGK>MI7)xAIP{{v%&*jR)b{M;5OWKW&sRAj3LK>Z?C0ueS)|skWnKsa-3PuUW}|+o z_0ivxB=_=~Nby+Bs2jK(cOG>IyA+p|MM^Wj#Ah-HDm?m;#H(8w7#L`a1_r{m3qf%i zEj8`t+K&?po?GaMPO%>KffEf%%iN>tOMNuo8M{C2OGkNX8g?fxOq1PW%AM#Q!&tsn z16XSUmy?fqe!`oz!JOC*L`TQ0ef^bfbE;L^WIFAyi5O7T?^jqa=p^&;GmLzBeS`F8 z_#*{@@Z%Sl@b--uW@DdO*9#-b? 
zny7J_tgsg3_&v1!O(s}ggiEwqNp|$VadFA|*adz>K32Z1 zzs`IozON)@H9wyockM@#N2N%SUZ)OXFh|vwTDcLrbCQ7oj7L}DcyG0Yd};Wrpo*Za z?OvZG4~;I7{jmUHmdN@z{ZX*8q04J{H!czAeHy*$qrM|x~kRANv^hlhp#IZxyvSd+P~<}a`< zwESZ`g)&E_#LC8odMjWkgk>>!FzMq~ihzH}uUJ;|r7|>hbo6LUY-~9Z11z+g za3E)6I{*ulIg8f>cGj0JdA<-B&@U3X%2C@tSg?we6p5{m31GiF#Th=84Gab{jIGK&7snOwovl+4NLgy(VgBophFU`EtHL=|;%o zTHQE|821LY^s`Rx4ejutpf9G7}?7~4{yR?SF6j*Kaw6p>L zK}MoqaDkNIN1Qx7bq8ZeaCJJ~GhOw5cYUEdvCeK?BlweOj7D5_HD_C?SYj2e?);?N zd5vw|La(`uuv(q#>DqV|V&l_;C;eLSLa)PjUwsV`QF2}8X*z;s%qt9ziBNtddZy;w z=I4hR%)7hV?Ec3R?r@S&Q?CF=%nxwsF@&U%5D|SJ8d9vXdwAto*U+VTunlev{@S~P zC~Knt%mm3lkbn+Q9T~^JL8s8ybSEn@6Ga#jZPo?QIx+v?JzDmg?_)4|4Wekt%1VxK zJ?|^?d#uvp{q2_G^iVJ*Yy4d$ndJ<-*^uT@-u^s zUXLIpB~49D73PeIjs}5BT!rEaxNro5*Jg0vqZ5z&eY=IZl|{khM29A&JXoOB^}V%K z$XifWMuvo>6M;zXd3QCMR#@6GTNyWH4^*K$?gTaxAy)_FRZwn6ys)$;$E-E?TTSFd>h0dAox*HWDRIm zmO@FMa?w!|ukpjj`hj@NZ-0E5c-;0{Arf(VvQ069)z>@N))=YjdhOpwQ-6b+O-dO3 z__8*6wkKe_M}Uj=aF+{4iIB3~i(z(s@#0+8a{2d2Ny=WMa#8btX5FOJQtzQ9<9NKE z$IvaL4o`8|CSP4y>53oCk^mekf&t?{gET-33#TFYSw{!XQqBrmzv{uW0IABboM~}! 
zInR_BGAOOyPlyebb%{RP9V*%pi{o}#sXJp9=>!<%R$@MxWj3xtgi1kS+>aXGgozok zzp~hy+!VUKAURkXAROw+wWn0_?VBMYjMr;W1Z}1TT{=g*FYObrS1noH@%IN^_4SOR zp{Byw*x2|d2QvkQ?KvvUhfCSGF2}Vy<96IfqP&OU`T2)kT{$GfIM<2J1B;5!w&Ly6dvU^W-1h`N}`?6CeQt;PzCZ(DO5J64X0mx23#S<3PemPW>*Dw!QhzMnOH*UP?wiziiFAfDSH4o6z!)@$uvOiQ+w) zcw1Daqb%zqB{PU{YeohlWGZm!z4>U6#j{hndkE6+W&>H?l&E&=YkBh7o{nC>{8=6S z@rN~W*M|FaJDv`G|L!po{W~s0MNmVdaF#_E@pL!h!_74!%B6P?pABN&3Ho(%f-NZM zl`6_JP#Q3heLgfXan_rv6ixKvEP}?jIEPo92)!3GIi=1~rxdE#);X?Oqz8i$lQNs1 zEkH22QeV{d78|CtN6y%PzyB9f@&g1L8@;`$0R5)cDno4rQ6u)N9EY8)>+V3ENXS8z zYMBHg;N<8WsV*#|rA|3w#660LUnHMULiq1vK03d@ZlKS9u{jm+vtS-5#)o zXRMf{+f!4~chEh#&g^TP_R+8l0>-QAVj1;&kJr(|H4lGz-3shbSfB*_#$#{E2ljFw zx)Ua_INA5&z*lqEDM$!Pt@@tWSmW3NV_efWNZu(cw{%TmhgSMij zug_|?=H*D1g;BAUJ4&O9llcH0cPr~iFH2Cxi{VGvy6D7&+Y`wk=>aJ4TL@!wiFm9Y zFxm(jYCzA55f#>|x@-(v?b9GYGF_bA1&;=sgm?=HxsCdJqUP8d!N@<5#qQ`B&|kFC z5lw)Jc;b*H;*^XRyfC{`QdS0X*AJCSDFP2CT=qMARa>&;&zGm9N|cMpZP!i|MxniZ ztRkyaSlipOBH9o~1&6)i(jq;7#j2bkxUzZy8doYZ*Sa)#@aN{;Wg+p04<5wQNtd9e ze}xDCRDpTmGBz_CNbMi5FU-xab$3mq)WVjk)}5JRXr>=)XlOg`TJ-2y3O*)Gl-j(1lETQ4DyOcX!v^LIRF)vnpGLR4zTXkMG z`OcR*)B4?}WKtqW*)H(Q23L2A1-=#xWyjN!BMyIq3GJJdw(1Z z1>PVcWoGtUGId?e%$S0c^KHjd#Z7+#u7v$9ywLE+=HEIB{$d3j0hPE(`>5n)g2YV_ z5GbksML?x+SlH}iktt*dyoy-JBl#QT=1n2w*Uz$#%VUt<{cp(9hU}2FK#>#zg#COp zZOe6mu7UAPBg9>m4B)tgZTu(1A$>fXMl zR;e5F5f!>64r^()imq3Cm5;Vuj23c8^6&4)yj9kRrS7`V=Bd@VtS2bVdAwby*j=`o zDw|J-0#APTh`KJ-b=4YYZaI%yt6M87*KX}gNZwce4+@l!kdpEX2zY+_^eN_=@sXs` z_R^$iiNUvN*h5V!-BK+#NR8uH#f)L%lIO4&%*j0l2GLjZRYEs3G{n69$;(T$wW?4? 
z`804?%XCuZaNMVty4vX1bcuwEXe{h)K6N#M?rkbA@f_X7GeV-Hh1(ndB$y43K`1Z5 z42Y6iTU#H7w?6S5VqPla-iVcI#3&?lF*1z~mU`8GdEI)HXvdQYW&sL|A46QBBii4|cV& zE@JIvX^dkMy_(F{*4Dq1aj$2qg6auI7JRbXZm(46&5%SKsYYstmf7OF&>iD6pGQ89MR1F~qG#1pPk=N+dpyND@|@F2 zm|cOoI+k#TbGmns1VMNyjiD^(yM_q3ST%B#J1_fKeYOuXAAh!(o~(jC&rbE1Ng>#= zFdq*Yzf-QV&iE>}!BInnpa%={#$6 zpp3fl$0J??j+=%(J8NcbnfJEa=fE`qERZUsp>rlCW6)z=t^AGuU*8!N*kYU1-sYu> z5?0IeR`&LyifW4v%J}$RmH-=DhT`wAyB1?^;{27?8GL2WVz!q*`It*i2Q5BUeE!GX z;5Rxx#kMfVF6}darr~3;V{C1ZwsWf@Q&u01lvw>c9FF8`iM7Fil=gjCz%<TXDr<_XU1p_ zx1Ku7K3Jb`VGEq{nEN|(QcJ5IPl$T6!pSl||AE6^h)rT@^Uo=;HNl_Uo~ZQc~q9 z?LR|(S){!G4ys&~-Tf6z$}h~pgf$S1)jVq7cH%Ws^Kk1S3~5|QaW)@~{yzB=(?_r9 z`^y@9vDW%0eFxS>Fv*_xif7q&}!vJ~48iWD+mF3$PP@10kuQQ2J6(PcC`W z^^G%G<+v2qO1o)i>gVQM)?P{JoWE1SEod>#Dp*I?N!N{9fyg2~tD^kby7a5Rqad2W z!IGiIy<2&DD&oAvZATco+W%fSaWn4m-wX{$4RsO%97to2M*IB7qAo@x7y*Jrrq1<&U>Yl4;Sc84y-Tt<@pE|=FJ?NBpSD(pX68jO)OX?7}e(293 zUhPq62X;Hd;A%Q4RzaV~ZjQ5`X?E^yS7Rm`9gUCm=&lS8ypm)6I~^lhQ*j@YHN6${ zJwvRXpCvN2c&^*!WMi4VoBB$TDGg1^R35F;q}b~+cD zN{HSHr*J39{{aEuZ%_S3V?@JuKXHGmhOazbz?heP{!UYsBY?Yx`a|iTfU9)psb{$c zR(H<0wD&U$Q_Z6ZtqDDOGUt5!e+TohY~)~bh%=uvYXG}BjA~644crabZt^FK@0TL! 
zH|Uo)Y60RC$o;cjkm_t{OngH}`VP`^I`l(=?bqcIzlBIim?fUlG&?l<@5qMNl6v!| zDIx^l5UBIH?bx?Tw0|l2m!F2ccN#7OAQAkJn|gR;$1KMfn0Z7FL-#Zk909_m>d9Zb zaVXs6U7fghBRx(7=f$8F+OxecU+4=fV%eX>V=HE*^N8R5LzRHMfe~E8=1KI-L3tR# z$5;@sd?%o3H^!hV)o$T?IPkLhjYXR)n{9AzJKTM>!dac^I?3@Q_eV1FB#@~d{Dw~my#=Ca zgQp=4pbwant`ECX3)oBPoI9#g!XRh@j0Dzpq5lqEgMKzxaWrxL4ZNP8#V$0Ap?-aA z1hUi)rwrbHX6JLRBqfY9(HRIJJwa3Ek4NM`UM`PxSr&25Qci8RyfY~8KrufoeTO^B z%~AQIUj#?C6RQ=fs@fvAs4vfv?mWOBr600df80dD0D>7K`q?~Z5r`ZOI77MIp}?gJ z%LVK~R0EKC=hi3uz$dn6dW18U1D2Nh+OYeB8@Ua`oYFP*lVJs7p5C z7wS@{PuM`y#<~d{>_P`--{RY8}e6i%9R0% z(>DYHktNil{7!L;XazFw(n{t2-|xq)%_vwb^A-dQFfIThiDZQC;TKnj%E8IL%goXu z-N~xjuO-ZMtjpky4fs)4JMnZJDze0#YxBJnZ&_(>^$8vx|qnxf`G3#@B?W~@5^umi-eBBq})gw){^CS27Y2H#hHK3HHihwy1p0^xYHM;8t& zZkx5N4Pj&XcYiwCp#h!fB{2jTfK>t0Nt`SXZNRz$PGTVFxu1jikWo2(N4O6b02E{X zvwXkTv)xscSJyTZN)5?R{_~gx-vzC(_Vt^;%u%#bl zT2o<(`gFgfn*MP2lwSDER6YIh+3B(3ut^=-rhg7ZP}LHv#`>2C2swb=vZ-8ceFpw= zE5gUlB4cT0`}L;sr%!kEJMkugV$$qSxW9Xv4pmPX-kNYS18Ris(U4|$FQetrPQm%n z&`ahrhwZb0T9@ofm~qX?)`R~`A7e>{>Dmn1a;b7XT>x_WeROoGwDavmVl{QA1kS@5NuEy`@&DLJ z^sf!Qc-Qyk^*CMahKPg)O&_5Xd}Ogtdg_@9nl7%%7V^3lE$n3 zNu4k9+(8_hb^p6lR`3~+4}0$d@ld72DEWTFc-1k;)~8>hIW?a9-g{4N472R=7buDT1tRuCTS0lcj98@c!z_WUIV8-_^<4}2q$J{rlvNjW%ER8LKuBg<8Pb&EFa?apPn5% zR8$-n742u3#~-b65c=!++JA~1iqfc2+Y##@yoVlGFUd1*)t&J8-Sh4FQ_=gT>k~f^ zCBPvp($7lwZq)!X0yxYSE`+KGnyiEXb7p6gGwp3`12=o)rw^O{2^)pTrdJ}RKwu;N z8Jkh%??)`vBJworBqC`cjYJn?ZASdncs6~I8qdx>>rmwu5H$8Hj(@&Q>GTuvTb-}J z(|+Z7^ue%~iz~xXv$F$=d25v1%M&#>fI`)-x#|rT<#tR{+8P2S$2rG70z%#Tq$IOf z-c69#MIMZ@jG6+52>1?u-PO5!wD$kZQ(aX=64zN_|Jt1@P*XkTS8vejP;Vn?fX zm}F}ndupKpSSP@GWOVv?HIbK$h|Y(nnLF|AIX|Hc*N4c^qRraHSs5uUjazyE9OF0r z9i`=eq?$8$xoZMQLVwyXi|_96051=CcV!6(?aK(D4FUG|OXAe@SLo~!1xV5?2`mkb zG4C*3Q4uAt_c=2wOHa>Q@&MO!)UjseRtmVn(CC~QzHe?YLPab|C5jcxSVwl2=)vrb z?G@60;XHsr^-fLOt_9t(0-v^Gtc#PpB}ib+R@q|EBeefq#Yn5Hjki`*a>$Ff@M!K{ zDL|$tofR$fUTKa;eKsW(!LA}=e*gIjiP9S9l)Lr7hdCs#3AWH1%ad7;=~aJW5Nv%}3*1v~Av zg|>ff!2TvOByx0cRF3}uY%6@|`w~#hDufqt>S|l6mX~(WoGkl2>2M+$nnef}oW|T9 
zt}2itlj}l+wZhe@{GPWmv2y0=1{6;JO>*(EaAcZw#a!Ah_X7pj)76+)vPk9TFmEk0 z+pS+y1RMv5RGq5tb}ejS9V}1mDo3t%X;Tx)R<+&%S_H@#j@wJk|A&e-z;T{1!GdVYfF#gY&?JvT{#N76{2G;zbSa`} zcHjfx#AUhX#vdvy46eDfw6u~0c;UqTv#kVeu5A;BOn4OcF#~@exOg|;k!{nvHqB9l86rO>Ir@jCw_UAi+$(NDWokqp$pc!PHm}qO( z`xUNJO3I3nkB~~-yI`aa*8Cc-?O_<~63i8H8dB4aK=e*b0$&PkITW#^a9Oqk`v53| ziV21A?#_ZC)1@=y(=0!lY`v@YY^|TivUSIbhd3-cnnGF#Tm;I<^h(SCdem3kT6?+r z@oI;n1C2?}Tz1aY_Rr+EP2h(jYfah3KV@WsLZ5-e_@p@=`_`rNDAmC@U%o^##{owR zu;0L7rGnukFS`+lkl$)4J#kEE?;zyjzAL^f!7Gs~>KYmv!pF5OrbcS4fgrJHn{lR6 z@+)m9aakm)#D4PUnH4Y{0n%o+K%u8>srcQp^7r3C^vdPSm?KFXYmY}uzE>RrDdDrp zTEx;K9hPBD;4Gk5fK>lR^|*9eK)4a7{-(5RmNjH5gV`TD`k+8D!H=&}gVaR2P>t&^ zrX9KLmy4HR$7fSF_SxFt#$uF5B>%~9vI_%ze^sT7J`8tPa&DMDYccU0w7krN5RoG# z^;{xtoiliU5n@mq5?M;N`kTR4F;N)ML7HNwjvr7NvI7VZ>kMp_59;q?664W5jbA~5tdW0ec(>VEJ+X&7Vu zE#ePhq3(qi_AyK!I{+N!sk2Ofe6tFC5$vNVAr;jc#%Sk_{g(kzu?Mkp6Q3F`$=I`! z03i15p=}3%oJRawuCj6q5aU|s&A@O9gO9^iCCddd7*a4(RM6A@`D5D*aN>ob2jH=! z&$-v?(l*R$nZrGmFIdc~qGEUh*Iv1}eDM!NR_54e@^rC{ls}2_H+as>n)wi#*b((u zX5E3nYl+<5aL+@f7LW~2!yePkt`wu*D|yUcjG&~#lDQ6Y8^#n&_KnLp@C`qQ*Lh~6 zn2yw5d|z4RgeF(Xt9}>=BAMnaxRk5|T!5bg0_j7;deC0bmfY#Ajd2+_0d)s>t9LUu z{GZhzMfbxe7Uc_zQ-@%Un`L}7 zV8Q$v;{p3T;yr`A4rxS?bI+28v>eW#IM&}Dbq1>_Ug~QnVmrB<=$DWjVi22vkc+gi zBXTzCE@*)L9diGp#+zo^?Q6mD5y;&8GMfcVJX{arW3JQ*KGzN}zUI6Vg=2x&i;T3s8QIeBryPPJ1%n@Tg|#_J<$@se{xl=bv; zTZW*Z{jQqvZsGykZYNm*b=Qr{DDalAD`)&g?dCdVS3!GiIujTdpf7_%T;3t}r`{U! 
z1d2uoNTdIq4TKA+(jZ~xlpUNOXOh2Zk*+K_5|5iD?Zw|$Gk5zu8 zPd2dvvoV8Dh@NTiSacX=)dJVCx)GFevBlpM!WzcMyd^SIAT-*Sz-JgYa^zDRjikNZ8asAjAaHy_LPGY@t^}^w zWE9@bbuEH3c_@w1j+HMZ^Vj!_6&>1;@C%h`Wr?xPs?HAyUm!gL$4%)Zy|{kDeAg@8 zrJ}`@^3HXnC%6wJkIlWLpuPE$YG^N&&UAwA3&PrZ;;_ouqmP#|ON zwJ`gUNKAT{v#rld@>#AsJ%XtfK^M!6I5UwKZY7AHEVqvJbpqtg5?vZYbj%l!nN-`q z&u~vpO5iI1ks4j>Xl3T!d6+;d<()w-0PR4ZgeK(lHiRGa0~hQi3+z%i^FA$Vg3d-w zf4+zb`L};_quu@mD30IuUGEK&h~2F%%4B>cuL|WBkXeBnn3!$WYY$s@Z?piEosg?r z2O_gdgwtm%9|qlYT??rTO-_l$hS1B0_(czJYd&^>x4Lrap!6A_Hz01q3udZ!Y7Z1MqMMg2>}d5J9oqz!V3MdXh z1mMs7`ALXLj>!@ThiDHkp4Ltbg{pLAy?*_=>Ka0*Bh2C1vl@aPC+9Ict&zS|cHxwf zLfr?0T3v%$9^DGzjUFGq15^dqP!lsOyWqacdO~iv8pQ2n!hXC9!{kq}zPI%Sp41&c zt4(@T0Tl9|57-~}G7VZT#foFfFX61gQL~i@4D@ zgW_UA@Eh<)-Y}qDn`KSzFAR(B@JzjjMHRt-K?25;FGSpwoe0!YQq(xi;jz{7b3ALO z9$%Qp-&%^_vedQPsxE_B*5*&Bf?=PURaOpw7NSXP0 z8+jtAi=HqM6&M}?MdjLI?lm%gmq6F8CFc+*h2I-L?7k4GIDfKxyO^ z9x&W6)97*sHR4jTHKasKMri3KCG~X3cpBV4^k3dg4JL6gxX$>qYBi(zUN9D9xdn9R96Q;IP^9!pVGhm2UNyFAP`-9P>-c&j7+@{E&V$e zW4A)hjA{9qY*RS|D=I)7FK+?&6y$;1%w9t)J`eay2s-{`6iQxetxD{5=La)^44_;o z@Ci0{6fb*uRaN^QwPx*@4k^9q2oo-{AieVkwDA>SRnKcJ-}dUy9n`TQeLCsH80eKW zsVbJw&Ov1LA8;Z?i*_YGm76p3lY99`<9Z7@dbiddNyl^2thT#kw>Bm{Kfkch9rY?j zJeCC%oZ7))HTIjy44{D1?&M$-R6QDjr3neOg38{BOxg7B-@VQq30X{{P|vs8=@^_> zN0@Qf2C^g1#%Ar(B@@>`xqX%GDjEd(l9e@rR-MwfZI8)(;^c54Q86Np%`#ahDk>_2 zD<&vtW3z>j)p!u@c7ApToj^{njn>}Yo{gQo#B@}lG#~UbEa5Csya%;_ZTaP15~h!E zx3{-LY2fnN5wkeh*f!^9F1iu3)fT_LFC;kS>UG9|lJ*2B%qV!k?d(v0_`$#zo3%m$ zKtQSJ&}sI0nhMZ7f}DQ&D7Aq59Mq6e5M_WX>m9QwiP{8$TTMYG+wlQ?8B*72UP`9g z#07A-VRZ9?W-b4~uoe|M1x*7+=jrD>=XPaU2OGbvfO2O63goi)cGARSMWW6wO38`B zI7l!Z+Be?cxFhFp_%lE0Y|IlCm#D8L08;g?>As|7_s^e7(dGKx1fihzTHT!cZkLJr^UO1t&knom&Cw{wVa(7s{Z`&Ae`3v5o# z_5z=kjvSL7sGOgE!$3X%r6Y#PFvQQtUo4t|-?kofv^c?}pG!X&o&>e(i^IC{V}oiV z9386HGXeHcVkW$U+}3JmY)DagC<5}0>8$5kmL{y|jL!GqsVUAQoLReXDJBp{h$?$S z9pAFw*%vv6I}F>ug0wbIwJaA^$UV3Ag-F*tLn>H^NIvcBB0oJH9b|1hX!>A)@8ksq zRWgBEdozZW>1m<{aL^QmrcH_Yy@sHC0$KUaVYe1%l`>!NafiV~U!Ob$af{-zX81>{ 
zXryYH+2TosY`O%617(>(Z(pD8&z>G>A5m)l=+qV1<^aDU=q|CF@4$sA1gq07x-9@O zk<#dw-Q2jfoWa&|X4m9+Y_<%kTz8(k5&X?ZCp;2H3~rhLWJ}lyyeZxg6Osd94&aJv zuC}F;l5W*9R^@1)>*Bi1U0lhlwHa9+e&g)yYzglFj1>_k;E(t4+#`ZU74aMND@;#M zV@C!ZXy8=2ox2*sQ@)PrD1hqukOt5RQ?1(`QK;fo=u?Ty@O&8Y4NHU=?VHwo{pG$p}XZ8a9Uyd?>kA|fM4D;XII9*E7j@P0jk zsS!|2K!Rm`Jod4qvIPL!sM?tLxx?$sX}|$N1PD(|tGwX!`2jqM`H0q53mY4_M$OUY zOv`z-QLbuw;~HZL=*R&t7mJzqrTKS7)slenX;xA!?-kI@5blgatt3UZEG1Bq!@HRb z%CN&GpkY*Tg~pOxyJ19fBZpp}KDAkZQt|%BvCPI~vZZF@Ssg~8a=t4L^ytvJB|2{j zQ#@R)K+q25O~34EdsEllzqns$4(|Gw&wl_kb1da#RFg&{B}Ge6KrFyX#yb2&IO!LQ zK>ij20!6%PRf7;An^#CVek?U!9=<}FC3>pQTp6fq06D2bg`tD0wRhzuje#u({aWlx zZ?^zA7OpXb;N=4<=t$u0d#q+-QVIE$uBVlLWaQ+=Ejx788y`^De-`k1KB3cqgQDMx zOrbC93k&IxT4WQJfs~Y#at<*aT^@iN2Ir;zhO%L}QbBUqU=rtTvX_Z_eO){>G~%8> z(k?72(%k_KGl&q-9ub|&ZKdFB3>-JRI!-YdcU2wD^wmGfSg=S^&hT$cAXP{@qSA)k ze(g{yG71$rj|-=!-dqFvWl}K4piOFQ zY^+z+%dfJs5@7*)hN@u8X=AR^`is=>y|Kt0ot*4IJ$w&IJP)#GEVZH1q%<}*78U&& zi*xr(A1|=Y>;${5?k1Q%*!f_U6_H`E^NoVLF#`*0_q&@`E{(m z34j?Kd&ZR7KBaIYJPfpse^1tR zy!lWH`^?FCyi&wKiE#qD3X3PTA@vZZSvfPHEi8tCpAG0p>@ zS_hqJlET8$(3oY4YoF2+9G6b_CnbUSkx*wbhj`qBI>#LddQ1$$SAwC&AcP?FaaLDX z@1Wr;>Fk!5vkmJii#qIy*;Xj*;$4$ZP?ep_dk|Y2!sMpRr#%GHm{$`GlS+d8KRHYU zESG<7?Qo=4553+o^c)l7Ax${vtr-d%@iW*G&|xeS-_?*8VC5cUJR5LRn8grWf?Ni& zmyF;d60;GcgsM)-H}{{=svSt)t05z~PiJffx)BsLsk8m59adGMd3Lg0t@xBmjKa4P zXg$ING&B-S>bks2wzjvM0@fyKRAZc7H&1D&GhbtVGI6WtDvk9j;4$@t$=&qkW*u;m zS7lFDc{nnDu9@mB;?3QOyLkfOXmHey#w=V~@?mCcEF#11l4mx2`@Np6u6!~>cy2HV z9|mRzqE*h=hXX(kI`y}KuJ-7xv4H@xyi%Da{*a;GZaP2Uz7;7gKc(nu|E4ymaXq|= z){`Dhd}rn;YutvLZ(kd0$cjgw|H?N!%h1YrejLuNBZz2e&6fHz^J?}+wm~0U7sq;Q zQycugsof;HTgJd~_oGsyvd?7ZB*2P%*X@Qv-Cp%Ru+0zfN#WL)N9bq!+=&@6@0#P& z7hvXa9=zQ`03}E^<`F~d*gBQ{Myt7fM+7a{se`0%7Pbo!);%^XkL{9Bmja_>%ZW-^7 zi0QfqswPa%>JsM8!_VxsLEQmA15Xjr(v;VkB^SAfLaD<9mFx&Xc{Oi%CC3Ma-zN5# zJYR5FD*08&t6os%nO1;C+0*>&@xslJ#b*Q;a?_x6!rd9ZsSi|}%R-(ae_Ro0Jc43Q)97cuK*e0@i4&SL__IrQFm8jF-$B66UGBUWuF}Qz zJY^PHLR$Xwi?!S?MvK8hmIDJDq_2kkuA9>EWVRPiRf5+<3yF-=UPQ 
z5?|Qm>BBQo6yZ@tI&CI@o^6x^G6_B8Eu-M^o=c{B70wdi$%(L)pIy&~N?&_8iW&lj zj=kz_Xt=C14G5DvOzsEtd&3Q=oo%j0jmkjQ9<#9mkBvc2lC5oj#x)tc$9axP3ORV- zZh3Fi{2`9Yuuuv^4NQs8I!iHcuQm7-q=O@<#zpwIGSnF!(~@qV*lf-A%vDA43%KH9Gt~&ef0P&tuIlP_;swIw#{Sqhx z{56GI&*kXF!W<<OQ+C!HCSM?n9gj(83BsS8x|LzWh<0KG7Oi}C`D5}+1=Q1 z-%{^rRFX@Se)nmD+*W+jW5?}cRF4-TNZ$W@|3ybIr z64IQoU%KsQb(hOpgp7|L z=eV3Iiw!nsPYy%F7vI0{!(+-&%~tRS?HUwzqO%{MhQ0YM1leRoVY+A#6~OCrHfOvt zuDw%s^$6ORy%D(Rk24n$-C4=&YzUPY8czWeocK zZOkoYK?Q|s4_F=)RGWr+3WcGf1| zAGFh&y2~pAx-x1HzKh-_lfVaU$**UsM{Wn`%xJuC^((T@h>guU%}cGC z=fvV8G!~APl@c$uV*-f&J$13r7LE(JQ@oNbQd0xb# z%#=(|Ae?xWeiL*`WS`%rH5?Vue9h9EA?7BL|9L(vw#uzFNO5k5lqJJ$o5ZVyFpvB? z#RAbQnN=UG8^IJ=dadYfmAd1@)9F&VhoGY7d`|6eN-2L&d-l2X0kmHDtHvBjdcg610K#!<)M+Wn?xV*b{*m!vO zX_G}GT~&Iq6hcOq2F{Jm&D|Dd;9|kP)5wmc8;w>cK3(!8_?e(vE{sCEzoIkt{BXUu z%5HxsOP=?1=-`?84jy}9-4PYgLMolvyD*(XC+iX^ACv!*%T^HYzbo0@wYRd!0D7`M zDql2D-TueHp)DjJ{fEq60??LsKILJzW1<>O*(!h+zSwJ{~i{@{c6{LcfI0(1E7M>Ecd#_Dzcg0flQO!F};v*X8z>4;Mx!YvaHA z`Mjw<-OE053SD;ij>x!*hV;%KU;LVy2^^}jB<`tCT3W9o|7a5CP=QmmOKt<7ZcHWtMh}imIj;!+JGwo9IE^#rno=9Mzt1~nlV81E5{A*DHD zk9COs5jzVv8ONm1+aUYP6)*$#4G$MvrDHXRKgZ4VaZvgNh`{F)#_DMg< z%HY;YO}koNRw2BhBUVr-8&Cq-?DH7MJ^1=g35NKUnM3+ZYep}>A*0a&Up~b z<)fvraQ+Eg8yL+sPIrKc(o<@>4SJ&}Wr5&101CrEaZOmy&W^u~6o;K_HfOGs$npU& zFvlJHT6_0$qk(fMsGb>Ixkt*&{10_Y-fv}oRHH%orV;u~40AS>WizO-%DyNJQVyoO z2h?04Jo+zyZg($EDl>P+Exwpjp0&hS^)@Q_{9$Bt)Yxr2RXXMNdhM~^Ay7C>sbp_~ zz?;RlOus!(;*0FEn{@RZD00B;6CKMl@}y4VqErTrMpC!|ftqQjALXNMOIsHu^snJe z=GA)~XAtsHwn2Sn;ao=jp2+rJpyxg%{r-D&pOnI9u6^NUiq&eQ^zH!d|Fnt$SyIB~ zPVm{(6zDrM>bDnj7}4PPM=rt3u(wgTLM8j!J9bwPMi`cU|9(!utc!?B;8G{)Qp$gu z1qJ$A<)j-A9jl!iXm8!^WREb0WM+brhi%fE!CJ$Rdwl(GbHB!ikMtOn3O%-W@R#0| zS>RiJxA|{3+l!fMhx3q}@haA0=fNcXE3mr=tTu;MaUC+CWmtb(AYg@#2m4tS3A@AY zxA$CpPX=5eiLTJi%=DPkBjed-|1$`*xX1N(Hk>0nVQmNd%SvQx_*M*@&?!;wM7&SPgKf}4pM2(F0ovx2 zfxL)USgm8WIQZh1+<0XPK7N+SPa2G(s^e!OjVb#2sX96i_Z%y27N2V+CU}oD5ovXP z#>g8TQN!meIQn#*pzL{nP9|GQhS0Z$v`H(<-&W(ZnShWMy@waF}+zmB5*^T@|X 
zf@x4SO#SuqwwpKK4s$ai-ikokXR0UJP`MNLwB`$W_h&N?VZ?VUOjpBxRLAga!Ew!d+77-%O!C$0H#{;Vm-J@tK&t`kGk9f7{$6 zIzXHISxU9dz}RG*C^E%4m{A0?idIPwj}X-d4Bu|S1uxB zU|;~vbBv5wAZ7Xn-ANh`wo(26G4|B~Q8r(=(nu`bE}MDE=1a&}> z-u3r0D*mg;keSr{?Q=%;Wc7UYZ{1|L?I1s#FD3a6r(-LvCq8)ct~702Z!%ePE+!5R zK>iIb^EKK*op`!5zFH&@h5)U7+7c2P3c}*|uCIY~0*WTT&dLR1WYV>_!NEP1W?3gp zqoqSb(H~;>V%6iyo+XPt_y+&z&P*E(3KF%s^y`NXS!YlUzai5$JA2hDIICY`?8n32 zy^GLL33WO?Il;ohV(ZrmP3X2{=iuPr<|ewfiF~-e2#O2e8ExH#y)OwET3JlYY|Cs} z`T8G~jG-?LL*LpEMV#8=UfKn0s@1DaYzU#f>!vdD8cdVKIpJtA?TEA;Ob;q3C@3y2 zRy_Ir+1^H9=o;c+mwNd!TVG;mXlUtq+3-2>`naX#Q#pBe`8@dvb`?hIU$CF`q=cRA z?HI0JS1roFGG1qtO*o@d$D#B)fXr7oB^ND7GZGL`KWOgnSBW*t6) z5|e)KWR_YWh>&p}0&joxUvD3KX{s0eNjn{FVrnD8Xdy;`Z?*MpRT^vP5Mq=PV zVw-^26F${_0zAA^&}=CoQc}`?wY`w06jETRNl^-6U`Tl#5;co38IPQci^X{J9+!Zi za1Tc^7(z%$_-r<&#{gB+09IPdy+RD~OM|u=0Ih)EsjW?v=N!M?qs555j@L2POKz@r z_VsC9;;bz$@`9w@MER@!3X?|biP>%YtB*Htw3`5E3z*XY$hiRNp`~}^WMly#%?@^1 zW2k@$baLl;+MoW$@|jmm+_dNq*n!Lq``#qqzhBR^K36I6Ihmn`YfjjZ57Sr^^ruzO z=VX~GLgqj3c?GT4vRE%*xOMFM7`&z=(G7?uTz^6*ctRKFZq)u`GE!+Fp!b1N5c{Wm z2PFUkg5JLcI{*BG>X+D?J~;`}su1ur>gND#FyZt`Zdh8KT8_m4az&`W2TxIl1G4Q0 zRvfSZ!K6mjJ^uXz`2TAM^^af)sv5=_fNKCIO#@PiD2xLGWmX+P+yV4Wh5xT6l^2*E z)|~xslTb)Ik~b7+xdGA|K)~BD-bB1lN{YDd&L~=|f^qqnw}xKFRE=}NjW?TWQ}?f% z)eL*{`liiFt-boXDul_+&F!41S%zA`hWZ~v5ObrAc@OIb1Fb5X?;8{4LZ5Gp;{uYP z6M~3ARR6M@=C1~e0^mqGfD+MI$tKpQhiSoh z_&=(4Q4N67QC~sbOc@duhMcae>eC<^6%Pr858MUztKOn(gZT#poZPzehHwbvmU)6a zba0pe;1^S34!Sm4boRC?a&^_>Br7#Fb^3-eUFYt7*$a@PD}hKT76#n=+d`>P@w;fo z*0J1(MSaw}Kl0KQ9IMJek&&4;b7}99Z)|RY6zMq- zADl)3_F6T*ebjpRz|P-;2I52Q1o;3{F&1OgL5pfa?H>Z$b3O>-7>2NDpDYo@V47X<*up!$KZ9*QIlPu0imK^ z&!zXxYiG{S#QDAxChhildc`2na_idpY~P3y2_k3wy8wcFpLeuJ(3!sml3en{PKO)1 zWakw(U(gqw=>)eqAMcp3&4B#wD>F0lPjM+JPe9HTY~P4tquG&>KMQT)-3K7w1jLuv zn3(1lqn_A}7V2JIUJ%iX;>DJDlB+Rup4P#z8B5tb49`1n9!F1i2k3^GOB*`D?RPw%x= zvjU>2iCXwNU_nsoUaX|}`dpr@0J*;+PT!NgW$?PX>S{!hfgVrnKJ8cBf4o%*q`V9g 
z${Z`(5qCdP9>LU+C&dPY{*F-!e0T(2-rnYXjErKvT9kBvM6&y|z4hp9N|l@uKLdqA{EAC!~C+*5+NZoED{uuK|d!l{4P&Ggv!@*E@zdrCeA@SrFLJnSq< zY#Q0GJv=I%? znqCh)zr)%Pb7k5Q$T|Xd%A;nbx%MuL&--Sbxn>PR`Yn-v1oML8GM+<}yJSzA{P!3$ zTGyjQ(8FKdZUMj-Pd;BBNKpa}7Kw!W21|F<&Eof*fjp%8o~GScW=>8|Nk+yZnBi^e zvAQ7b#+qDSc>h(F#~);pkh4tEGN37xgPA1R^BpGk8B}%yt{VJ#aBCupJ>fU45 z=C_|PVW^1g29|xi#9@w86a~pipf<^8Ig|+&)%^T?+gH>5-@V;WNC9WS9k{0iHbA{m zf7ulVbM^sqe;J&riVq7@QyI-;41G?_&Ck%*-vm=l<7>C_0rHS)D-5K|yGnu@c_`K& zFrH0$XFgQDUh6GgU2ELFCq2MfP|(omn#I!wuHGatL;U)s_x7=fh}mmIuG+$bHr|Kc zKU?4o>m}?ciN27!*h8>5V(*w_Qw9?;+;`%%WDpq~9ON0uR-#M7iR!yk^7>x4$yB;@ zI7lK7$;26H+lN}MFi=w7YXJ*RRoiB?@RK4~Uj;k`V1dG%>4D1Ix%XZd3%5~Y;bZMG zxT~v{fX#P*tcqLUv?KuXjF^%&i)lI?p#GdjXL}$Cp9!2mP(@y3I>CE^aeca0#OW*# zqUSSYqq-fEU~rTeA)qs1VqsDAw;udmXOq##dh(t?l(KtT#?#YtGdTQ!K?Fq%$Xo_W zGDO@Dy1JGhq3`eQfwj-p0w&W3-7@B^3dr*N2bHrQT`$2Y8%1A9NJ_d$d9oG8u3*JS zYaZ_rTyZQ4xM5l~26{_SBOWB+5De=8Jqq2TEz!G?X(7Sq-nq994a}RJQhuWXw!#+n zuaYXpshp_?!_goXzlDgpuzdC6^dH~4F=0Dnj6VIFcOvAq9J z&c0^s)9256=PRJcIW99F3>=)DW9Gpr+OppR;MWOUdiUzsq|+K|zi3^j?QK>AZ<57x z>N+iTlC;4*4pzlEe!#?-%YuHjCl;UYbSOm;ju;u*&)U|73sXs;PHMmfUikU#fB=Yn zKKM}fXPDWGg@3wc^Xpsh%#5Msa;4RX2cWi%G~)etn*NEHzrA^Ua)4~20`Tdv$)q=L z-dq+&=cnzpE^Yv#Jq=?8(9r&l)$KyU?AOQjA|jCnGNAK^7euB12BBR+`p2!YZIxA{cT4 znep16wV|2(HsL+HA}UhXvXvwZC}&$In?I9kR0abtI6+R4&->H`2HP?MtJ~E2c_(?x zTf#G0y%Q@QCaQnWJ_NIkhszYF4&LC55}nM?s*&iz%x*gwq4mHaFpJmg+*pe zpX5VcVUY4v83QgN%@sK(NAMJEYQ~}BPoIX=%WHnwH=eEEFWu*3DS;wGM?*nkP~w~_={BayQ{JUi(!tl%w%?_2-#cv_sro_1ptQ`%tzxS}ME+d480-eX z(O6Gbtbvi|E~8Ou--(bV5%uIOf=@Lh(ZqiByOxPT&_D1e!Q; zE{1x2q+an|WaoK12^mu>+t|p}Pj#wsH1LlW2{WJtTS>iywnFAF@Wc559%fVB0q-tuBr%aa0>K|rjWtqQv6~hK-jzHh?of#IT z>OgnFzx5 zI3lqUYR-EH19!?3;1QsSR_{O)v5;?7e#+5#j*DZVKA&!GqDzQSi(giikaBT^@#VFh%~Wj1;r;rvy`r?XmCc0R0~}l zEjoCo9;A&j6<09$&~S5Zd0ATPUcG{X0$He3n7o$pLmkEHG})Kj>-XTzTqC4%V$iTZ zW~#yLc^Kjt($+u9CUYfc9+?nQ*g>Ec+lu!@ffO(LD{dVEN-__W32Nn_4hof0q+nV} zR$n)p--HI?2L?bAH9}(0)W|ZVnzCWx#9c5&{QPfIBdZtoQkGTqNlyPrboZp0Jk%jT 
zq!DWCeCHB}qL1+oCgo~n7~_l>m;|Nzp(Ate!Y$S(EtZ!DfW@$p1L(*%dj8%+8KI-WVI0}=>JRI`jht8>39{_+tN&;qD&%pp0FA_eTlmezUr%4Jn^=gnWtE#1 z<)5JpO$yaQ6-cgoF316`@XgsKpCdcrWFisIc-MJ}&I0YVR7l#F)8_5iRLJ+d)wS?B zq_+tUg=(;>zf4eh;aD-RMPIEu@3-n>7k8HL>MT+tu?h)4Vjr$pbq>nKI0y_t|@EW+fQx#+r-y}AK7M|?ayh0l7%o15Pl z9w0}u^lepTo~zF|9ohf~uo)hv%u)YR=@D#gVSQbU5Lk{CF4ceb{#&NbeO8ZC2`8x(D5X4Bz<}c>!D8oQ z^sOY~!tM3zTq(g~L_-q8;H+#GJDF(9y=8VY@A&z@0;>AgJX_zZBoJ}r9UmLa`Up$& zBS>g!BC<_`#xnsi7bwYU$>}sg0cgtk4y!K_%(w|-rc0AuoZk|5wOL*$OV=bjQ+`fqj8Hh0Aq#`8smURYgJTkFi&YrF z4nijcyHA;;0oekaWiVMr3!_pB*E&%F*Hu81Yd;&rzHt%6VW0TyJBW@$3Ttvx7WLO zLt9F}(KiRZohWZQoe#FJe);)RWq7#r%9AiWFR(y0k*b9%>b>8-nF1yt5_wu2o-DLz zRQGMSPg9E=REY;2qxh&^~bNf4Y%x4pMUmYCH z_Rq6tZcf+v9-p7c?ee{0>0yWar&@@;4{ny6Vd}J|CW7w-O1)ZH1{wG?-7=-m+26k_ zlkXx&zuMa92i)Tqu;1H=e1fNj5r04Fr(qPG;Kzwic)f9&Cu_r3hQ<<`CgNrK!XJdD z6F7{!f8M?e^Jn4gu|}?s(LBqi&dfPmy$v6q=mnbB&(w#w{rvQ-MtC4ZZYIArH&;fB zje>vIfB9mZDD~&hpPJngs}UEC;pa=PRNQaXi9)Xw3~dodKtyzXuw|p!FGA#rCNCLI zz2Kwf)SPXzXebSYLqjljA#R%_I6u;q1esFA;Q9surm?#EaPy2gK&mZ+a(B z{`4iMIL6DB%tNH=nNC_>`l83+^VO7Q*ASCI8jDr*Uhnn9l8f z5G49cC>wA{z+zsWe*bf=Q6Ej?B_)&L>mNKS8WUE}PT%yY;baX;6-*Jq0SfN+BarD` zOJ^j9mLkQIqmJ6Ewr~xEyF$ET{AEJ@q>d3O{!Cp$>+FwT?+uwP?WWPtM$UFraW1@l z)_wBa=8Dd_YMHfSoS$e8&m#)7gNM*xa@@Jo^4PO+MKJO}H9;36ityyoSEpsAd9qVF z&4yzA&!2T3AggH(tn(-{q`D{E-LEcFRKFw&yAkw@KWjEQE-pzwe|Jd-ZAfZhe(^TNg-7(HXhEDJ*jNuMjH3MyRV;WK4nPVSOvb!>j4k{8|Jak!!QtJoksa}y0sFSGbjy#rI=B?ajM=LNTW zW!wd_*4KIUaMTCdv}bM4m2|1Trk*-4rv#?&_eU6R+D5e~!u-;06Vw2VufHU6kEP#AdW(XC51jWwKQRfd|+CjI0Uu(ZUf{xz!o)k^BM3@;D=o6hdm%zN__ z?GgAOU#0H{?fSCSL-D};C5it0&7O0mEFRYlM(cGnm@JfYqO2-KA>KUbVyfpB?4e^- z1)KfMyDVR68M_$NQs`EIn!QM|AH_t_QSY2oG}B&P-e8z9c96<2Y1)oZ?TnI)(;s8v zHl2H@Op_PT@s?dj$)|*2`~8c!W% zi0FjGe}$EO2b|W_i%ZlCRd|rb+_`||k}S^quwgkjphIX``i8ran$;ZE^#L^tn{W-S z%`Qf%{KK)j7?3ckGCv2Jk>&vgc>@1AiVr;dA-6OHuk*GW|e< zyEQLkqb|k=7T>z<9f;Nw@2%we5v*9@UZ;h?F`)6kw7w3)HI!}X9%Pl{?*0nt!be$G zB3JU}+xvzl(q=Ud^O&J3LqEq-{n!%j4|lZA`W&bMhT<#70v1r0(|5^S{0&dFqv@yw 
zDauy3-r9LTh8!LyIf3|Y`h`awCyMZyGZDvDrmWXdo-FpyoH2^V;AeT)6E-^%Y%r4_ zzMSI=zXC_=sgXl76W0&1BVOxTx1@163Ph*iRi^DapkhYxT#6lw4QxRA`>WOWlxW)x zb22w5n)Kyo)HLTfQXwO%!4h5{G9dQbDFlQCe72|b`;w8c*IzUYG3QRg_RzVnPieU~ z;HpS_V`IkTQ$ZUYx9(W(nk!Pc&Gac37S6Y0@p{xWeMg2_S>kg6i=u}nTvGYAEE$+E zJLhm4#@aAEOiyWbP4}N^sEc=VLN~%hSiO_1~YfJ zyUw-6I?d$$w_C&OOMcF3vV#*b%j|#;lGuo@H(s{*Ie&xQ^d#o;l3q|yNAz4 za{-i?3u|klt?p*>cHZdTZJ@HI@5>WuC{2&t z7SkZ~i?Knl*EYG`IhZp$q3<7fn&$?rq;{`A7(zH+cvLhtUKSZx=egwyY`wf=^X% zU8}Ty4~`$;C_?hYoqhZ71h&eRWD@OC!>Q}cmy5x+QC|3G_9hpza_{)V5b3L~87YQ! zSi-mou-u+O%p@_LvBinR%I;aYR_EWEkt45AB$X4Ix;8v$WtDY;`4l<*hp|xBDv(}u;wIN z+XR#B#Mn*73v9w zZzz;}PN0%bTA446T!eqBeD)E>hn!f-1iDi@yckiA-A|!=HmtF)VtNQh_aOQ8kUP!> z9SdzSU^fgmbJzP-s!1elBHti})`O^^#PX&+8RGBg*iL^vKUWgH@6KE9ij9q}^tn;i z({iT%{c(AhWe&KPDr1wuXFSoMw~HyT{_oz&o)Ou7c2^dv`60$PjV+~t(rApSU&#IT zoBDs*|IUSNm!AoaxCTyE6i~R&)(!aRCsW$0l$D-Qr9lU-X5uRUJMCN7hJAkjhUz3L zlVn(r9mZlNOg?bW3#m_1w^BZVDP?Pd)8WgieV^k8S=z<-L5Nq{6WCoXye3g@w%Gos z=Cg3AhFGid#%7+zjEl>!Z{FIJ0sE@Ktu97~Z`gRlKf>UTD#I?sy2#?)6KkE8D~g*g zCC*aDzH5YhC^m}vT_iBy&EVVGtykZVmqzFISS+tHZ&~hFTw0_&<)`*Mvnbrj7r>4` zAb03PS$UjPXsDhDdmY)G(5Jg>8xGkwxzodz1RUma$S+wNF|jUwK5hwFaTVK(Oh-3D zgXjL_id%`vg1s_tOdoH?N8SKlm0ctGu|l0YZ+zXXVhtYfpY0?{^S62T8Qq>Yejtpg z(g=Gx^lW*B#o>Ma4_8KdYTkBrb8PNn_nu_wPJ*5xrH0oJ5kJ!Fs;4ogXP6kPNq5AFyuO;NVT#V-^WjopeLE`r-;6E;c7^Ki_wtz8M>r-3ot6R2}AMa7& zErUV%_KiLjq1;*v(49$Bd=$F#72#K-xZkri?Yi-{rj|Iqp~1J-_el*83UB#t-0*=Y zBiG?3k=rMciLi}db#7x>3M}D9LFuJJ72TZKB8wIUZK`4@)T4t^HJEm2K^%XZIlBeY zoelYE&ib4(enbYUt)FC>`!s>$ejhqiy1VZmkOK>aTqywLidAcESWy4yoj+7l5{gU9 z`kV~)&|dpZYgVV#GNbOB;cJm?V$kQPub@uLL%S&O0$@xK!tk_fPz4oGVgNWL-CSFz z=KPfqxwbRd^>q7whVFp1qYOk$v`6j?b~LajCD2SYslpSPy{5MB}W zIGwYg)OMOaSJ=W_`F*!hMn1&NUt_`B_!^yI@tl1-8uQ5mhtiN``co&PrmrstTY$C{%+xfeFY}ai&N?FW!hjSy|CD zA@1Q>n406Y_v4lakd@;})d3~c^30wzC?n}^p!Vj9x7Y9<9SYv7$j;?NA$6lKo(}`u z?a^BlQYTqjyRqcy+9ohA&V|C3rb-G?TL34;N|~U=g%*I!>a~Sc^{{kB8sd^9t^M23 zJ$#_!mVBIV)oTmR=k69Kz`wvS=i_ZA2mHtJLFD;w9P%T;A=f-&$wuqBIXDYaV68wO 
z639fXt*optI}9=n_zld5NZrnAzgMaI)BM#NHSTS~z7p!zJ8o`A;A)#8qfaQDk_ope z^F&AZ1!{jx1DIZoB3qYYgWrRIz2DxUC55^Oz*bO-z$GHexkpc2n7iwO>%!>r=;Y)? zK0?W=VqCny{=8Ntu#J&kNJz;0WH|vSukir;q@CEwA=03wRKS)4JjiDKY+`r&>9yHr zQdiZXdTwYue>lwIh=zs+0ZC71{2oTD5-f%z5v-xAnu-|DR1V)1CN_pKuC64NP~TMn zfDtMUw_5zu5F2h$t%vQgfnR8!_up@)uDPNea3KDfn(1x?dpGbVMVO z*S6kj&CY`8p$q4}2exLDa;$>{x>6Ggvs%t1Kzb6Tp$Nv?Oje(s-mPWVQCnOiUKLw) zFl_M10$nf+H|VONo^;C|_ z01Lke+Yq{0$r9^nK-izhR+d7#>10T;xfTe4IY5M%u^^T zc(QQ=`6e*Ov?amx;2P-9m82q1J)(Jc14&#vlw)7LMS~^$2F5tQHsAFHV6$P#ji@dK zQ?fomR*#DK(t>Gt11^TbV=|w?EKPkK7e&X>&;jsTv6lSd8m7VcFj_j_v!4-p-JfWW z*cd7+tCRCl?~mx{*JtqOg<>s+I-NM+i;)La4JjY4znI2u`mM#iZn^{m)g91y9WW@5 z$)-^W5Pz*NHN7dH5{z+xwKN+*9G6~7q3m|u4AgPd4>s5S@kksRO$xWG^lZ1mK{W~Y zIq+Z4_YnYs7~%&;62yj!JL%)gwyL{5 zVa*4HkB>)B7n6nslSKmc}zf01UgZ>l9UuP7bF4-QDG!5(lH0WSDB z>Gikb@FG(p)*jIp2&2+hsgTW&xS$SqZ67cQ9Eb>F!YtQ@vX&%3CNHCDNg35nI?;zL zwi{qN<7C>x9=gI&^Z5;Eq}X$|gV5-t<>)^0d3sf#%QI1C{&Tl8qjMdUe}gm*3C1%v zCu1km>#u%JHmswVG4A~QgXMbUBp*x*TFf`Jx+n$?b*3%85)y;ri9`B!JCndQqQgT& zQnyK7q_W4YkR)2x(YS)i|2%hbl*2Nx1%eu;w5Ol#BJ!g{i94B!cohsBu!#EVO&0 zrrb+?)!>qQ5}XXO?b!}XVg#CXeP3OhmR7G<$|j1n7+Upxzb1NQ#GWHZEE4Um6GF!&#GS`6BrqDJ)Tx1udvn1GiM7^VOk06xtZ zQA43hzow_AA|=$r3^u(i#=XY=?XkpB^VE(SVW3|%!@mNmk49BmMX_kJ!$5v%I5EVM({`cZN!#kAeQuyqy6 zU1Tn!-1xAgh|+V^XamFI=&Oa)rhEuuh?;wi>j;(#=^MLX2Bi%lGFOx_!`WzxIx~Ra zg|w_J>h@3lSr5RC@^?Eu+iv2}5B10k$mu1O#pwtjUO@FQ#7&)(J(B;k)CQUcs>J%z{(mBfPVzY z#Cdsnc!D861r%;UZxQ8x{6R{og3tKWk~t54_$q5&NFA7LP; zBDj79?&cDmYy@`=0<8|iTFu5^_D`3VDGD+<5f2^ann86u9WCupW1^tFp?+b>>%QTa zAh%15gYx0aTI#bxf?k*V8kF?Qz`#0wY`U-V7MuMr@bZ+f+)s}CPwhu}@gb1xvEAKW z8#fde!|yA|R7>1KdAm40td4$}N?Mess9G+I;r{8bNmMGa>EV9Fq}0`KKOM}Gp8x~m z!^7ZePHJ zuLID6LFC1hmOi}b>Ftf+>KZRG!Ds*y4D=7{CGf?JfIoyswAiBGe zfVs60%r^55Rr0Q1GX$tht*wEyk|mfO!@I;scee!;fjf73+VPgVe;;>wW+9t3L^OmV zF%`ib|8^9aGUXQOs*k~@kBrutX`TU4<|fb^KwZ7qK>zRs=2=2S1YtwgCSdv^XBve2 zYaz+<_Y^;h*@MeH0WEM8oO9&9sK55oy&D6g#;z#RS^-zT=wG~i`LdNdc1ojG<%XDE 
zh}kVD6zF%z6tkV>sN7>_j`J#ouLJ#~A-x};0e`Ppr&I#fvyMxCaGxVmTvDCa}@w#*1R$Nkq<3v|N-J(!NB;fXwHVnLi zt&Pk)-lN++<>KVbr8yAb=8k&S(iLtPXhu-1se>6ug&GrqwiXTO3-#5d zX46XHM0V7m1WdtkD%5e^_1HmVn!xcrmy8VGiGNTv@uJUjXEwI$ z6?p&0J5>z@JWO9q77g5LaE|&6C=wiqQELpUz>f#7_8q%o^~;UA-<(J^;fnVZmAtTT z^J9X+5)A=aQRd!**izE?DFD;!)5BvK1M`^OrF{^)@4a!hhIn%_ufVa_7G#o`i+ceN zlbJ;l244^zsIb(SB&DNMDCRb-e_9Mw!r@0-(}2lEO#M{*hqDVB8rr+q(GVc%Tz=JA zb9BD}T-G*AcL@^~ZrOEF11K_8u z`zvD}1IJBUG=v;}sNSG9{rNvQ;r$(&5dM>eqXrTOX{GE)a7k*Zbu_57Q90EH>`1I! z6rYgadNgy)-QE457|eGC4cW^y&EFKfwa*7;Q@4}Q2|VP_pWiL512T3Vpw7WaM`zC_ z5O6n3tfX&~oZZmN5ufGZ<6EM~XEnUpKUlh6334b!MooS8wO763_Oq}@k4oV897Lc5 zj-^TWo(5LWAybTg(GV;GAN}>255iGkve;9G#7Cr&6OX@A@z@4t+jVkDHI9Su(@CS0 zQ*2+59>6cENQSL)>74^6@fq;guB!uS=m>H9uZ4xHm7sD2P=`Tb^6g7*<={j@qK8{6 zw&NMC5A%-*`BrJp2Bj`td;!1MruP(ZO5YMOC>H}Io1P0daJRCJ>{Pnzn*8fXJD?bI zFgZ2l1^x+4K~P-wV$4oX&=e2@>)f_Q-L`IJL;{UIFdh$Dv7T*acr!szXH|I;y*5yT zYPTY|=kRo1!4%DhC6XY+s3* zU<(`lC2JitU7m)YfeT5A#+|W&_{-8WFpo4xK-o{e*4OU7Tp(HYs7JC1tD9mDtGPe?P2Hl4QiiuY(_u7(mNqD^mU+_ zMolLvehQxohO0peDk_a-Ff<&M9{^uCV0xY^GUqc4W~OG5gmzUEQc+4V3IY~JgTc)Z z&0QhRqII)zOw^#?M@1^FtqZ=<89Tpj+z@1!lM>e~=&D@6=5}b~v6`uO2i46eMZ2IN zEa5AM)d8lps3e1B`qZl}|6|$|BbV_gHH;22tq?M!21=c*6{A;0&77d4!BUn=E}x6X zDYSwUz0d-T+ocOtGn@=A{PPc9bUMQIZs@DI)rzvQSpqUND3L*N7C$~T?WE;-a+tky zv+j{-u9f}nnZKBWi1^bb3h3#UvNsr~{^I>N;@~LXw`Qy6c#8NVzMV{Z!}|K=#VUxafyeF@#$eh(T)${?fwMoP?@g zQuUg-Ee^w{AaQK=^pD%#g_9U`IdSj>$t?7y-rc0e>UNNZ5{-fAxl_{axSeTfFHoPk zO)L`+58v@J^g+;zBnAZ2T{Wt`sTxwOEY9WcgPf{3GJG>N z_XA*o2^sp1@WvF~6B84B$$#l!1mNeR zyJ`-A8n22IhLi*+PwBBQpp}_z*)>JX4IkH$iql)*1J4takf^z9ORc2G3NEp;WB)i(4O}<}{3C9Gwrv{Ld~13XnlP>E3uS} z44|@wO5Fw~tB8#9zcHE32#k>)`*?{x9yaI6nz#)>~N%Q0l?w0(cNWHXWhM|K_$_!pV zmm7}y66$B8$?2%zhkCG90=lYC4p+nRB0}Muxv?`M|A8d!f>Ds9DnV5Xpp+R)W#lHo z25MNZ#|3-`k%kQaL7{iJZPeshl_ciovZg)`($_yKH%NW%Pbdu&`ml8=S|IhbC_LJQWH|c6tMI@rQ+o^YRj@J38yh9TE^u zchCI(EyLP6oudq$X}V(B;Qwy7IoV#3@Z?IQ`pRH;5O5aE~wM`-o zI_o%y6nI!yUjZyf18klz7wE={lCnzW#sRSeFxJzsSC*wqQxCr5aIt#6KI9}sFiqE( zo__#>iiMo+C9 
z0>{(d{mI*aBye~N$8PBBx>=;&_U4e3YNhY;SCPT~jsw-7$O6gWDZpYb%v?v~=^c(Go~ozmvCZ7dj!66z$fe6uqYRB{x|Co3rutjH^v8s|P_AUHGY-*K zmtjYHt%fa%uJsM8`@>;gDH+zvj(;|!*X2p(t>@9ed}7d75|UMs^p8s%7f%^{rv|6J z39Ah5!bIT69c)6yH6#525;wPnV<3nl;=cDQOTk3b7Vz38tKGJV;^R}mKJaR>9qS=!O7+@sN<)Hr|OI4)^Evkq{D>gVwE-sk zSt(<&4iPsTLdpuah&+=kOc3j48i+-qV~-@ozuY?7TQ2#tZp)SpN`t=GrabYjk9)>| z2Wz_cEw^jn4_jDt1M48^vh5qlDj%(+Bsk5#ZG2><>Fcq+Tsa@30xRtw|J4s7W&!Va zUp^qyye}?2$+ba{*z0ldK-j_fj&*W96x`eLKSZqGK10z(;`IYN4$gXAhTM9G@Ft(> zYK>AeVmnf+=~_#h!aC$<_F=?=`AGg+c6McMjs+xpC#|@+AIl;+yNvnq0h(&C7MPZJ z2MG!_pf*B|tjiy3v4(3;L^ZpqYl@Zo(axjHpGk&I^hynq5N=2ESk?i~HlOm$&y5p4FK(j;egauog5!vyVlbeZ>mM`R#mZyTj;qv&Gl zn;@IdlTiy(TSyfV8GZKEs?~sNf3r&CEoAeVOmMHh$SVrG>Vw(U)y)mt6w%)xJuEjB`=&;CM9i+El>4kEoG<`f7v9#oakmN- z8=*BM3D{yPQ5-Cc6u6i!(t=$1@280}ST&;BTxx$BJmt=XJx=z>pZ&NP{nJL}&7w$s zyBNFZ{Es#}bSzEM?hDDgcLXV-Tr~#9pfeWSHd?}N_d^n;HZ{F9%o7tixDCIgcmAjh zPI~v!B)ln4^YrM-to_~lRETZXKYXRFDY;B8zT#+mXF2L9obCfb9~N>VhzNwuJ_kj4 zDrP}Zo)i{?zLm=<82;#XW~4X{Q_T%fHv_CbJ|8x4v#;B`p99}{&++lpS-VRwZ(FNc zyeO{m3S#st1J}`H=QofsL&9DM>`}Xh(_1ZhZja#Oj;m7!uYBsUpaN>aOcloP59Ws( zCx)f;eNS+1M#A_8E;9Q|n3joObfaMJ*w_qNUjsm%Q+xe1g!dQqtqrNctHmf`QdoGM zu$u7h0pNPJTBqVZYw}5wjf$kB)kERjbXp5`{)c&T;ts$O zfh4Dv!nH-I?gxe(g&#AQ{KMc0>$^54t>co=)|Rz$%3%iD@o%Mj34yz>Qx$zvxNZiC$O1MB8o_Or&4`UMc=<;$8Db zyxkoq1KZ*gM4I0g%O)IbWFJu*SrY8>QaD^f5)goInvs!;+|}O%lnHQe6wp^K=eYOj zO#K0Jva+GZ9mkc8i6ckZeb~)yL+4^p)^=k1*}+#Xy?BR^emlpfgv3{0{p*-Qe>yTZ z+!5HE_|>2Fum$g^g-!)G^&$5|-Jy^A-!`mirJYwI)4IxJBCJ@HUwfaf$u>4ZkXHE> zc{NV^FFBu4(9|fwFn0vw@YN zUvqP_*g5?#pdFUPv#gMevJh<%;23b}KT4~2gQv>3+8;Dp4&Mgb4<+UFgwO!k_XZHs z{N>CRRyO(7d2RSuqbNp_H7kmdByj@}^}w0McYV|z7)TL1GxLOoR?Bko;ITkhTXD&U z6~FV#cH5^4?ZS!Le=I6z5tdRTSOweP|dx2j#%%Hfg(pPw>Bk5k)9P0zXPdV+XX5_vv)5B2P#*>SRRKN%< z4AP8y|DGvG8x}3<{gX}1xhj9jl6^B4cGMn0jXHGU)cwvx5qo|UH_b^Uqx{=igvj3& zd9b(qAhDK}ISYFjP!Py0TTzqsfacI|yD4S377qB}w*vw|-JI1OA9^FhB;fZ1)NrnL za(LX!C{D1esUYN@!BrDiGY-SZ&*i$!}N zi%wr;jY#u2iedd{RhATG+-=mb$Te(0*22qeY&%dSprns?v@`rnte%IbW4nK>IBW0N 
zzTM>Fpwl@$@rL~s$6+ogJUum--CTB+q8IWxH%2!P<0d~k+BWJZ>s#6kdE;~9L}yh} z>PTqCe#EJGNh}sT1qh5=%J>_>lwmFlD^^vuXUYajahAwDjbM#}@r_GxQ2Yb&w%M9` z=u&V}6+|rVfa`M5;;=`JWoHyY$Y&e2?I0>(~xfvVZ4 z*z{oj$k_oJ-do_t+UyPfnvBgPeLHNn`x_^OyWhZl6wW%(0*{7iZu=~NHA;+HqjqY+ z^%8bc4nH4zYn^LhyKUcNKNRAT8WOZ`A7U(2WVW9%1E+yrgV|^C5&_OXsTCq@ttOPb z7Xqo=p50K<;5H+r0ZT`bkDW9WyD&FyK0(@nvCa5@I{B(X1F zBqII&eSXvDv^nE5y2G{RIB1oj`2_jC=Oi54S_1hSQfqWyR3}@LB=XMnwLTTcj_)gM2#Dt;p`GOL5O&VR$4L_BPl%nKJv}>HGig44KR2> z4=hx1U{rnBNHJ8c65o{vLBdtx%|$%r&mb;+kgmqJj>qA0Kdct8Y5qQ^qZEk_c70;Hn}e_jV7$oig-z!D&$sg%S#P0& zW6UZa0$;zWF3+y_-M24P|TKaBWNV88oQRDON^ z`t|YS$KKxFckbK)^=m+8xH1l=iV($HQj;~;|Gyv??Vl^+P^3{9?f(#Pn$j)Yb<2eu zy1U=_3}Za^rZFDpO>Lv{OHqP+DdBXO?q4qI9@dM=3%Hnk2!wuiWelqw(o$2!&Rzud zMOf_3BfT+ReO2>SxD>`SQ&-WMDw93?trZU=_cze3bZ?bh>5|d5MEJjXb0e!Z-9h;1O@_5tgCDsAu^Yv*%>t7))QCr=^`9onA(%)!#)Y)PG_;Eixma1jRTpufanm$| zjm~4If+Yu+?sXoYnMCmwbV*dG8xa3e`yh$EjO(-?7YLl21bm#H7Ul$(<^MSpjO zZ$*O7 zgX?qAUp=5uhEDO6l?Bqi--7cKV)y9LhQ{Yr=_hTTrx|)_c#nMByyy{WK58p! z9je@6Qt1W6ca0K;%6Z&Q7?PUU^6QiwHqlBkerNYhL@&~T+iRhF>3C7L(rO*6gBdmE zX682^PttO@I7sE(pY+PT_=wMDY4v~VI_t0~n=kG^B9DMdhysGFtO6n>N=d_#3(_Hi z0)mB7A}l3{f&wZXqSD>n4Fb}=bVy6*B1pbxF`nQ1Uf1j8U+a4BJ!fXloSFHY@97ur z|CFl4&(g%g#B{lv>P>eMmhqHXs*2D8YDLp6M?NA?^~uT9!ezbkXZ#%dGeZ$%n;&H- zc>5nBS{SvTU5UOP)rfyBeI2Z!g+&Ab%C|F+{uH9X@)0(WFA5<`)H8FgXK&tkkYOT! 
zL5z|rX8UVlaq(Q4M$wB!iPZK>e(Y|!=jF|lMjlpD)k;aD-;bT49t|y?wVV9sT1qGx zX^6^cY2)v!?lh)~L(38Vfu5}{*qQalXfk3K%}e*n*bo~}yt++_^_Q!M9T*NGdoA7c zo6b9IB+qN9J>YTJTvD=hPZMOnF@s8N=T_=A$uxUE@~#wY(viDkPu0bo|Hl0!UXEt|D2B6i9v5Rcv$7$=MvWyw+NJ(hezLq&L*eMIrKei~mISt*ZOo~5VE#s377xbrDhg`<-=^L**>Qf#@Eq#_5PbIrNKj>9~2Kht)ul4%;2 zak7bBv)O?BXon*viC;(I>QDud|5}zeHA=K#tem5GawZ61x|L&6#=KMZuP<^-3Bf5h z;UTM1?;{jGKd18Z;wd!nceFVOAevqPOEDoq)#Q5QJlnle0J0tvSJO$-)D8}g$#q>y ze{XC`^~hWbYsqyDag_v6FyhW>k?FSP8^K|YKDYu>xKi#h?JVkEs$l{^xl%xtRwhXu z=m`LOE6*rUJxF{_k5>FQ4KS*8n-bPex6iti>EDaD#mhNdbbyE=LHDUv zdUKh~*tg^=)1#8o#DL6D)Fi+=HYt~bJ<0jP2a@D(80kPh0 zhImzvdjI=v!L|%v&V1&WixlxuHS&a9>VWOm3#z53L)=Ufk;I@I;QNJma;$;=vq4eN z43dI)H0tT3-KFBA8paR*VagRKF}iKk_UXbUYIwy1Ckw(Yio>?EY6A@f_AFka4H9#b zf}|-V#Y;KpU+5vjcgNq$p7vryy2s zm4x*ju4OT%l`VL>s23g{2E{Ao-9&Z&lhccsLA&R}#nzvueucD`PZ410cbXranpm8W3?&C4TXt zOBu|n*7QEN1UIQaKVd0kzjj}tKd^A~?$hcjl0i`oq>jQgzA#Wjz!rzYkHqY(I$cX=0l0v=P}rnyxYqY zd!XE!9t zWJF8Cq{tjv5?!}1gYYGkv0!yfXG-%W*8#T!}mr znk_$USpigGqZHXr z2u|>#?)!))OQuU=v9&7@)36(8X=(-n%BnaC;sF%8p|=;$cUd}0-%IiACxfeH;1?yV zT@EKR1+jF<)Y4LV_IE3vp6B1>Gpsm8rP3U?3;n{Ygvz{maJn};y0kUJ03QkNH*tya zhB5pY^cXTjR8L{f(2KwC`fY_yJ%Xsz8MN~t{f;PLu$Q$ux9|CQd&lo~x3nxKDp(u$ z?fTmEux63Sm3CkBnY8^!0?SZW8LpI71Kr8El zWq@LeQ@SIUX=%E(_H9#k6!ikU3@n5$jcL%WKo~vZw0_4Ia^}gChEpEx8jtc+>%^B^ z?NsB_|Mj-){2CsL8bh+3nY-Jo+7THk$>q_R1!MHk=DtR_wIwN8}SY+7&^E z+1Bb?$X{&(S8h3?(g2w!R)DK`yp;hb2`=waB#WXH!est zX*3}Y>*u}*3y|9+%dBPjn zYw4FdDdr+yTqv&pxJc2__wKH%Hy!z!>u*ZuZ>eU`|F%UteE)9dL;WSLX0*BJl4iE|L#6-xnuNe40^CPjQBYWQ^1;Q+UMa zu>j%Ws)JObd0^Tr8gn_z8&>3+8W(@TmZEbR4oTH;By3<%QLG;X{BJG5tH)YSZ3U8NwO#M8@b_NckJx%0u^^&QAev@$*sx195^1w^&?16WiN&AvKl zoy9z8# z#iHD88an1zADsHi`nxL@PnFQz+{TeYl$LFUOs+VWF_1I?#slCdj^9+j;#z8V{i;LD zS&Zr>rE|_#x!Kv+q6-2yy9NRLbH1-<*q!~L354f?j|EcjG%f=~7&jOC1M-Ch1R!7E zN0o_-O9gJ^_iL3u0xcPMXXwFw+-TKNZz0fx^0-!*@07B6M2#p*0S=7;Vbkx7MWC`C z254HP`DG_fKxzjpo()qM>2AZEx9t)~CDC}Iez|uSUdaV|G(-_g=B2QHrHty|dXhlq zaEg{z-Yiik=i$uAQDI#1(!v3s*-1cyy^c6UIEg;PL52X_<)bVnKv+<{4*0*vHyj;H 
z!@~4H|Idx|+(TpIEyz6p=uJ6Pz)y^Vnp&0tDPS1k3_2bqit|uy>Y!e?_S*fI%!>|< zfvmYsz?S7sxv3~ZgvVAQlggdwTIez}9_uAqz*`C+x)k|u-{Soa?OH-Z&FsPIO5HgR z(R`4lq0c2%qPKl7KX*V4KR1VNM@6`(l{`v7a28{V;K#Ha%){$(C%HP=f!N$|iLC+X zoq<&83?!Ue1hHw6(F;g0tgNrcaDwf|%fBdR)_nci80+01#%bIOgF1TAi;hpIpi4Zt zI}tL6*m%4Z=SC@V7qF{9pW(L*^kon;!=^^vo%6!rf1?^ntHXG^su_m0dEO2~Rl_@7 zqUa0Q)zKfI>91x{8E`)a5<&j^>ha-l z8pUAT6;E0Q)gng34=O5MN2sYk+>x{X-ipZQ@QlCtGk%iOb{OUw2KRS=={l$0g{H#X_0={nZee zxUM0Az5S-4zl8@FwxO=h-LDdKGjDwiyD_?I7K{qq-kEjI@9JRX;4tk@)>?46Z1XS% z%FOPsFayjLuB*KqOSW<~|!p5$1WR`t4sX~phVF7y=F2HCKykX~s z{kY=b%Cz5<$jHhsHYmOg+lPvOETIoP=g@)%U(-LGv?8h<~+e~2hl%1h?m*0N9|~S_@+U^J6Eg!8oQ8?{cn3XZF&n>+!+#As% zv3R=o<$U^TA_LBT%^j97H&=v4TmB8lkiMtqHs8HA4vR2NmSrJA9M zU;F_8cpS;pz*6R|oVVEg5R`o5NN?CjUK{hV>@F1Fc`l-Jcix)vUo39X9twJPxJ!kN zB+A#7OP< zWx6O*qW2l0(~VTx-*;G#T%Kdoz7v{6v&0;G0Mo0D5r@rX(5ax;&&+xkE`|@M@FyV@ zO#i?}ReY=L0`ptbpzsTPV&us^*VET0yH30)KWED=J||$7P_qe&eegVx>{I0u5%|nJ zVglR$x{@WlKO4|WLE*Q+_}~=T(mfb$^rO&(?LBVbYii%xRyNvk3I~&2)!j{1F0^R` zYR3cb9sM5U^q}79#Bugt8)!ILOWB&5hiuG#PHUZg1!v|DKHrN!OqLX-w+3!Ko5GBd z_`aM=Vl>|Csjo*zzX!I3z|+Nr0N(_>9gt`==re>3jdBTWL{ZPJ=;SoCwQ-4vg#N=Q zEG#T8&hSe;(FI9Nc@-r2#9BY_!I8Qo;uo7qEb=$C>_5BQo%5}u;@JBallVr6U@`2i z5UfO%*N*-D0wMn5O5#t$1+kX3Q{qi^MK0{A_!EKOZ*>j(s(bSA1rLoK%Q0uSJ~QhY zqjRSLv(8myl^znX_4(P5AS6@$naga-2+F~r?Z8UVZZIEyx3ja%p%>$^x6`{kVn&2_ zMTcK~CDf>ZV3gCHDbpyiY5Cqvi`Tvx6tf&x-41TfcQcBN4_A3g-zAjYo>Lc zird1v6=O`q?Vi3K1Qj{CG^hRD*kVw%;Ql+3Aj#E6_ZtYwNxIYxu8u<4>&qM>vQX@se{xEOHArZto!Grm1~E)4+#!u&z>!P5q4qqlSMi0FdmcKnC)VgKLAmMWWg+nmK5D?58WWYvD-Ft6c$(U zR{qt{#}pXV?2)wFaz7lV8Xv!>R*Izo0`iwmSaoyiFepzPKduhaC;*}bB|aJ|tQsuH zFv^%1ADXSMVOdyoBV9)zXu96Fx2MI(k(&+`?*A4{a|&K$=P+eAvuLlh9x1iV(2uif z0e-`}+70s@clDkL8uVofw{T#F5$B2v-SW>34g8Fs>d?t(%Tfx0y(EFz2By)X5RvVEh(QuKUq<;q;+{8#CYA6BL;CrGeE;1xDPP6yT*@q5v zSBmW2fnsWDAc^PXdFUBC+!DUm9V1s)bB>##V$c9t%Feq$hqQZ((fIn2wIL8>_hkOgyMb*s6JJ6W2){|l0n zL+U9Xu9=idyEb)Rb-7Fs6ydD@IebaVD=J@&`Oe@#zL{*Zlvz`Y7e#@h<8C_WBp&<- zVQIy8=;10TfrwLeuM)O=T!zC)Q_^tk?b%Yt%|cEQ=WYJ65B>}w&}R%ah5ZND_(5-~ 
zTp0AG9GsT!P^|uAyW~POPCB4xjk7T~&@0JzhCx)u{BUb7iLH&ii)hX3G`dRz}L0NSY{KQ6%ne zr-;H;@qg39I(?!itq?@;PRlfh?`b7rs~T4)c5dey-^qE1aj3fa39rY5wA^=8&3Whr zC3)@{sjqL$dxE^0CxiIMot}=EigW{urdGk%uLYX!u&z${tj}Suv0f45R7T*X;`VBe z(PP@F9XjBC{>V|WI7#8f{+t|Z*BfSicxF`vBr?*ENV(z0m3`0|ibiGj>;6Snb)Yks{C`nX z3nv>U8*Ty>jj_HH>N{E`g{V0SvEG9e0fD*5Y@0<6scN=`C+yK=Pgu$_3nI?ZFVq~@ zV$Z4CC2eXYi*z7k7^!j^8E}vQ?pOqi3bI`s7#S)(dhiag@ZX+`7<)k;(VHQsA!-^{ zV3)7d;IKTR_uz%oOgrSqVGJ{_o9W(v*wgwYdIwlToK1V=`4iNh8M@sw85QcYW&liS zg@gD}8ORN}x(msA&8cjl?-U6g;~{&TxTaUq zGg6MI01d!xCR-EM0UG9QOH1YxB_t;ml}s!TC=yjue?32zY8O}HJmh6Z*VKbLe}+vI-FZHq*T95 z2&h3aG8_Nad21D|O0jKsh+l^E1c$uo}ra^Si2RiJ0u_{k?hn@YW0E%s}M(`9--_Oa9Lw zhK?pS8$a_=K}?qfQ$T& zN@BIJRm$coAVajRdKa?CNjC3W9$2}djXJ8}0}h7o$I~+pr{S;2rB>B_^kx8F-QmyLRi#=NBB#PY z-!iU{9^%{(W;GmI(;H|q8fJw8eC5yM?j}kZl^8`(dVo0YU*`ik<A@T2p-~5v zny05HBg(0!-H*_K486G00^ zZBBMJrTB*B*q1jDI@xjm2~zB^7t$dvgTP8$T$~H7^~~1Rpy(O%-c)|`C!Pe@h)KDV z;T5zK5?C;DaPT85>xO1>*xkA&Li;C(G@XMgJWxP_4D?hKH~)#H{7*Gyf%AeKR2Xsx z?tXN16!y>QX&32A9X6hzP*sqDIr_I1h9KmW56UTo=L2Vtq1Zaw#1Dp&G+4H8WI&?bIDaDDYF%MSNe`pKBuiJfR`2~a6$Huf u_i$3cy!hkh@gE+}3;5*!^Wm(^KKZ$CldP3zEOY;XKk_ol(&>_SUH%VR8Gir( literal 0 HcmV?d00001 diff --git a/bitswap/docs/go-bitswap.puml b/bitswap/docs/go-bitswap.puml new file mode 100644 index 000000000..49da618b3 --- /dev/null +++ b/bitswap/docs/go-bitswap.puml @@ -0,0 +1,46 @@ +@startuml Bitswap Components + +node "Top Level Interface" { + [Bitswap] +} +node "Sending Blocks" { + + [Engine] -left-* [Ledger] + [Engine] -right-* [PeerTaskQueue] + [Engine] --> [TaskWorker (workers.go)] +} +[Bitswap] --* "Sending Blocks" +node "Requesting Blocks" { + [Bitswap] --* [WantManager] + [WantManager] --> [PeerManager] + [PeerManager] --* [MessageQueue] +} + +node "Providing" { + [Bitswap] --* [Provide Collector (workers.go)] + [Provide Collector (workers.go)] --* [Provide Worker (workers.go)] +} + +node "Finding Providers" { + [Bitswap] --* [ProvideQueryManager] +} + +node "Sessions (smart requests)" { + [Bitswap] --* 
[SessionManager] + [SessionManager] --o [Session] + [SessionManager] --o [SessionPeerManager] + [SessionManager] --o [SessionRequestSplitter] + [Session] --* [SessionPeerManager] + [Session] --* [SessionRequestSplitter] + [Session] --> [WantManager] + [SessionPeerManager] --> [ProvideQueryManager] +} + +node "Network" { + [BitSwapNetwork] + [MessageQueue] --> [BitSwapNetwork] + [ProvideQueryManager] --> [BitSwapNetwork] + [TaskWorker (workers.go)] --> [BitSwapNetwork] + [Provide Worker (workers.go)] --> [BitSwapNetwork] +} +@enduml \ No newline at end of file From 4b8d07c7076214c9192485265e442a99f253f9d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 8 Aug 2019 18:58:31 +0200 Subject: [PATCH 0784/1035] network: Allow specifying protocol prefix This commit was moved from ipfs/go-bitswap@167327fc3c5e27302fd534c908fc83f6bf2d6c88 --- bitswap/network/ipfs_impl.go | 41 ++++++++++++++++++++++++------------ bitswap/network/options.go | 15 +++++++++++++ 2 files changed, 43 insertions(+), 13 deletions(-) create mode 100644 bitswap/network/options.go diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 52ee64c67..005cfd585 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,15 +8,16 @@ import ( "time" bsmsg "github.com/ipfs/go-bitswap/message" - "github.com/libp2p/go-libp2p-core/helpers" cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" "github.com/libp2p/go-libp2p-core/connmgr" + "github.com/libp2p/go-libp2p-core/helpers" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" peerstore "github.com/libp2p/go-libp2p-core/peerstore" + "github.com/libp2p/go-libp2p-core/protocol" "github.com/libp2p/go-libp2p-core/routing" msgio "github.com/libp2p/go-msgio" ma "github.com/multiformats/go-multiaddr" @@ -27,10 +28,19 @@ var log = logging.Logger("bitswap_network") var sendMessageTimeout = time.Minute * 10 // 
NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host. -func NewFromIpfsHost(host host.Host, r routing.ContentRouting) BitSwapNetwork { +func NewFromIpfsHost(host host.Host, r routing.ContentRouting, opts ...NetOpt) BitSwapNetwork { + s := Settings{} + for _, opt := range opts { + opt(&s) + } + bitswapNetwork := impl{ host: host, routing: r, + + protocolBitswap: s.ProtocolPrefix + ProtocolBitswap, + protocolBitswapOne: s.ProtocolPrefix + ProtocolBitswapOne, + protocolBitswapNoVers: s.ProtocolPrefix + ProtocolBitswapNoVers, } return &bitswapNetwork } @@ -41,6 +51,10 @@ type impl struct { host host.Host routing routing.ContentRouting + protocolBitswap protocol.ID + protocolBitswapOne protocol.ID + protocolBitswapNoVers protocol.ID + // inbound messages from the network are forwarded to the receiver receiver Receiver @@ -48,7 +62,8 @@ type impl struct { } type streamMessageSender struct { - s network.Stream + s network.Stream + bsnet *impl } func (s *streamMessageSender) Close() error { @@ -60,10 +75,10 @@ func (s *streamMessageSender) Reset() error { } func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { - return msgToStream(ctx, s.s, msg) + return s.bsnet.msgToStream(ctx, s.s, msg) } -func msgToStream(ctx context.Context, s network.Stream, msg bsmsg.BitSwapMessage) error { +func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg.BitSwapMessage) error { deadline := time.Now().Add(sendMessageTimeout) if dl, ok := ctx.Deadline(); ok { deadline = dl @@ -74,12 +89,12 @@ func msgToStream(ctx context.Context, s network.Stream, msg bsmsg.BitSwapMessage } switch s.Protocol() { - case ProtocolBitswap: + case bsnet.protocolBitswap: if err := msg.ToNetV1(s); err != nil { log.Debugf("error: %s", err) return err } - case ProtocolBitswapOne, ProtocolBitswapNoVers: + case bsnet.protocolBitswapOne, bsnet.protocolBitswapNoVers: if err := msg.ToNetV0(s); err != nil { log.Debugf("error: %s", err) 
return err @@ -100,11 +115,11 @@ func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID) (MessageSend return nil, err } - return &streamMessageSender{s: s}, nil + return &streamMessageSender{s: s, bsnet: bsnet}, nil } func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (network.Stream, error) { - return bsnet.host.NewStream(ctx, p, ProtocolBitswap, ProtocolBitswapOne, ProtocolBitswapNoVers) + return bsnet.host.NewStream(ctx, p, bsnet.protocolBitswap, bsnet.protocolBitswapOne, bsnet.protocolBitswapNoVers) } func (bsnet *impl) SendMessage( @@ -117,7 +132,7 @@ func (bsnet *impl) SendMessage( return err } - if err = msgToStream(ctx, s, outgoing); err != nil { + if err = bsnet.msgToStream(ctx, s, outgoing); err != nil { s.Reset() return err } @@ -131,9 +146,9 @@ func (bsnet *impl) SendMessage( func (bsnet *impl) SetDelegate(r Receiver) { bsnet.receiver = r - bsnet.host.SetStreamHandler(ProtocolBitswap, bsnet.handleNewStream) - bsnet.host.SetStreamHandler(ProtocolBitswapOne, bsnet.handleNewStream) - bsnet.host.SetStreamHandler(ProtocolBitswapNoVers, bsnet.handleNewStream) + bsnet.host.SetStreamHandler(bsnet.protocolBitswap, bsnet.handleNewStream) + bsnet.host.SetStreamHandler(bsnet.protocolBitswapOne, bsnet.handleNewStream) + bsnet.host.SetStreamHandler(bsnet.protocolBitswapNoVers, bsnet.handleNewStream) bsnet.host.Network().Notify((*netNotifiee)(bsnet)) // TODO: StopNotify. 
diff --git a/bitswap/network/options.go b/bitswap/network/options.go new file mode 100644 index 000000000..38bb63d10 --- /dev/null +++ b/bitswap/network/options.go @@ -0,0 +1,15 @@ +package network + +import "github.com/libp2p/go-libp2p-core/protocol" + +type NetOpt func(*Settings) + +type Settings struct { + ProtocolPrefix protocol.ID +} + +func Prefix(prefix protocol.ID) NetOpt { + return func(settings *Settings) { + settings.ProtocolPrefix = prefix + } +} From 56781a3293502ce48d05ae409185fa3bb2b6c143 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Tue, 13 Aug 2019 11:38:21 -0400 Subject: [PATCH 0785/1035] Feat: process response message blocks as a batch (#170) feat: process response message blocks as a batch This commit was moved from ipfs/go-bitswap@e72b2894da985eb9edc714fe3728b2c721926067 --- bitswap/bitswap.go | 139 ++++++++++------ bitswap/bitswap_test.go | 7 + bitswap/decision/engine.go | 25 +-- bitswap/session/session.go | 149 ++++++++++-------- bitswap/session/session_test.go | 10 +- bitswap/sessionmanager/sessionmanager.go | 28 ++-- bitswap/sessionmanager/sessionmanager_test.go | 111 ++++++++----- bitswap/sessionpeermanager/latencytracker.go | 10 +- .../sessionpeermanager/sessionpeermanager.go | 38 ++--- .../sessionpeermanager_test.go | 22 +-- .../sessionrequestsplitter.go | 2 +- 11 files changed, 322 insertions(+), 219 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 1056cd69b..afdf86520 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -265,23 +265,39 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks // HasBlock announces the existence of a block to this bitswap service. The // service will potentially notify its peers. 
func (bs *Bitswap) HasBlock(blk blocks.Block) error { - return bs.receiveBlockFrom(blk, "") + return bs.receiveBlocksFrom("", []blocks.Block{blk}) } // TODO: Some of this stuff really only needs to be done when adding a block // from the user, not when receiving it from the network. // In case you run `git blame` on this comment, I'll save you some time: ask // @whyrusleeping, I don't know the answers you seek. -func (bs *Bitswap) receiveBlockFrom(blk blocks.Block, from peer.ID) error { +func (bs *Bitswap) receiveBlocksFrom(from peer.ID, blks []blocks.Block) error { select { case <-bs.process.Closing(): return errors.New("bitswap is closed") default: } - err := bs.blockstore.Put(blk) + wanted := blks + + // If blocks came from the network + if from != "" { + // Split blocks into wanted blocks vs duplicates + wanted = make([]blocks.Block, 0, len(blks)) + for _, b := range blks { + if bs.wm.IsWanted(b.Cid()) { + wanted = append(wanted, b) + } else { + log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), from) + } + } + } + + // Put wanted blocks into blockstore + err := bs.blockstore.PutMany(wanted) if err != nil { - log.Errorf("Error writing block to datastore: %s", err) + log.Errorf("Error writing %d blocks to datastore: %s", len(wanted), err) return err } @@ -291,18 +307,25 @@ func (bs *Bitswap) receiveBlockFrom(blk blocks.Block, from peer.ID) error { // to the same node. We should address this soon, but i'm not going to do // it now as it requires more thought and isnt causing immediate problems. - bs.sm.ReceiveBlockFrom(from, blk) + // Send all blocks (including duplicates) to any sessions that want them. 
+ // (The duplicates are needed by sessions for accounting purposes) + bs.sm.ReceiveBlocksFrom(from, blks) - bs.engine.AddBlock(blk) + // Send wanted blocks to decision engine + bs.engine.AddBlocks(wanted) + // If the reprovider is enabled, send wanted blocks to reprovider if bs.provideEnabled { - select { - case bs.newBlocks <- blk.Cid(): - // send block off to be reprovided - case <-bs.process.Closing(): - return bs.process.Close() + for _, b := range wanted { + select { + case bs.newBlocks <- b.Cid(): + // send block off to be reprovided + case <-bs.process.Closing(): + return bs.process.Close() + } } } + return nil } @@ -325,56 +348,78 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg return } - wg := sync.WaitGroup{} - for _, block := range iblocks { - - wg.Add(1) - go func(b blocks.Block) { // TODO: this probably doesnt need to be a goroutine... - defer wg.Done() - - bs.updateReceiveCounters(b) - bs.sm.UpdateReceiveCounters(p, b) - log.Debugf("[recv] block; cid=%s, peer=%s", b.Cid(), p) - // skip received blocks that are not in the wantlist - if !bs.wm.IsWanted(b.Cid()) { - log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), p) - return - } - - if err := bs.receiveBlockFrom(b, p); err != nil { - log.Warningf("ReceiveMessage recvBlockFrom error: %s", err) - } - log.Event(ctx, "Bitswap.GetBlockRequest.End", b.Cid()) - }(block) + bs.updateReceiveCounters(iblocks) + for _, b := range iblocks { + log.Debugf("[recv] block; cid=%s, peer=%s", b.Cid(), p) } - wg.Wait() -} -func (bs *Bitswap) updateReceiveCounters(b blocks.Block) { - blkLen := len(b.RawData()) - has, err := bs.blockstore.Has(b.Cid()) + // Process blocks + err := bs.receiveBlocksFrom(p, iblocks) if err != nil { - log.Infof("blockstore.Has error: %s", err) + log.Warningf("ReceiveMessage recvBlockFrom error: %s", err) return } - bs.allMetric.Observe(float64(blkLen)) - if has { - bs.dupMetric.Observe(float64(blkLen)) + for _, b := range iblocks { + if 
bs.wm.IsWanted(b.Cid()) { + log.Event(ctx, "Bitswap.GetBlockRequest.End", b.Cid()) + } } +} + +func (bs *Bitswap) updateReceiveCounters(blocks []blocks.Block) { + // Check which blocks are in the datastore + // (Note: any errors from the blockstore are simply logged out in + // blockstoreHas()) + blocksHas := bs.blockstoreHas(blocks) bs.counterLk.Lock() defer bs.counterLk.Unlock() - c := bs.counters - c.blocksRecvd++ - c.dataRecvd += uint64(len(b.RawData())) - if has { - c.dupBlocksRecvd++ - c.dupDataRecvd += uint64(blkLen) + // Do some accounting for each block + for i, b := range blocks { + has := blocksHas[i] + + blkLen := len(b.RawData()) + bs.allMetric.Observe(float64(blkLen)) + if has { + bs.dupMetric.Observe(float64(blkLen)) + } + + c := bs.counters + + c.blocksRecvd++ + c.dataRecvd += uint64(blkLen) + if has { + c.dupBlocksRecvd++ + c.dupDataRecvd += uint64(blkLen) + } } } +func (bs *Bitswap) blockstoreHas(blks []blocks.Block) []bool { + res := make([]bool, len(blks)) + + wg := sync.WaitGroup{} + for i, block := range blks { + wg.Add(1) + go func(i int, b blocks.Block) { + defer wg.Done() + + has, err := bs.blockstore.Has(b.Cid()) + if err != nil { + log.Infof("blockstore.Has error: %s", err) + has = false + } + + res[i] = has + }(i, block) + } + wg.Wait() + + return res +} + // PeerConnected is called by the network interface // when a peer initiates a new connection to bitswap. 
func (bs *Bitswap) PeerConnected(p peer.ID) { diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 777e2b46f..e13621803 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -357,6 +357,8 @@ func TestBasicBitswap(t *testing.T) { instances := ig.Instances(3) blocks := bg.Blocks(1) + + // First peer has block err := instances[0].Exchange.HasBlock(blocks[0]) if err != nil { t.Fatal(err) @@ -364,11 +366,16 @@ func TestBasicBitswap(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() + + // Second peer broadcasts want for block CID + // (Received by first and third peers) blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) if err != nil { t.Fatal(err) } + // When second peer receives block, it should send out a cancel, so third + // peer should no longer keep second peer's want if err = tu.WaitFor(ctx, func() error { if len(instances[2].Exchange.WantlistForPeer(instances[1].Peer)) != 0 { return fmt.Errorf("should have no items in other peers wantlist") diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 61bb4ca19..a4eee0f0d 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -312,17 +312,19 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) { } } -func (e *Engine) addBlock(block blocks.Block) { +func (e *Engine) addBlocks(blocks []blocks.Block) { work := false for _, l := range e.ledgerMap { l.lk.Lock() - if entry, ok := l.WantListContains(block.Cid()); ok { - e.peerRequestQueue.PushBlock(l.Partner, peertask.Task{ - Identifier: entry.Cid, - Priority: entry.Priority, - }) - work = true + for _, block := range blocks { + if entry, ok := l.WantListContains(block.Cid()); ok { + e.peerRequestQueue.PushBlock(l.Partner, peertask.Task{ + Identifier: entry.Cid, + Priority: entry.Priority, + }) + work = true + } } l.lk.Unlock() } @@ -332,13 +334,14 @@ func (e *Engine) addBlock(block blocks.Block) { } } -// AddBlock is 
called to when a new block is received and added to a block store -// meaning there may be peers who want that block that we should send it to. -func (e *Engine) AddBlock(block blocks.Block) { +// AddBlocks is called when new blocks are received and added to a block store, +// meaning there may be peers who want those blocks, so we should send the blocks +// to them. +func (e *Engine) AddBlocks(blocks []blocks.Block) { e.lock.Lock() defer e.lock.Unlock() - e.addBlock(block) + e.addBlocks(blocks) } // TODO add contents of m.WantList() to my local wantlist? NB: could introduce diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 8a77baa22..6e3f11b27 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -35,8 +35,8 @@ type PeerManager interface { FindMorePeers(context.Context, cid.Cid) GetOptimizedPeers() []bssd.OptimizedPeer RecordPeerRequests([]peer.ID, []cid.Cid) - RecordPeerResponse(peer.ID, cid.Cid) - RecordCancel(cid.Cid) + RecordPeerResponse(peer.ID, []cid.Cid) + RecordCancels([]cid.Cid) } // RequestSplitter provides an interface for splitting @@ -52,10 +52,9 @@ type interestReq struct { resp chan bool } -type blkRecv struct { - from peer.ID - blk blocks.Block - counterMessage bool +type blksRecv struct { + from peer.ID + blks []blocks.Block } // Session holds state for an individual bitswap transfer operation. @@ -69,7 +68,7 @@ type Session struct { srs RequestSplitter // channels - incoming chan blkRecv + incoming chan blksRecv newReqs chan []cid.Cid cancelKeys chan []cid.Cid interestReqs chan interestReq @@ -117,7 +116,7 @@ func New(ctx context.Context, wm: wm, pm: pm, srs: srs, - incoming: make(chan blkRecv), + incoming: make(chan blksRecv), notif: notifications.New(), uuid: loggables.Uuid("GetBlockRequest"), baseTickDelay: time.Millisecond * 500, @@ -134,22 +133,10 @@ func New(ctx context.Context, return s } -// ReceiveBlockFrom receives an incoming block from the given peer. 
-func (s *Session) ReceiveBlockFrom(from peer.ID, blk blocks.Block) { +// ReceiveBlocksFrom receives incoming blocks from the given peer. +func (s *Session) ReceiveBlocksFrom(from peer.ID, blocks []blocks.Block) { select { - case s.incoming <- blkRecv{from: from, blk: blk, counterMessage: false}: - case <-s.ctx.Done(): - } - ks := []cid.Cid{blk.Cid()} - s.pm.RecordCancel(blk.Cid()) - s.wm.CancelWants(s.ctx, ks, nil, s.id) -} - -// UpdateReceiveCounters updates receive counters for a block, -// which may be a duplicate and adjusts the split factor based on that. -func (s *Session) UpdateReceiveCounters(from peer.ID, blk blocks.Block) { - select { - case s.incoming <- blkRecv{from: from, blk: blk, counterMessage: true}: + case s.incoming <- blksRecv{from: from, blks: blocks}: case <-s.ctx.Done(): } } @@ -243,12 +230,14 @@ func (s *Session) run(ctx context.Context) { s.periodicSearchTimer = time.NewTimer(s.periodicSearchDelay.NextWaitTime()) for { select { - case blk := <-s.incoming: - if blk.counterMessage { - s.updateReceiveCounters(ctx, blk) - } else { - s.handleIncomingBlock(ctx, blk) + case rcv := <-s.incoming: + s.cancelIncomingBlocks(ctx, rcv) + // Record statistics only if the blocks came from the network + // (blocks can also be received from the local node) + if rcv.from != "" { + s.updateReceiveCounters(ctx, rcv) } + s.handleIncomingBlocks(ctx, rcv) case keys := <-s.newReqs: s.handleNewRequest(ctx, keys) case keys := <-s.cancelKeys: @@ -270,14 +259,23 @@ func (s *Session) run(ctx context.Context) { } } -func (s *Session) handleIncomingBlock(ctx context.Context, blk blkRecv) { - s.idleTick.Stop() - - if blk.from != "" { - s.pm.RecordPeerResponse(blk.from, blk.blk.Cid()) +func (s *Session) cancelIncomingBlocks(ctx context.Context, rcv blksRecv) { + // We've received the blocks so we can cancel any outstanding wants for them + ks := make([]cid.Cid, 0, len(rcv.blks)) + for _, b := range rcv.blks { + if s.cidIsWanted(b.Cid()) { + ks = append(ks, b.Cid()) + } } + 
s.pm.RecordCancels(ks) + s.wm.CancelWants(s.ctx, ks, nil, s.id) +} + +func (s *Session) handleIncomingBlocks(ctx context.Context, rcv blksRecv) { + s.idleTick.Stop() - s.receiveBlock(ctx, blk.blk) + // Process the received blocks + s.receiveBlocks(ctx, rcv.blks) s.resetIdleTick() } @@ -378,45 +376,64 @@ func (s *Session) cidIsWanted(c cid.Cid) bool { return ok } -func (s *Session) receiveBlock(ctx context.Context, blk blocks.Block) { - c := blk.Cid() - if s.cidIsWanted(c) { - s.srs.RecordUniqueBlock() - tval, ok := s.liveWants[c] - if ok { - s.latTotal += time.Since(tval) - delete(s.liveWants, c) - } else { - s.tofetch.Remove(c) - } - s.fetchcnt++ - // we've received new wanted blocks, so future ticks are not consecutive - s.consecutiveTicks = 0 - s.notif.Publish(blk) - - toAdd := s.wantBudget() - if toAdd > s.tofetch.Len() { - toAdd = s.tofetch.Len() - } - if toAdd > 0 { - var keys []cid.Cid - for i := 0; i < toAdd; i++ { - keys = append(keys, s.tofetch.Pop()) +func (s *Session) receiveBlocks(ctx context.Context, blocks []blocks.Block) { + for _, blk := range blocks { + c := blk.Cid() + if s.cidIsWanted(c) { + // If the block CID was in the live wants queue, remove it + tval, ok := s.liveWants[c] + if ok { + s.latTotal += time.Since(tval) + delete(s.liveWants, c) + } else { + // Otherwise remove it from the tofetch queue, if it was there + s.tofetch.Remove(c) } - s.wantBlocks(ctx, keys) + s.fetchcnt++ + + // We've received new wanted blocks, so reset the number of ticks + // that have occurred since the last new block + s.consecutiveTicks = 0 + + s.notif.Publish(blk) + + // Keep track of CIDs we've successfully fetched + s.pastWants.Push(c) } + } - s.pastWants.Push(c) + // Transfer as many CIDs as possible from the tofetch queue into the + // live wants queue + toAdd := s.wantBudget() + if toAdd > s.tofetch.Len() { + toAdd = s.tofetch.Len() + } + if toAdd > 0 { + var keys []cid.Cid + for i := 0; i < toAdd; i++ { + keys = append(keys, s.tofetch.Pop()) + } + 
s.wantBlocks(ctx, keys) } } -func (s *Session) updateReceiveCounters(ctx context.Context, blk blkRecv) { - ks := blk.blk.Cid() - if s.pastWants.Has(ks) { - s.srs.RecordDuplicateBlock() - if blk.from != "" { - s.pm.RecordPeerResponse(blk.from, ks) +func (s *Session) updateReceiveCounters(ctx context.Context, rcv blksRecv) { + ks := make([]cid.Cid, len(rcv.blks)) + + for _, blk := range rcv.blks { + // Inform the request splitter of unique / duplicate blocks + if s.cidIsWanted(blk.Cid()) { + s.srs.RecordUniqueBlock() + } else if s.pastWants.Has(blk.Cid()) { + s.srs.RecordDuplicateBlock() } + + ks = append(ks, blk.Cid()) + } + + // Record response (to be able to time latency) + if len(ks) > 0 { + s.pm.RecordPeerResponse(rcv.from, ks) } } diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index ade9e6425..7a2e66bba 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -63,12 +63,12 @@ func (fpm *fakePeerManager) GetOptimizedPeers() []bssd.OptimizedPeer { } func (fpm *fakePeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} -func (fpm *fakePeerManager) RecordPeerResponse(p peer.ID, c cid.Cid) { +func (fpm *fakePeerManager) RecordPeerResponse(p peer.ID, c []cid.Cid) { fpm.lk.Lock() fpm.peers = append(fpm.peers, p) fpm.lk.Unlock() } -func (fpm *fakePeerManager) RecordCancel(c cid.Cid) {} +func (fpm *fakePeerManager) RecordCancels(c []cid.Cid) {} type fakeRequestSplitter struct { } @@ -122,7 +122,7 @@ func TestSessionGetBlocks(t *testing.T) { var newBlockReqs []wantReq var receivedBlocks []blocks.Block for i, p := range peers { - session.ReceiveBlockFrom(p, blks[testutil.IndexOf(blks, receivedWantReq.cids[i])]) + session.ReceiveBlocksFrom(p, []blocks.Block{blks[testutil.IndexOf(blks, receivedWantReq.cids[i])]}) select { case cancelBlock := <-cancelReqs: newCancelReqs = append(newCancelReqs, cancelBlock) @@ -178,7 +178,7 @@ func TestSessionGetBlocks(t *testing.T) { // receive remaining blocks for i, p := range 
peers { - session.ReceiveBlockFrom(p, blks[testutil.IndexOf(blks, newCidsRequested[i])]) + session.ReceiveBlocksFrom(p, []blocks.Block{blks[testutil.IndexOf(blks, newCidsRequested[i])]}) receivedBlock := <-getBlocksCh receivedBlocks = append(receivedBlocks, receivedBlock) cancelBlock := <-cancelReqs @@ -230,7 +230,7 @@ func TestSessionFindMorePeers(t *testing.T) { // or there will be no tick set -- time precision on Windows in go is in the // millisecond range p := testutil.GeneratePeers(1)[0] - session.ReceiveBlockFrom(p, blks[0]) + session.ReceiveBlocksFrom(p, []blocks.Block{blks[0]}) select { case <-cancelReqs: case <-ctx.Done(): diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index 5a7c7d9c3..bd9ef18c5 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -18,8 +18,7 @@ import ( type Session interface { exchange.Fetcher InterestedIn(cid.Cid) bool - ReceiveBlockFrom(peer.ID, blocks.Block) - UpdateReceiveCounters(peer.ID, blocks.Block) + ReceiveBlocksFrom(peer.ID, []blocks.Block) } type sesTrk struct { @@ -112,27 +111,20 @@ func (sm *SessionManager) GetNextSessionID() uint64 { return sm.sessID } -// ReceiveBlockFrom receives a block from a peer and dispatches to interested +// ReceiveBlocksFrom receives blocks from a peer and dispatches to interested // sessions. 
-func (sm *SessionManager) ReceiveBlockFrom(from peer.ID, blk blocks.Block) { +func (sm *SessionManager) ReceiveBlocksFrom(from peer.ID, blks []blocks.Block) { sm.sessLk.Lock() defer sm.sessLk.Unlock() - k := blk.Cid() + // Only give each session the blocks / dups that it is interested in for _, s := range sm.sessions { - if s.session.InterestedIn(k) { - s.session.ReceiveBlockFrom(from, blk) + sessBlks := make([]blocks.Block, 0, len(blks)) + for _, b := range blks { + if s.session.InterestedIn(b.Cid()) { + sessBlks = append(sessBlks, b) + } } - } -} - -// UpdateReceiveCounters records the fact that a block was received, allowing -// sessions to track duplicates -func (sm *SessionManager) UpdateReceiveCounters(from peer.ID, blk blocks.Block) { - sm.sessLk.Lock() - defer sm.sessLk.Unlock() - - for _, s := range sm.sessions { - s.session.UpdateReceiveCounters(from, blk) + s.session.ReceiveBlocksFrom(from, sessBlks) } } diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index 19f50e335..6a60f5afc 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -9,6 +9,7 @@ import ( bssession "github.com/ipfs/go-bitswap/session" bssd "github.com/ipfs/go-bitswap/sessiondata" + "github.com/ipfs/go-bitswap/testutil" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" @@ -16,7 +17,9 @@ import ( ) type fakeSession struct { - interested bool + interested []cid.Cid + blks []blocks.Block + fromNetwork bool receivedBlock bool updateReceiveCounters bool id uint64 @@ -30,9 +33,17 @@ func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { func (*fakeSession) GetBlocks(context.Context, []cid.Cid) (<-chan blocks.Block, error) { return nil, nil } -func (fs *fakeSession) InterestedIn(cid.Cid) bool { return fs.interested } -func (fs *fakeSession) ReceiveBlockFrom(peer.ID, blocks.Block) { fs.receivedBlock = true } -func (fs *fakeSession) 
UpdateReceiveCounters(peer.ID, blocks.Block) { fs.updateReceiveCounters = true } +func (fs *fakeSession) InterestedIn(c cid.Cid) bool { + for _, ic := range fs.interested { + if c == ic { + return true + } + } + return false +} +func (fs *fakeSession) ReceiveBlocksFrom(p peer.ID, blks []blocks.Block) { + fs.blks = append(fs.blks, blks...) +} type fakePeerManager struct { id uint64 @@ -41,8 +52,8 @@ type fakePeerManager struct { func (*fakePeerManager) FindMorePeers(context.Context, cid.Cid) {} func (*fakePeerManager) GetOptimizedPeers() []bssd.OptimizedPeer { return nil } func (*fakePeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} -func (*fakePeerManager) RecordPeerResponse(peer.ID, cid.Cid) {} -func (*fakePeerManager) RecordCancel(c cid.Cid) {} +func (*fakePeerManager) RecordPeerResponse(peer.ID, []cid.Cid) {} +func (*fakePeerManager) RecordCancels(c []cid.Cid) {} type fakeRequestSplitter struct { } @@ -53,7 +64,7 @@ func (frs *fakeRequestSplitter) SplitRequest(optimizedPeers []bssd.OptimizedPeer func (frs *fakeRequestSplitter) RecordDuplicateBlock() {} func (frs *fakeRequestSplitter) RecordUniqueBlock() {} -var nextInterestedIn bool +var nextInterestedIn []cid.Cid func sessionFactory(ctx context.Context, id uint64, @@ -62,11 +73,10 @@ func sessionFactory(ctx context.Context, provSearchDelay time.Duration, rebroadcastDelay delay.D) Session { return &fakeSession{ - interested: nextInterestedIn, - receivedBlock: false, - id: id, - pm: pm.(*fakePeerManager), - srs: srs.(*fakeRequestSplitter), + interested: nextInterestedIn, + id: id, + pm: pm.(*fakePeerManager), + srs: srs.(*fakeRequestSplitter), } } @@ -78,6 +88,28 @@ func requestSplitterFactory(ctx context.Context) bssession.RequestSplitter { return &fakeRequestSplitter{} } +func cmpSessionCids(s *fakeSession, cids []cid.Cid) bool { + return cmpBlockCids(s.blks, cids) +} + +func cmpBlockCids(blks []blocks.Block, cids []cid.Cid) bool { + if len(blks) != len(cids) { + return false + } + for _, b := range blks 
{ + has := false + for _, c := range cids { + if c == b.Cid() { + has = true + } + } + if !has { + return false + } + } + return true +} + func TestAddingSessions(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) @@ -87,7 +119,7 @@ func TestAddingSessions(t *testing.T) { p := peer.ID(123) block := blocks.NewBlock([]byte("block")) // we'll be interested in all blocks for this test - nextInterestedIn = true + nextInterestedIn = []cid.Cid{block.Cid()} currentID := sm.GetNextSessionID() firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) @@ -106,10 +138,10 @@ func TestAddingSessions(t *testing.T) { thirdSession.id != secondSession.id+2 { t.Fatal("session does not have correct id set") } - sm.ReceiveBlockFrom(p, block) - if !firstSession.receivedBlock || - !secondSession.receivedBlock || - !thirdSession.receivedBlock { + sm.ReceiveBlocksFrom(p, []blocks.Block{block}) + if len(firstSession.blks) == 0 || + len(secondSession.blks) == 0 || + len(thirdSession.blks) == 0 { t.Fatal("should have received blocks but didn't") } } @@ -121,20 +153,25 @@ func TestReceivingBlocksWhenNotInterested(t *testing.T) { sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory) p := peer.ID(123) - block := blocks.NewBlock([]byte("block")) - // we'll be interested in all blocks for this test - nextInterestedIn = false + blks := testutil.GenerateBlocksOfSize(3, 1024) + var cids []cid.Cid + for _, b := range blks { + cids = append(cids, b.Cid()) + } + + nextInterestedIn = []cid.Cid{cids[0], cids[1]} firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - nextInterestedIn = true + nextInterestedIn = []cid.Cid{cids[0]} secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - nextInterestedIn = false + nextInterestedIn = []cid.Cid{} thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - sm.ReceiveBlockFrom(p, 
block) - if firstSession.receivedBlock || - !secondSession.receivedBlock || - thirdSession.receivedBlock { - t.Fatal("did not receive blocks only for interested sessions") + sm.ReceiveBlocksFrom(p, []blocks.Block{blks[0], blks[1]}) + + if !cmpSessionCids(firstSession, []cid.Cid{cids[0], cids[1]}) || + !cmpSessionCids(secondSession, []cid.Cid{cids[0]}) || + !cmpSessionCids(thirdSession, []cid.Cid{}) { + t.Fatal("did not receive correct blocks for sessions") } } @@ -146,7 +183,7 @@ func TestRemovingPeersWhenManagerContextCancelled(t *testing.T) { p := peer.ID(123) block := blocks.NewBlock([]byte("block")) // we'll be interested in all blocks for this test - nextInterestedIn = true + nextInterestedIn = []cid.Cid{block.Cid()} firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) @@ -154,10 +191,10 @@ func TestRemovingPeersWhenManagerContextCancelled(t *testing.T) { cancel() // wait for sessions to get removed time.Sleep(10 * time.Millisecond) - sm.ReceiveBlockFrom(p, block) - if firstSession.receivedBlock || - secondSession.receivedBlock || - thirdSession.receivedBlock { + sm.ReceiveBlocksFrom(p, []blocks.Block{block}) + if len(firstSession.blks) > 0 || + len(secondSession.blks) > 0 || + len(thirdSession.blks) > 0 { t.Fatal("received blocks for sessions after manager is shutdown") } } @@ -171,7 +208,7 @@ func TestRemovingPeersWhenSessionContextCancelled(t *testing.T) { p := peer.ID(123) block := blocks.NewBlock([]byte("block")) // we'll be interested in all blocks for this test - nextInterestedIn = true + nextInterestedIn = []cid.Cid{block.Cid()} firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) sessionCtx, sessionCancel := context.WithCancel(ctx) secondSession := sm.NewSession(sessionCtx, time.Second, 
delay.Fixed(time.Minute)).(*fakeSession) @@ -180,10 +217,10 @@ func TestRemovingPeersWhenSessionContextCancelled(t *testing.T) { sessionCancel() // wait for sessions to get removed time.Sleep(10 * time.Millisecond) - sm.ReceiveBlockFrom(p, block) - if !firstSession.receivedBlock || - secondSession.receivedBlock || - !thirdSession.receivedBlock { + sm.ReceiveBlocksFrom(p, []blocks.Block{block}) + if len(firstSession.blks) == 0 || + len(secondSession.blks) > 0 || + len(thirdSession.blks) == 0 { t.Fatal("received blocks for sessions that are canceled") } } diff --git a/bitswap/sessionpeermanager/latencytracker.go b/bitswap/sessionpeermanager/latencytracker.go index 5ace5c8fc..da22d13d8 100644 --- a/bitswap/sessionpeermanager/latencytracker.go +++ b/bitswap/sessionpeermanager/latencytracker.go @@ -56,10 +56,12 @@ func (lt *latencyTracker) RemoveRequest(key cid.Cid) { } } -func (lt *latencyTracker) RecordCancel(key cid.Cid) { - request, ok := lt.requests[key] - if ok { - request.wasCancelled = true +func (lt *latencyTracker) RecordCancel(keys []cid.Cid) { + for _, key := range keys { + request, ok := lt.requests[key] + if ok { + request.wasCancelled = true + } } } diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index b6fafe090..b516d9c4c 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -72,23 +72,21 @@ func New(ctx context.Context, id uint64, tagger PeerTagger, providerFinder PeerP return spm } -// RecordPeerResponse records that a peer received a block, and adds to it -// the list of peers if it wasn't already added -func (spm *SessionPeerManager) RecordPeerResponse(p peer.ID, k cid.Cid) { +// RecordPeerResponse records that a peer received some blocks, and adds the +// peer to the list of peers if it wasn't already added +func (spm *SessionPeerManager) RecordPeerResponse(p peer.ID, ks []cid.Cid) { select { - case spm.peerMessages <- 
&peerResponseMessage{p, k}: + case spm.peerMessages <- &peerResponseMessage{p, ks}: case <-spm.ctx.Done(): } } -// RecordCancel records the fact that cancellations were sent to peers, -// so if not blocks come in, don't let it affect peers timeout -func (spm *SessionPeerManager) RecordCancel(k cid.Cid) { - // at the moment, we're just adding peers here - // in the future, we'll actually use this to record metrics +// RecordCancels records the fact that cancellations were sent to peers, +// so if blocks don't arrive, don't let it affect the peer's timeout +func (spm *SessionPeerManager) RecordCancels(ks []cid.Cid) { select { - case spm.peerMessages <- &cancelMessage{k}: + case spm.peerMessages <- &cancelMessage{ks}: case <-spm.ctx.Done(): } } @@ -198,7 +196,7 @@ func (spm *SessionPeerManager) removeUnoptimizedPeer(p peer.ID) { } } -func (spm *SessionPeerManager) recordResponse(p peer.ID, k cid.Cid) { +func (spm *SessionPeerManager) recordResponse(p peer.ID, ks []cid.Cid) { data, ok := spm.activePeers[p] wasOptimized := ok && data.hasLatency if wasOptimized { @@ -211,8 +209,10 @@ func (spm *SessionPeerManager) recordResponse(p peer.ID, k cid.Cid) { spm.activePeers[p] = data } } - fallbackLatency, hasFallbackLatency := spm.broadcastLatency.CheckDuration(k) - data.AdjustLatency(k, hasFallbackLatency, fallbackLatency) + for _, k := range ks { + fallbackLatency, hasFallbackLatency := spm.broadcastLatency.CheckDuration(k) + data.AdjustLatency(k, hasFallbackLatency, fallbackLatency) + } if !ok || wasOptimized != data.hasLatency { spm.tagPeer(p, data) } @@ -233,12 +233,12 @@ func (pfm *peerFoundMessage) handle(spm *SessionPeerManager) { } type peerResponseMessage struct { - p peer.ID - k cid.Cid + p peer.ID + ks []cid.Cid } func (prm *peerResponseMessage) handle(spm *SessionPeerManager) { - spm.recordResponse(prm.p, prm.k) + spm.recordResponse(prm.p, prm.ks) } type peerRequestMessage struct { @@ -305,12 +305,12 @@ func (prm *getPeersMessage) handle(spm *SessionPeerManager) 
{ } type cancelMessage struct { - k cid.Cid + ks []cid.Cid } func (cm *cancelMessage) handle(spm *SessionPeerManager) { for _, data := range spm.activePeers { - data.lt.RecordCancel(cm.k) + data.lt.RecordCancel(cm.ks) } } @@ -334,7 +334,7 @@ func (ptm *peerTimeoutMessage) handle(spm *SessionPeerManager) { } else { // If the request was not cancelled, record the latency. Note that we // do this even if we didn't previously know about this peer. - spm.recordResponse(ptm.p, ptm.k) + spm.recordResponse(ptm.p, []cid.Cid{ptm.k}) } } diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index c743cfb7f..e6808307e 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -132,7 +132,7 @@ func TestRecordingReceivedBlocks(t *testing.T) { id := testutil.GenerateSessionID() sessionPeerManager := New(ctx, id, fpt, fppf) - sessionPeerManager.RecordPeerResponse(p, c) + sessionPeerManager.RecordPeerResponse(p, []cid.Cid{c}) time.Sleep(10 * time.Millisecond) sessionPeers := getPeers(sessionPeerManager) if len(sessionPeers) != 1 { @@ -175,11 +175,11 @@ func TestOrderingPeers(t *testing.T) { peer2 := peers[rand.Intn(100)] peer3 := peers[rand.Intn(100)] time.Sleep(1 * time.Millisecond) - sessionPeerManager.RecordPeerResponse(peer1, c[0]) + sessionPeerManager.RecordPeerResponse(peer1, []cid.Cid{c[0]}) time.Sleep(5 * time.Millisecond) - sessionPeerManager.RecordPeerResponse(peer2, c[0]) + sessionPeerManager.RecordPeerResponse(peer2, []cid.Cid{c[0]}) time.Sleep(1 * time.Millisecond) - sessionPeerManager.RecordPeerResponse(peer3, c[0]) + sessionPeerManager.RecordPeerResponse(peer3, []cid.Cid{c[0]}) sessionPeers := sessionPeerManager.GetOptimizedPeers() if len(sessionPeers) != maxOptimizedPeers { @@ -215,7 +215,7 @@ func TestOrderingPeers(t *testing.T) { sessionPeerManager.RecordPeerRequests(nil, c2) // Receive a second time - 
sessionPeerManager.RecordPeerResponse(peer3, c2[0]) + sessionPeerManager.RecordPeerResponse(peer3, []cid.Cid{c2[0]}) // call again nextSessionPeers := sessionPeerManager.GetOptimizedPeers() @@ -272,11 +272,11 @@ func TestTimeoutsAndCancels(t *testing.T) { peer2 := peers[1] peer3 := peers[2] time.Sleep(1 * time.Millisecond) - sessionPeerManager.RecordPeerResponse(peer1, c[0]) + sessionPeerManager.RecordPeerResponse(peer1, []cid.Cid{c[0]}) time.Sleep(2 * time.Millisecond) - sessionPeerManager.RecordPeerResponse(peer2, c[0]) + sessionPeerManager.RecordPeerResponse(peer2, []cid.Cid{c[0]}) time.Sleep(40 * time.Millisecond) - sessionPeerManager.RecordPeerResponse(peer3, c[0]) + sessionPeerManager.RecordPeerResponse(peer3, []cid.Cid{c[0]}) sessionPeers := sessionPeerManager.GetOptimizedPeers() @@ -322,7 +322,7 @@ func TestTimeoutsAndCancels(t *testing.T) { // Request again sessionPeerManager.RecordPeerRequests([]peer.ID{peer2}, c3) - sessionPeerManager.RecordCancel(c3[0]) + sessionPeerManager.RecordCancels([]cid.Cid{c3[0]}) // wait for a timeout time.Sleep(40 * time.Millisecond) @@ -339,9 +339,9 @@ func TestTimeoutsAndCancels(t *testing.T) { // Request again sessionPeerManager.RecordPeerRequests([]peer.ID{peer2}, c4) - sessionPeerManager.RecordCancel(c4[0]) + sessionPeerManager.RecordCancels([]cid.Cid{c4[0]}) time.Sleep(2 * time.Millisecond) - sessionPeerManager.RecordPeerResponse(peer2, c4[0]) + sessionPeerManager.RecordPeerResponse(peer2, []cid.Cid{c4[0]}) time.Sleep(2 * time.Millisecond) // call again diff --git a/bitswap/sessionrequestsplitter/sessionrequestsplitter.go b/bitswap/sessionrequestsplitter/sessionrequestsplitter.go index 46998244b..94535e174 100644 --- a/bitswap/sessionrequestsplitter/sessionrequestsplitter.go +++ b/bitswap/sessionrequestsplitter/sessionrequestsplitter.go @@ -72,7 +72,7 @@ func (srs *SessionRequestSplitter) RecordDuplicateBlock() { } } -// RecordUniqueBlock records the fact that the session received unique block +// RecordUniqueBlock 
records the fact that the session received a unique block // and adjusts the split factor as neccesary. func (srs *SessionRequestSplitter) RecordUniqueBlock() { select { From f3d49b6adb7fe143388099bddf8046f8ade9fa54 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 14 Aug 2019 00:46:06 -0700 Subject: [PATCH 0786/1035] sessionpeermanager: set the id This commit was moved from ipfs/go-bitswap@2a9ebedf2bc8c97d04a0db9beeff8a1da6bccafd --- bitswap/sessionpeermanager/sessionpeermanager.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index b516d9c4c..93723c9ec 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -58,6 +58,7 @@ type SessionPeerManager struct { func New(ctx context.Context, id uint64, tagger PeerTagger, providerFinder PeerProviderFinder) *SessionPeerManager { spm := &SessionPeerManager{ ctx: ctx, + id: id, tagger: tagger, providerFinder: providerFinder, peerMessages: make(chan peerMessage, 16), From d96f2cf11d91f4e17a3ca47034ea7b279cc99922 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 14 Aug 2019 00:50:17 -0700 Subject: [PATCH 0787/1035] test: remove overlap3 and simplify overlap2 overlap3 and 2 are identical This commit was moved from ipfs/go-bitswap@26bf7962c91fb432a7ac94bbd9f472b81c39a6c5 --- bitswap/benchmarks_test.go | 54 +++++++++++--------------------------- 1 file changed, 15 insertions(+), 39 deletions(-) diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index 4293a9870..3e765210e 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -52,24 +52,20 @@ func BenchmarkDups2Nodes(b *testing.B) { subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap1, oneAtATime) }) - b.Run("Overlap2-BatchBy10", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap2, batchFetchBy10) - }) - b.Run("Overlap3-OneAtATime", 
func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap3, oneAtATime) + subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap2, oneAtATime) }) b.Run("Overlap3-BatchBy10", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap3, batchFetchBy10) + subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap2, batchFetchBy10) }) b.Run("Overlap3-AllConcurrent", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap3, fetchAllConcurrent) + subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap2, fetchAllConcurrent) }) b.Run("Overlap3-BigBatch", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap3, batchFetchAll) + subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap2, batchFetchAll) }) b.Run("Overlap3-UnixfsFetch", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap3, unixfsFileFetch) + subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap2, unixfsFileFetch) }) b.Run("10Nodes-AllToAll-OneAtATime", func(b *testing.B) { subtestDistributeAndFetch(b, 10, 100, fixedDelay, allToAll, oneAtATime) @@ -250,38 +246,18 @@ func overlap2(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) bill := provs[0] jeff := provs[1] - bill.Blockstore().Put(blks[0]) - jeff.Blockstore().Put(blks[0]) for i, blk := range blks { - if i%3 == 0 { - bill.Blockstore().Put(blk) - jeff.Blockstore().Put(blk) - } else if i%2 == 1 { - bill.Blockstore().Put(blk) - } else { - jeff.Blockstore().Put(blk) + even := i%2 == 0 + third := i%3 == 0 + if third || even { + if err := bill.Blockstore().Put(blk); err != nil { + b.Fatal(err) + } } - } -} - -func overlap3(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) { - if len(provs) != 2 { - b.Fatal("overlap3 only works with 2 provs") - } - - bill := provs[0] - jeff := provs[1] - - bill.Blockstore().Put(blks[0]) - jeff.Blockstore().Put(blks[0]) - for i, blk := range blks { - if i%3 == 0 { - 
bill.Blockstore().Put(blk) - jeff.Blockstore().Put(blk) - } else if i%2 == 1 { - bill.Blockstore().Put(blk) - } else { - jeff.Blockstore().Put(blk) + if third || !even { + if err := jeff.Blockstore().Put(blk); err != nil { + b.Fatal(err) + } } } } From 1c9b85ce903e8300474367ffd45de091450a15b4 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 14 Aug 2019 00:52:06 -0700 Subject: [PATCH 0788/1035] chore: explicitly handle errors This commit was moved from ipfs/go-bitswap@8f653d3cf227aea8c351a09d2711ab974461fcd6 --- bitswap/benchmarks_test.go | 9 ++++++--- bitswap/bitswap_test.go | 17 +++++++++++++---- bitswap/messagequeue/messagequeue.go | 4 ++-- bitswap/network/ipfs_impl.go | 7 ++++--- bitswap/network/ipfs_impl_test.go | 20 ++++++++++++++++---- bitswap/testinstance/testinstance.go | 5 ++++- bitswap/testnet/network_test.go | 5 ++++- 7 files changed, 49 insertions(+), 18 deletions(-) diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index 3e765210e..779269b48 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -95,7 +95,7 @@ func BenchmarkDups2Nodes(b *testing.B) { subtestDistributeAndFetch(b, 200, 20, fixedDelay, allToAll, batchFetchAll) }) out, _ := json.MarshalIndent(benchmarkLog, "", " ") - ioutil.WriteFile("tmp/benchmark.json", out, 0666) + _ = ioutil.WriteFile("tmp/benchmark.json", out, 0666) } const fastSpeed = 60 * time.Millisecond @@ -145,7 +145,7 @@ func BenchmarkDupsManyNodesRealWorldNetwork(b *testing.B) { subtestDistributeAndFetchRateLimited(b, 300, 200, slowNetworkDelay, slowBandwidthGenerator, stdBlockSize, allToAll, batchFetchAll) }) out, _ := json.MarshalIndent(benchmarkLog, "", " ") - ioutil.WriteFile("tmp/rw-benchmark.json", out, 0666) + _ = ioutil.WriteFile("tmp/rw-benchmark.json", out, 0666) } func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, df distFunc, ff fetchFunc) { @@ -267,7 +267,10 @@ func overlap2(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) // 
but we're mostly just testing performance of the sync algorithm func onePeerPerBlock(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) { for _, blk := range blks { - provs[rand.Intn(len(provs))].Blockstore().Put(blk) + err := provs[rand.Intn(len(provs))].Blockstore().Put(blk) + if err != nil { + b.Fatal(err) + } } } diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index e13621803..c6c3c8b87 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -44,7 +44,10 @@ func TestClose(t *testing.T) { bitswap := ig.Next() bitswap.Exchange.Close() - bitswap.Exchange.GetBlock(context.Background(), block.Cid()) + _, err := bitswap.Exchange.GetBlock(context.Background(), block.Cid()) + if err == nil { + t.Fatal("expected GetBlock to fail") + } } func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this @@ -56,14 +59,17 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this block := blocks.NewBlock([]byte("block")) pinfo := p2ptestutil.RandTestBogusIdentityOrFatal(t) - rs.Client(pinfo).Provide(context.Background(), block.Cid(), true) // but not on network + err := rs.Client(pinfo).Provide(context.Background(), block.Cid(), true) // but not on network + if err != nil { + t.Fatal(err) + } solo := ig.Next() defer solo.Exchange.Close() ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) defer cancel() - _, err := solo.Exchange.GetBlock(ctx, block.Cid()) + _, err = solo.Exchange.GetBlock(ctx, block.Cid()) if err != context.DeadlineExceeded { t.Fatal("Expected DeadlineExceeded error") @@ -224,7 +230,10 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { first := instances[0] for _, b := range blocks { blkeys = append(blkeys, b.Cid()) - first.Exchange.HasBlock(b) + err := first.Exchange.HasBlock(b) + if err != nil { + t.Fatal(err) + } } t.Log("Distribute!") diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go 
index 9e4724244..601a70748 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -113,7 +113,7 @@ func (mq *MessageQueue) runQueue() { return case <-mq.ctx.Done(): if mq.sender != nil { - mq.sender.Reset() + _ = mq.sender.Reset() } return } @@ -220,7 +220,7 @@ func (mq *MessageQueue) attemptSendAndRecovery(message bsmsg.BitSwapMessage) boo } log.Infof("bitswap send error: %s", err) - mq.sender.Reset() + _ = mq.sender.Reset() mq.sender = nil select { diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 005cfd585..036d15328 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -133,12 +133,13 @@ func (bsnet *impl) SendMessage( } if err = bsnet.msgToStream(ctx, s, outgoing); err != nil { - s.Reset() + _ = s.Reset() return err } atomic.AddUint64(&bsnet.stats.MessagesSent, 1) // TODO(https://github.com/libp2p/go-libp2p-net/issues/28): Avoid this goroutine. + //nolint go helpers.AwaitEOF(s) return s.Close() @@ -189,7 +190,7 @@ func (bsnet *impl) handleNewStream(s network.Stream) { defer s.Close() if bsnet.receiver == nil { - s.Reset() + _ = s.Reset() return } @@ -198,7 +199,7 @@ func (bsnet *impl) handleNewStream(s network.Stream) { received, err := bsmsg.FromMsgReader(reader) if err != nil { if err != io.EOF { - s.Reset() + _ = s.Reset() go bsnet.receiver.ReceiveError(err) log.Debugf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err) } diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index 2a8fab4c4..eab3081a0 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -81,14 +81,23 @@ func TestMessageSendAndReceive(t *testing.T) { bsnet1.SetDelegate(r1) bsnet2.SetDelegate(r2) - mn.LinkAll() - bsnet1.ConnectTo(ctx, p2.ID()) + err = mn.LinkAll() + if err != nil { + t.Fatal(err) + } + err = bsnet1.ConnectTo(ctx, p2.ID()) + if err != nil { + t.Fatal(err) + } select { case <-ctx.Done(): 
t.Fatal("did not connect peer") case <-r1.connectionEvent: } - bsnet2.ConnectTo(ctx, p1.ID()) + err = bsnet2.ConnectTo(ctx, p1.ID()) + if err != nil { + t.Fatal(err) + } select { case <-ctx.Done(): t.Fatal("did not connect peer") @@ -107,7 +116,10 @@ func TestMessageSendAndReceive(t *testing.T) { sent.AddEntry(block1.Cid(), 1) sent.AddBlock(block2) - bsnet1.SendMessage(ctx, p2.ID(), sent) + err = bsnet1.SendMessage(ctx, p2.ID(), sent) + if err != nil { + t.Fatal(err) + } select { case <-ctx.Done(): diff --git a/bitswap/testinstance/testinstance.go b/bitswap/testinstance/testinstance.go index 65d25f135..be9eb10f6 100644 --- a/bitswap/testinstance/testinstance.go +++ b/bitswap/testinstance/testinstance.go @@ -65,7 +65,10 @@ func (g *InstanceGenerator) Instances(n int) []Instance { for i, inst := range instances { for j := i + 1; j < len(instances); j++ { oinst := instances[j] - inst.Adapter.ConnectTo(context.Background(), oinst.Peer) + err := inst.Adapter.ConnectTo(context.Background(), oinst.Peer) + if err != nil { + panic(err.Error()) + } } } return instances diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index d0b55ed55..350e95eef 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -35,7 +35,10 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { msgToWaiter := bsmsg.New(true) msgToWaiter.AddBlock(blocks.NewBlock([]byte(expectedStr))) - waiter.SendMessage(ctx, fromWaiter, msgToWaiter) + err := waiter.SendMessage(ctx, fromWaiter, msgToWaiter) + if err != nil { + t.Error(err) + } })) waiter.SetDelegate(lambda(func( From 8f3675cc08ec571bfef8de34853a366b403a6054 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 14 Aug 2019 00:52:28 -0700 Subject: [PATCH 0789/1035] chore: simplify This commit was moved from ipfs/go-bitswap@e3e719730a7e3ffb8c52fd63834cb3092eee9c6e --- bitswap/benchmarks_test.go | 2 +- bitswap/network/ipfs_impl_test.go | 8 ++------ 
bitswap/sessionpeermanager/latencytracker.go | 2 +- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index 779269b48..f8e777982 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -202,7 +202,7 @@ func runDistribution(b *testing.B, instances []testinstance.Instance, blocks []b nst := fetcher.Adapter.Stats() stats := runStats{ - Time: time.Now().Sub(start), + Time: time.Since(start), MsgRecd: nst.MessagesRecvd, MsgSent: nst.MessagesSent, Dups: st.DupBlksReceived, diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index eab3081a0..7cae0b3e2 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -41,16 +41,12 @@ func (r *receiver) ReceiveError(err error) { func (r *receiver) PeerConnected(p peer.ID) { r.peers[p] = struct{}{} - select { - case r.connectionEvent <- struct{}{}: - } + r.connectionEvent <- struct{}{} } func (r *receiver) PeerDisconnected(p peer.ID) { delete(r.peers, p) - select { - case r.connectionEvent <- struct{}{}: - } + r.connectionEvent <- struct{}{} } func TestMessageSendAndReceive(t *testing.T) { // create network diff --git a/bitswap/sessionpeermanager/latencytracker.go b/bitswap/sessionpeermanager/latencytracker.go index da22d13d8..326d2fa4c 100644 --- a/bitswap/sessionpeermanager/latencytracker.go +++ b/bitswap/sessionpeermanager/latencytracker.go @@ -43,7 +43,7 @@ func (lt *latencyTracker) CheckDuration(key cid.Cid) (time.Duration, bool) { request, ok := lt.requests[key] var latency time.Duration if ok { - latency = time.Now().Sub(request.startedAt) + latency = time.Since(request.startedAt) } return latency, ok } From 1266186eaa4d2c6f992ff0c80e4ae329443518d8 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 14 Aug 2019 00:52:45 -0700 Subject: [PATCH 0790/1035] testing: fix panic on failure This commit was moved from ipfs/go-bitswap@a884776a16b05c7c74616ee1fc340270b0dd2198 --- 
bitswap/benchmarks_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index f8e777982..1671b9bbb 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -309,7 +309,7 @@ func fetchAllConcurrent(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { defer wg.Done() _, err := ses.GetBlock(context.Background(), c) if err != nil { - b.Fatal(err) + b.Error(err) } }(c) } From 20e156ef242ea518cb1810f74a2de1afed0d06cf Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 14 Aug 2019 00:53:01 -0700 Subject: [PATCH 0791/1035] test: fix unused warnings This commit was moved from ipfs/go-bitswap@d6002bcb303bb0105ec7d455fe229b412dcad59d --- bitswap/decision/engine_test.go | 2 +- bitswap/session/session_test.go | 3 +++ bitswap/sessionmanager/sessionmanager_test.go | 13 +++++-------- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index d654c191c..5202ce631 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -245,7 +245,7 @@ func TestTaggingPeers(t *testing.T) { t.Fatal("Incorrect number of peers tagged") } envelope.Sent() - next = <-sanfrancisco.Engine.Outbox() + <-sanfrancisco.Engine.Outbox() sanfrancisco.PeerTagger.wait.Wait() if sanfrancisco.PeerTagger.count() != 0 { t.Fatal("Peers should be untagged but weren't") diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index 7a2e66bba..d075f8010 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -188,6 +188,9 @@ func TestSessionGetBlocks(t *testing.T) { if len(receivedBlocks) != len(blks) { t.Fatal("did not receive enough blocks") } + if len(newCancelReqs) != len(receivedBlocks) { + t.Fatal("expected an equal number of received blocks and cancels") + } for _, block := range receivedBlocks { if !testutil.ContainsBlock(blks, block) { t.Fatal("received incorrect 
block") diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index 6a60f5afc..25e33b25d 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -17,14 +17,11 @@ import ( ) type fakeSession struct { - interested []cid.Cid - blks []blocks.Block - fromNetwork bool - receivedBlock bool - updateReceiveCounters bool - id uint64 - pm *fakePeerManager - srs *fakeRequestSplitter + interested []cid.Cid + blks []blocks.Block + id uint64 + pm *fakePeerManager + srs *fakeRequestSplitter } func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { From a8dec9ec75f8ddd1ab3f3e1fcfbeb73899470a82 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 14 Aug 2019 00:53:12 -0700 Subject: [PATCH 0792/1035] test: fix incorrect check This commit was moved from ipfs/go-bitswap@11d0c726013488a9fdd85aeb9a0f2f0e5366aaaf --- bitswap/network/ipfs_impl_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index 7cae0b3e2..cbcc4fecb 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -141,8 +141,8 @@ func TestMessageSendAndReceive(t *testing.T) { } receivedWant := receivedWants[0] if receivedWant.Cid != sentWant.Cid || - receivedWant.Priority != receivedWant.Priority || - receivedWant.Cancel != receivedWant.Cancel { + receivedWant.Priority != sentWant.Priority || + receivedWant.Cancel != sentWant.Cancel { t.Fatal("Sent message wants did not match received message wants") } sentBlocks := sent.Blocks() From 50a2fc1dfdebb5060c421d081466ea9b70280959 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 14 Aug 2019 09:18:40 -0700 Subject: [PATCH 0793/1035] ci: fix ci badge This commit was moved from ipfs/go-bitswap@0ce6ec824b397534f295c1afe1c533083b1be444 --- bitswap/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/bitswap/README.md b/bitswap/README.md index 3f0ae6f08..062fbb625 100644 --- a/bitswap/README.md +++ b/bitswap/README.md @@ -5,7 +5,7 @@ go-bitswap [![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) [![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) [![Coverage Status](https://codecov.io/gh/ipfs/go-bitswap/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs/go-bitswap/branch/master) -[![Travis CI](https://travis-ci.org/ipfs/go-bitswap.svg?branch=master)](https://travis-ci.org/ipfs/go-bitswap) +[![Build Status](https://circleci.com/gh/ipfs/go-bitswap.svg?style=svg)](https://circleci.com/gh/ipfs/go-bitswap) > An implementation of the bitswap protocol in go! From 363fa84e7b25b64e0feb4fd0999b4413bf3c0f57 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 14 Aug 2019 10:25:40 -0400 Subject: [PATCH 0794/1035] refactor: use global pubsub notifier This commit was moved from ipfs/go-bitswap@0bd2ede0758632e512150b3f0817cc23fb19ee28 --- bitswap/bitswap.go | 19 +++++++++- bitswap/session/session.go | 6 +-- bitswap/session/session_test.go | 37 ++++++++++++++++--- bitswap/sessionmanager/sessionmanager.go | 10 +++-- bitswap/sessionmanager/sessionmanager_test.go | 20 ++++++++-- 5 files changed, 73 insertions(+), 19 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index afdf86520..3a5872689 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -16,6 +16,7 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" bsmq "github.com/ipfs/go-bitswap/messagequeue" bsnet "github.com/ipfs/go-bitswap/network" + notifications "github.com/ipfs/go-bitswap/notifications" bspm "github.com/ipfs/go-bitswap/peermanager" bspqm "github.com/ipfs/go-bitswap/providerquerymanager" bssession "github.com/ipfs/go-bitswap/session" @@ -116,9 +117,10 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, pqm := bspqm.New(ctx, network) sessionFactory 
:= func(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter, + notif notifications.PubSub, provSearchDelay time.Duration, rebroadcastDelay delay.D) bssm.Session { - return bssession.New(ctx, id, wm, pm, srs, provSearchDelay, rebroadcastDelay) + return bssession.New(ctx, id, wm, pm, srs, notif, provSearchDelay, rebroadcastDelay) } sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.PeerManager { return bsspm.New(ctx, id, network.ConnectionManager(), pqm) @@ -126,6 +128,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, sessionRequestSplitterFactory := func(ctx context.Context) bssession.RequestSplitter { return bssrs.New(ctx) } + notif := notifications.New() bs := &Bitswap{ blockstore: bstore, @@ -136,7 +139,8 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, provideKeys: make(chan cid.Cid, provideKeysBufferSize), wm: wm, pqm: pqm, - sm: bssm.New(ctx, sessionFactory, sessionPeerManagerFactory, sessionRequestSplitterFactory), + sm: bssm.New(ctx, sessionFactory, sessionPeerManagerFactory, sessionRequestSplitterFactory, notif), + notif: notif, counters: new(counters), dupMetric: dupHist, allMetric: allHist, @@ -163,6 +167,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, go func() { <-px.Closing() // process closes first cancelFunc() + notif.Shutdown() }() procctx.CloseAfterContext(px, ctx) // parent cancelled first @@ -187,6 +192,9 @@ type Bitswap struct { // NB: ensure threadsafety blockstore blockstore.Blockstore + // manages channels of outgoing blocks for sessions + notif notifications.PubSub + // newBlocks is a channel for newly added blocks to be provided to the // network. 
blocks pushed down this channel get buffered and fed to the // provideKeys channel later on to avoid too much network activity @@ -314,6 +322,13 @@ func (bs *Bitswap) receiveBlocksFrom(from peer.ID, blks []blocks.Block) error { // Send wanted blocks to decision engine bs.engine.AddBlocks(wanted) + // Publish the block to any Bitswap clients that had requested blocks. + // (the sessions use this pubsub mechanism to inform clients of received + // blocks) + for _, b := range wanted { + bs.notif.Publish(b) + } + // If the reprovider is enabled, send wanted blocks to reprovider if bs.provideEnabled { for _, b := range wanted { diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 6e3f11b27..518f7b69f 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -101,6 +101,7 @@ func New(ctx context.Context, wm WantManager, pm PeerManager, srs RequestSplitter, + notif notifications.PubSub, initialSearchDelay time.Duration, periodicSearchDelay delay.D) *Session { s := &Session{ @@ -117,7 +118,7 @@ func New(ctx context.Context, pm: pm, srs: srs, incoming: make(chan blksRecv), - notif: notifications.New(), + notif: notif, uuid: loggables.Uuid("GetBlockRequest"), baseTickDelay: time.Millisecond * 500, id: id, @@ -359,7 +360,6 @@ func (s *Session) randomLiveWant() cid.Cid { } func (s *Session) handleShutdown() { s.idleTick.Stop() - s.notif.Shutdown() live := make([]cid.Cid, 0, len(s.liveWants)) for c := range s.liveWants { @@ -395,8 +395,6 @@ func (s *Session) receiveBlocks(ctx context.Context, blocks []blocks.Block) { // that have occurred since the last new block s.consecutiveTicks = 0 - s.notif.Publish(blk) - // Keep track of CIDs we've successfully fetched s.pastWants.Push(c) } diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index d075f8010..5ff460214 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + notifications 
"github.com/ipfs/go-bitswap/notifications" bssd "github.com/ipfs/go-bitswap/sessiondata" "github.com/ipfs/go-bitswap/testutil" blocks "github.com/ipfs/go-block-format" @@ -92,8 +93,10 @@ func TestSessionGetBlocks(t *testing.T) { fwm := &fakeWantManager{wantReqs, cancelReqs} fpm := &fakePeerManager{} frs := &fakeRequestSplitter{} + notif := notifications.New() + defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, frs, time.Second, delay.Fixed(time.Minute)) + session := New(ctx, id, fwm, fpm, frs, notif, time.Second, delay.Fixed(time.Minute)) blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) var cids []cid.Cid @@ -122,7 +125,13 @@ func TestSessionGetBlocks(t *testing.T) { var newBlockReqs []wantReq var receivedBlocks []blocks.Block for i, p := range peers { - session.ReceiveBlocksFrom(p, []blocks.Block{blks[testutil.IndexOf(blks, receivedWantReq.cids[i])]}) + // simulate what bitswap does on receiving a message: + // - calls ReceiveBlocksFrom() on session + // - publishes block to pubsub channel + blk := blks[testutil.IndexOf(blks, receivedWantReq.cids[i])] + session.ReceiveBlocksFrom(p, []blocks.Block{blk}) + notif.Publish(blk) + select { case cancelBlock := <-cancelReqs: newCancelReqs = append(newCancelReqs, cancelBlock) @@ -178,7 +187,13 @@ func TestSessionGetBlocks(t *testing.T) { // receive remaining blocks for i, p := range peers { - session.ReceiveBlocksFrom(p, []blocks.Block{blks[testutil.IndexOf(blks, newCidsRequested[i])]}) + // simulate what bitswap does on receiving a message: + // - calls ReceiveBlocksFrom() on session + // - publishes block to pubsub channel + blk := blks[testutil.IndexOf(blks, newCidsRequested[i])] + session.ReceiveBlocksFrom(p, []blocks.Block{blk}) + notif.Publish(blk) + receivedBlock := <-getBlocksCh receivedBlocks = append(receivedBlocks, receivedBlock) cancelBlock := <-cancelReqs @@ -207,8 +222,10 @@ func TestSessionFindMorePeers(t 
*testing.T) { fwm := &fakeWantManager{wantReqs, cancelReqs} fpm := &fakePeerManager{findMorePeersRequested: make(chan cid.Cid, 1)} frs := &fakeRequestSplitter{} + notif := notifications.New() + defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, frs, time.Second, delay.Fixed(time.Minute)) + session := New(ctx, id, fwm, fpm, frs, notif, time.Second, delay.Fixed(time.Minute)) session.SetBaseTickDelay(200 * time.Microsecond) blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) @@ -233,7 +250,13 @@ func TestSessionFindMorePeers(t *testing.T) { // or there will be no tick set -- time precision on Windows in go is in the // millisecond range p := testutil.GeneratePeers(1)[0] - session.ReceiveBlocksFrom(p, []blocks.Block{blks[0]}) + + // simulate what bitswap does on receiving a message: + // - calls ReceiveBlocksFrom() on session + // - publishes block to pubsub channel + blk := blks[0] + session.ReceiveBlocksFrom(p, []blocks.Block{blk}) + notif.Publish(blk) select { case <-cancelReqs: case <-ctx.Done(): @@ -279,9 +302,11 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { fwm := &fakeWantManager{wantReqs, cancelReqs} fpm := &fakePeerManager{findMorePeersRequested: make(chan cid.Cid, 1)} frs := &fakeRequestSplitter{} + notif := notifications.New() + defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, frs, 10*time.Millisecond, delay.Fixed(100*time.Millisecond)) + session := New(ctx, id, fwm, fpm, frs, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond)) blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(4) var cids []cid.Cid diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index bd9ef18c5..e56d3f3c6 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -9,6 +9,7 @@ import ( cid 
"github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" + notifications "github.com/ipfs/go-bitswap/notifications" bssession "github.com/ipfs/go-bitswap/session" exchange "github.com/ipfs/go-ipfs-exchange-interface" peer "github.com/libp2p/go-libp2p-core/peer" @@ -28,7 +29,7 @@ type sesTrk struct { } // SessionFactory generates a new session for the SessionManager to track. -type SessionFactory func(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter, provSearchDelay time.Duration, rebroadcastDelay delay.D) Session +type SessionFactory func(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter, notif notifications.PubSub, provSearchDelay time.Duration, rebroadcastDelay delay.D) Session // RequestSplitterFactory generates a new request splitter for a session. type RequestSplitterFactory func(ctx context.Context) bssession.RequestSplitter @@ -43,6 +44,7 @@ type SessionManager struct { sessionFactory SessionFactory peerManagerFactory PeerManagerFactory requestSplitterFactory RequestSplitterFactory + notif notifications.PubSub // Sessions sessLk sync.Mutex @@ -54,12 +56,14 @@ type SessionManager struct { } // New creates a new SessionManager. 
-func New(ctx context.Context, sessionFactory SessionFactory, peerManagerFactory PeerManagerFactory, requestSplitterFactory RequestSplitterFactory) *SessionManager { +func New(ctx context.Context, sessionFactory SessionFactory, peerManagerFactory PeerManagerFactory, + requestSplitterFactory RequestSplitterFactory, notif notifications.PubSub) *SessionManager { return &SessionManager{ ctx: ctx, sessionFactory: sessionFactory, peerManagerFactory: peerManagerFactory, requestSplitterFactory: requestSplitterFactory, + notif: notif, } } @@ -73,7 +77,7 @@ func (sm *SessionManager) NewSession(ctx context.Context, pm := sm.peerManagerFactory(sessionctx, id) srs := sm.requestSplitterFactory(sessionctx) - session := sm.sessionFactory(sessionctx, id, pm, srs, provSearchDelay, rebroadcastDelay) + session := sm.sessionFactory(sessionctx, id, pm, srs, sm.notif, provSearchDelay, rebroadcastDelay) tracked := sesTrk{session, pm, srs} sm.sessLk.Lock() sm.sessions = append(sm.sessions, tracked) diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index 25e33b25d..c8d30b821 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -7,6 +7,7 @@ import ( delay "github.com/ipfs/go-ipfs-delay" + notifications "github.com/ipfs/go-bitswap/notifications" bssession "github.com/ipfs/go-bitswap/session" bssd "github.com/ipfs/go-bitswap/sessiondata" "github.com/ipfs/go-bitswap/testutil" @@ -22,6 +23,7 @@ type fakeSession struct { id uint64 pm *fakePeerManager srs *fakeRequestSplitter + notif notifications.PubSub } func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { @@ -67,6 +69,7 @@ func sessionFactory(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter, + notif notifications.PubSub, provSearchDelay time.Duration, rebroadcastDelay delay.D) Session { return &fakeSession{ @@ -74,6 +77,7 @@ func sessionFactory(ctx context.Context, 
id: id, pm: pm.(*fakePeerManager), srs: srs.(*fakeRequestSplitter), + notif: notif, } } @@ -111,7 +115,9 @@ func TestAddingSessions(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory) + notif := notifications.New() + defer notif.Shutdown() + sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory, notif) p := peer.ID(123) block := blocks.NewBlock([]byte("block")) @@ -147,7 +153,9 @@ func TestReceivingBlocksWhenNotInterested(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory) + notif := notifications.New() + defer notif.Shutdown() + sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory, notif) p := peer.ID(123) blks := testutil.GenerateBlocksOfSize(3, 1024) @@ -175,7 +183,9 @@ func TestReceivingBlocksWhenNotInterested(t *testing.T) { func TestRemovingPeersWhenManagerContextCancelled(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) - sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory) + notif := notifications.New() + defer notif.Shutdown() + sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory, notif) p := peer.ID(123) block := blocks.NewBlock([]byte("block")) @@ -200,7 +210,9 @@ func TestRemovingPeersWhenSessionContextCancelled(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory) + notif := notifications.New() + defer notif.Shutdown() + sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory, notif) p := peer.ID(123) block := blocks.NewBlock([]byte("block")) From 67374bd5ac1b9edda9bde2967c640753994bb596 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 14 Aug 2019 
11:01:17 -0400 Subject: [PATCH 0795/1035] refactor: pass around keys instead of blocks This commit was moved from ipfs/go-bitswap@38c6f533f06735d168e439f24b4240656ee9ef54 --- bitswap/bitswap.go | 25 ++++++++--- bitswap/decision/engine.go | 11 +++-- bitswap/session/session.go | 39 ++++++++--------- bitswap/session/session_test.go | 6 +-- bitswap/sessionmanager/sessionmanager.go | 15 ++++--- bitswap/sessionmanager/sessionmanager_test.go | 42 +++++++++---------- 6 files changed, 70 insertions(+), 68 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 3a5872689..82757ff8a 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -315,12 +315,25 @@ func (bs *Bitswap) receiveBlocksFrom(from peer.ID, blks []blocks.Block) error { // to the same node. We should address this soon, but i'm not going to do // it now as it requires more thought and isnt causing immediate problems. - // Send all blocks (including duplicates) to any sessions that want them. + allKs := make([]cid.Cid, 0, len(blks)) + for _, b := range blks { + allKs = append(allKs, b.Cid()) + } + + wantedKs := allKs + if len(blks) != len(wanted) { + wantedKs = make([]cid.Cid, 0, len(wanted)) + for _, b := range wanted { + wantedKs = append(wantedKs, b.Cid()) + } + } + + // Send all block keys (including duplicates) to any sessions that want them. // (The duplicates are needed by sessions for accounting purposes) - bs.sm.ReceiveBlocksFrom(from, blks) + bs.sm.ReceiveBlocksFrom(from, allKs) - // Send wanted blocks to decision engine - bs.engine.AddBlocks(wanted) + // Send wanted block keys to decision engine + bs.engine.AddBlocks(wantedKs) // Publish the block to any Bitswap clients that had requested blocks. 
// (the sessions use this pubsub mechanism to inform clients of received @@ -331,9 +344,9 @@ func (bs *Bitswap) receiveBlocksFrom(from peer.ID, blks []blocks.Block) error { // If the reprovider is enabled, send wanted blocks to reprovider if bs.provideEnabled { - for _, b := range wanted { + for _, k := range wantedKs { select { - case bs.newBlocks <- b.Cid(): + case bs.newBlocks <- k: // send block off to be reprovided case <-bs.process.Closing(): return bs.process.Close() diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index a4eee0f0d..94b5ae5e5 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -10,7 +10,6 @@ import ( "github.com/google/uuid" bsmsg "github.com/ipfs/go-bitswap/message" wl "github.com/ipfs/go-bitswap/wantlist" - blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" bstore "github.com/ipfs/go-ipfs-blockstore" logging "github.com/ipfs/go-log" @@ -312,13 +311,13 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) { } } -func (e *Engine) addBlocks(blocks []blocks.Block) { +func (e *Engine) addBlocks(ks []cid.Cid) { work := false for _, l := range e.ledgerMap { l.lk.Lock() - for _, block := range blocks { - if entry, ok := l.WantListContains(block.Cid()); ok { + for _, k := range ks { + if entry, ok := l.WantListContains(k); ok { e.peerRequestQueue.PushBlock(l.Partner, peertask.Task{ Identifier: entry.Cid, Priority: entry.Priority, @@ -337,11 +336,11 @@ func (e *Engine) addBlocks(blocks []blocks.Block) { // AddBlocks is called when new blocks are received and added to a block store, // meaning there may be peers who want those blocks, so we should send the blocks // to them. -func (e *Engine) AddBlocks(blocks []blocks.Block) { +func (e *Engine) AddBlocks(ks []cid.Cid) { e.lock.Lock() defer e.lock.Unlock() - e.addBlocks(blocks) + e.addBlocks(ks) } // TODO add contents of m.WantList() to my local wantlist? 
NB: could introduce diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 518f7b69f..ccdbf1319 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -54,7 +54,7 @@ type interestReq struct { type blksRecv struct { from peer.ID - blks []blocks.Block + ks []cid.Cid } // Session holds state for an individual bitswap transfer operation. @@ -135,9 +135,9 @@ func New(ctx context.Context, } // ReceiveBlocksFrom receives incoming blocks from the given peer. -func (s *Session) ReceiveBlocksFrom(from peer.ID, blocks []blocks.Block) { +func (s *Session) ReceiveBlocksFrom(from peer.ID, ks []cid.Cid) { select { - case s.incoming <- blksRecv{from: from, blks: blocks}: + case s.incoming <- blksRecv{from: from, ks: ks}: case <-s.ctx.Done(): } } @@ -262,21 +262,21 @@ func (s *Session) run(ctx context.Context) { func (s *Session) cancelIncomingBlocks(ctx context.Context, rcv blksRecv) { // We've received the blocks so we can cancel any outstanding wants for them - ks := make([]cid.Cid, 0, len(rcv.blks)) - for _, b := range rcv.blks { - if s.cidIsWanted(b.Cid()) { - ks = append(ks, b.Cid()) + wanted := make([]cid.Cid, 0, len(rcv.ks)) + for _, k := range rcv.ks { + if s.cidIsWanted(k) { + wanted = append(wanted, k) } } - s.pm.RecordCancels(ks) - s.wm.CancelWants(s.ctx, ks, nil, s.id) + s.pm.RecordCancels(wanted) + s.wm.CancelWants(s.ctx, wanted, nil, s.id) } func (s *Session) handleIncomingBlocks(ctx context.Context, rcv blksRecv) { s.idleTick.Stop() // Process the received blocks - s.receiveBlocks(ctx, rcv.blks) + s.receiveBlocks(ctx, rcv.ks) s.resetIdleTick() } @@ -376,9 +376,8 @@ func (s *Session) cidIsWanted(c cid.Cid) bool { return ok } -func (s *Session) receiveBlocks(ctx context.Context, blocks []blocks.Block) { - for _, blk := range blocks { - c := blk.Cid() +func (s *Session) receiveBlocks(ctx context.Context, ks []cid.Cid) { + for _, c := range ks { if s.cidIsWanted(c) { // If the block CID was in the live wants queue, remove it 
tval, ok := s.liveWants[c] @@ -416,22 +415,18 @@ func (s *Session) receiveBlocks(ctx context.Context, blocks []blocks.Block) { } func (s *Session) updateReceiveCounters(ctx context.Context, rcv blksRecv) { - ks := make([]cid.Cid, len(rcv.blks)) - - for _, blk := range rcv.blks { + for _, k := range rcv.ks { // Inform the request splitter of unique / duplicate blocks - if s.cidIsWanted(blk.Cid()) { + if s.cidIsWanted(k) { s.srs.RecordUniqueBlock() - } else if s.pastWants.Has(blk.Cid()) { + } else if s.pastWants.Has(k) { s.srs.RecordDuplicateBlock() } - - ks = append(ks, blk.Cid()) } // Record response (to be able to time latency) - if len(ks) > 0 { - s.pm.RecordPeerResponse(rcv.from, ks) + if len(rcv.ks) > 0 { + s.pm.RecordPeerResponse(rcv.from, rcv.ks) } } diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index 5ff460214..1d58b27ee 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -129,7 +129,7 @@ func TestSessionGetBlocks(t *testing.T) { // - calls ReceiveBlocksFrom() on session // - publishes block to pubsub channel blk := blks[testutil.IndexOf(blks, receivedWantReq.cids[i])] - session.ReceiveBlocksFrom(p, []blocks.Block{blk}) + session.ReceiveBlocksFrom(p, []cid.Cid{blk.Cid()}) notif.Publish(blk) select { @@ -191,7 +191,7 @@ func TestSessionGetBlocks(t *testing.T) { // - calls ReceiveBlocksFrom() on session // - publishes block to pubsub channel blk := blks[testutil.IndexOf(blks, newCidsRequested[i])] - session.ReceiveBlocksFrom(p, []blocks.Block{blk}) + session.ReceiveBlocksFrom(p, []cid.Cid{blk.Cid()}) notif.Publish(blk) receivedBlock := <-getBlocksCh @@ -255,7 +255,7 @@ func TestSessionFindMorePeers(t *testing.T) { // - calls ReceiveBlocksFrom() on session // - publishes block to pubsub channel blk := blks[0] - session.ReceiveBlocksFrom(p, []blocks.Block{blk}) + session.ReceiveBlocksFrom(p, []cid.Cid{blk.Cid()}) notif.Publish(blk) select { case <-cancelReqs: diff --git 
a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index e56d3f3c6..2f37a6db2 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -5,7 +5,6 @@ import ( "sync" "time" - blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" @@ -19,7 +18,7 @@ import ( type Session interface { exchange.Fetcher InterestedIn(cid.Cid) bool - ReceiveBlocksFrom(peer.ID, []blocks.Block) + ReceiveBlocksFrom(peer.ID, []cid.Cid) } type sesTrk struct { @@ -117,18 +116,18 @@ func (sm *SessionManager) GetNextSessionID() uint64 { // ReceiveBlocksFrom receives blocks from a peer and dispatches to interested // sessions. -func (sm *SessionManager) ReceiveBlocksFrom(from peer.ID, blks []blocks.Block) { +func (sm *SessionManager) ReceiveBlocksFrom(from peer.ID, ks []cid.Cid) { sm.sessLk.Lock() defer sm.sessLk.Unlock() // Only give each session the blocks / dups that it is interested in for _, s := range sm.sessions { - sessBlks := make([]blocks.Block, 0, len(blks)) - for _, b := range blks { - if s.session.InterestedIn(b.Cid()) { - sessBlks = append(sessBlks, b) + sessKs := make([]cid.Cid, 0, len(ks)) + for _, k := range ks { + if s.session.InterestedIn(k) { + sessKs = append(sessKs, k) } } - s.session.ReceiveBlocksFrom(from, sessBlks) + s.session.ReceiveBlocksFrom(from, sessKs) } } diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index c8d30b821..08dfb9d8a 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -19,7 +19,7 @@ import ( type fakeSession struct { interested []cid.Cid - blks []blocks.Block + ks []cid.Cid id uint64 pm *fakePeerManager srs *fakeRequestSplitter @@ -40,8 +40,8 @@ func (fs *fakeSession) InterestedIn(c cid.Cid) bool { } return false } -func (fs *fakeSession) ReceiveBlocksFrom(p peer.ID, blks []blocks.Block) { - fs.blks = 
append(fs.blks, blks...) +func (fs *fakeSession) ReceiveBlocksFrom(p peer.ID, ks []cid.Cid) { + fs.ks = append(fs.ks, ks...) } type fakePeerManager struct { @@ -90,17 +90,13 @@ func requestSplitterFactory(ctx context.Context) bssession.RequestSplitter { } func cmpSessionCids(s *fakeSession, cids []cid.Cid) bool { - return cmpBlockCids(s.blks, cids) -} - -func cmpBlockCids(blks []blocks.Block, cids []cid.Cid) bool { - if len(blks) != len(cids) { + if len(s.ks) != len(cids) { return false } - for _, b := range blks { + for _, bk := range s.ks { has := false for _, c := range cids { - if c == b.Cid() { + if c == bk { has = true } } @@ -141,10 +137,10 @@ func TestAddingSessions(t *testing.T) { thirdSession.id != secondSession.id+2 { t.Fatal("session does not have correct id set") } - sm.ReceiveBlocksFrom(p, []blocks.Block{block}) - if len(firstSession.blks) == 0 || - len(secondSession.blks) == 0 || - len(thirdSession.blks) == 0 { + sm.ReceiveBlocksFrom(p, []cid.Cid{block.Cid()}) + if len(firstSession.ks) == 0 || + len(secondSession.ks) == 0 || + len(thirdSession.ks) == 0 { t.Fatal("should have received blocks but didn't") } } @@ -171,7 +167,7 @@ func TestReceivingBlocksWhenNotInterested(t *testing.T) { nextInterestedIn = []cid.Cid{} thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - sm.ReceiveBlocksFrom(p, []blocks.Block{blks[0], blks[1]}) + sm.ReceiveBlocksFrom(p, []cid.Cid{blks[0].Cid(), blks[1].Cid()}) if !cmpSessionCids(firstSession, []cid.Cid{cids[0], cids[1]}) || !cmpSessionCids(secondSession, []cid.Cid{cids[0]}) || @@ -198,10 +194,10 @@ func TestRemovingPeersWhenManagerContextCancelled(t *testing.T) { cancel() // wait for sessions to get removed time.Sleep(10 * time.Millisecond) - sm.ReceiveBlocksFrom(p, []blocks.Block{block}) - if len(firstSession.blks) > 0 || - len(secondSession.blks) > 0 || - len(thirdSession.blks) > 0 { + sm.ReceiveBlocksFrom(p, []cid.Cid{block.Cid()}) + if len(firstSession.ks) > 0 || + 
len(secondSession.ks) > 0 || + len(thirdSession.ks) > 0 { t.Fatal("received blocks for sessions after manager is shutdown") } } @@ -226,10 +222,10 @@ func TestRemovingPeersWhenSessionContextCancelled(t *testing.T) { sessionCancel() // wait for sessions to get removed time.Sleep(10 * time.Millisecond) - sm.ReceiveBlocksFrom(p, []blocks.Block{block}) - if len(firstSession.blks) == 0 || - len(secondSession.blks) > 0 || - len(thirdSession.blks) == 0 { + sm.ReceiveBlocksFrom(p, []cid.Cid{block.Cid()}) + if len(firstSession.ks) == 0 || + len(secondSession.ks) > 0 || + len(thirdSession.ks) == 0 { t.Fatal("received blocks for sessions that are canceled") } } From 44952b23a7bbd515c188e1de7f1870d28a678986 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 15 Aug 2019 08:33:57 -0400 Subject: [PATCH 0796/1035] refactor: change naming to reflect blocks -> keys This commit was moved from ipfs/go-bitswap@693e97d08b09a97e2c3c28f0a11ccdbd21ea0bc6 --- bitswap/bitswap.go | 2 +- bitswap/session/session.go | 28 +++++++++---------- bitswap/session/session_test.go | 12 ++++---- bitswap/sessionmanager/sessionmanager.go | 8 +++--- bitswap/sessionmanager/sessionmanager_test.go | 10 +++---- 5 files changed, 30 insertions(+), 30 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 82757ff8a..c7af851fd 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -330,7 +330,7 @@ func (bs *Bitswap) receiveBlocksFrom(from peer.ID, blks []blocks.Block) error { // Send all block keys (including duplicates) to any sessions that want them. 
// (The duplicates are needed by sessions for accounting purposes) - bs.sm.ReceiveBlocksFrom(from, allKs) + bs.sm.ReceiveFrom(from, allKs) // Send wanted block keys to decision engine bs.engine.AddBlocks(wantedKs) diff --git a/bitswap/session/session.go b/bitswap/session/session.go index ccdbf1319..f2455e7fc 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -52,9 +52,9 @@ type interestReq struct { resp chan bool } -type blksRecv struct { +type rcvFrom struct { from peer.ID - ks []cid.Cid + ks []cid.Cid } // Session holds state for an individual bitswap transfer operation. @@ -68,7 +68,7 @@ type Session struct { srs RequestSplitter // channels - incoming chan blksRecv + incoming chan rcvFrom newReqs chan []cid.Cid cancelKeys chan []cid.Cid interestReqs chan interestReq @@ -117,7 +117,7 @@ func New(ctx context.Context, wm: wm, pm: pm, srs: srs, - incoming: make(chan blksRecv), + incoming: make(chan rcvFrom), notif: notif, uuid: loggables.Uuid("GetBlockRequest"), baseTickDelay: time.Millisecond * 500, @@ -134,10 +134,10 @@ func New(ctx context.Context, return s } -// ReceiveBlocksFrom receives incoming blocks from the given peer. -func (s *Session) ReceiveBlocksFrom(from peer.ID, ks []cid.Cid) { +// ReceiveFrom receives incoming blocks from the given peer. 
+func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid) { select { - case s.incoming <- blksRecv{from: from, ks: ks}: + case s.incoming <- rcvFrom{from: from, ks: ks}: case <-s.ctx.Done(): } } @@ -232,13 +232,13 @@ func (s *Session) run(ctx context.Context) { for { select { case rcv := <-s.incoming: - s.cancelIncomingBlocks(ctx, rcv) + s.cancelIncoming(ctx, rcv) // Record statistics only if the blocks came from the network // (blocks can also be received from the local node) if rcv.from != "" { s.updateReceiveCounters(ctx, rcv) } - s.handleIncomingBlocks(ctx, rcv) + s.handleIncoming(ctx, rcv) case keys := <-s.newReqs: s.handleNewRequest(ctx, keys) case keys := <-s.cancelKeys: @@ -260,7 +260,7 @@ func (s *Session) run(ctx context.Context) { } } -func (s *Session) cancelIncomingBlocks(ctx context.Context, rcv blksRecv) { +func (s *Session) cancelIncoming(ctx context.Context, rcv rcvFrom) { // We've received the blocks so we can cancel any outstanding wants for them wanted := make([]cid.Cid, 0, len(rcv.ks)) for _, k := range rcv.ks { @@ -272,11 +272,11 @@ func (s *Session) cancelIncomingBlocks(ctx context.Context, rcv blksRecv) { s.wm.CancelWants(s.ctx, wanted, nil, s.id) } -func (s *Session) handleIncomingBlocks(ctx context.Context, rcv blksRecv) { +func (s *Session) handleIncoming(ctx context.Context, rcv rcvFrom) { s.idleTick.Stop() // Process the received blocks - s.receiveBlocks(ctx, rcv.ks) + s.processIncoming(ctx, rcv.ks) s.resetIdleTick() } @@ -376,7 +376,7 @@ func (s *Session) cidIsWanted(c cid.Cid) bool { return ok } -func (s *Session) receiveBlocks(ctx context.Context, ks []cid.Cid) { +func (s *Session) processIncoming(ctx context.Context, ks []cid.Cid) { for _, c := range ks { if s.cidIsWanted(c) { // If the block CID was in the live wants queue, remove it @@ -414,7 +414,7 @@ func (s *Session) receiveBlocks(ctx context.Context, ks []cid.Cid) { } } -func (s *Session) updateReceiveCounters(ctx context.Context, rcv blksRecv) { +func (s *Session) 
updateReceiveCounters(ctx context.Context, rcv rcvFrom) { for _, k := range rcv.ks { // Inform the request splitter of unique / duplicate blocks if s.cidIsWanted(k) { diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index 1d58b27ee..375b94afe 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -126,10 +126,10 @@ func TestSessionGetBlocks(t *testing.T) { var receivedBlocks []blocks.Block for i, p := range peers { // simulate what bitswap does on receiving a message: - // - calls ReceiveBlocksFrom() on session + // - calls ReceiveFrom() on session // - publishes block to pubsub channel blk := blks[testutil.IndexOf(blks, receivedWantReq.cids[i])] - session.ReceiveBlocksFrom(p, []cid.Cid{blk.Cid()}) + session.ReceiveFrom(p, []cid.Cid{blk.Cid()}) notif.Publish(blk) select { @@ -188,10 +188,10 @@ func TestSessionGetBlocks(t *testing.T) { // receive remaining blocks for i, p := range peers { // simulate what bitswap does on receiving a message: - // - calls ReceiveBlocksFrom() on session + // - calls ReceiveFrom() on session // - publishes block to pubsub channel blk := blks[testutil.IndexOf(blks, newCidsRequested[i])] - session.ReceiveBlocksFrom(p, []cid.Cid{blk.Cid()}) + session.ReceiveFrom(p, []cid.Cid{blk.Cid()}) notif.Publish(blk) receivedBlock := <-getBlocksCh @@ -252,10 +252,10 @@ func TestSessionFindMorePeers(t *testing.T) { p := testutil.GeneratePeers(1)[0] // simulate what bitswap does on receiving a message: - // - calls ReceiveBlocksFrom() on session + // - calls ReceiveFrom() on session // - publishes block to pubsub channel blk := blks[0] - session.ReceiveBlocksFrom(p, []cid.Cid{blk.Cid()}) + session.ReceiveFrom(p, []cid.Cid{blk.Cid()}) notif.Publish(blk) select { case <-cancelReqs: diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index 2f37a6db2..d65b86f4a 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ 
b/bitswap/sessionmanager/sessionmanager.go @@ -18,7 +18,7 @@ import ( type Session interface { exchange.Fetcher InterestedIn(cid.Cid) bool - ReceiveBlocksFrom(peer.ID, []cid.Cid) + ReceiveFrom(peer.ID, []cid.Cid) } type sesTrk struct { @@ -114,9 +114,9 @@ func (sm *SessionManager) GetNextSessionID() uint64 { return sm.sessID } -// ReceiveBlocksFrom receives blocks from a peer and dispatches to interested +// ReceiveFrom receives blocks from a peer and dispatches to interested // sessions. -func (sm *SessionManager) ReceiveBlocksFrom(from peer.ID, ks []cid.Cid) { +func (sm *SessionManager) ReceiveFrom(from peer.ID, ks []cid.Cid) { sm.sessLk.Lock() defer sm.sessLk.Unlock() @@ -128,6 +128,6 @@ func (sm *SessionManager) ReceiveBlocksFrom(from peer.ID, ks []cid.Cid) { sessKs = append(sessKs, k) } } - s.session.ReceiveBlocksFrom(from, sessKs) + s.session.ReceiveFrom(from, sessKs) } } diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index 08dfb9d8a..0d0c94d64 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -40,7 +40,7 @@ func (fs *fakeSession) InterestedIn(c cid.Cid) bool { } return false } -func (fs *fakeSession) ReceiveBlocksFrom(p peer.ID, ks []cid.Cid) { +func (fs *fakeSession) ReceiveFrom(p peer.ID, ks []cid.Cid) { fs.ks = append(fs.ks, ks...) 
} @@ -137,7 +137,7 @@ func TestAddingSessions(t *testing.T) { thirdSession.id != secondSession.id+2 { t.Fatal("session does not have correct id set") } - sm.ReceiveBlocksFrom(p, []cid.Cid{block.Cid()}) + sm.ReceiveFrom(p, []cid.Cid{block.Cid()}) if len(firstSession.ks) == 0 || len(secondSession.ks) == 0 || len(thirdSession.ks) == 0 { @@ -167,7 +167,7 @@ func TestReceivingBlocksWhenNotInterested(t *testing.T) { nextInterestedIn = []cid.Cid{} thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - sm.ReceiveBlocksFrom(p, []cid.Cid{blks[0].Cid(), blks[1].Cid()}) + sm.ReceiveFrom(p, []cid.Cid{blks[0].Cid(), blks[1].Cid()}) if !cmpSessionCids(firstSession, []cid.Cid{cids[0], cids[1]}) || !cmpSessionCids(secondSession, []cid.Cid{cids[0]}) || @@ -194,7 +194,7 @@ func TestRemovingPeersWhenManagerContextCancelled(t *testing.T) { cancel() // wait for sessions to get removed time.Sleep(10 * time.Millisecond) - sm.ReceiveBlocksFrom(p, []cid.Cid{block.Cid()}) + sm.ReceiveFrom(p, []cid.Cid{block.Cid()}) if len(firstSession.ks) > 0 || len(secondSession.ks) > 0 || len(thirdSession.ks) > 0 { @@ -222,7 +222,7 @@ func TestRemovingPeersWhenSessionContextCancelled(t *testing.T) { sessionCancel() // wait for sessions to get removed time.Sleep(10 * time.Millisecond) - sm.ReceiveBlocksFrom(p, []cid.Cid{block.Cid()}) + sm.ReceiveFrom(p, []cid.Cid{block.Cid()}) if len(firstSession.ks) == 0 || len(secondSession.ks) > 0 || len(thirdSession.ks) == 0 { From 8a5caefbcd8b574bbf8ccba0a116bc39497676e7 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 15 Aug 2019 10:16:21 -0400 Subject: [PATCH 0797/1035] fix: make sure GetBlocks() channel is closed on session close This commit was moved from ipfs/go-bitswap@994279bd930b13a475f9b85d853c616bfb41fd75 --- bitswap/getter/getter.go | 30 +++++++++++++++--- bitswap/notifications/notifications.go | 6 ++-- bitswap/session/session.go | 3 +- bitswap/session/session_test.go | 42 ++++++++++++++++++++++++++ 4 files 
changed, 73 insertions(+), 8 deletions(-) diff --git a/bitswap/getter/getter.go b/bitswap/getter/getter.go index 4f1c29db6..018bf87a4 100644 --- a/bitswap/getter/getter.go +++ b/bitswap/getter/getter.go @@ -61,15 +61,19 @@ func SyncGetBlock(p context.Context, k cid.Cid, gb GetBlocksFunc) (blocks.Block, type WantFunc func(context.Context, []cid.Cid) // AsyncGetBlocks take a set of block cids, a pubsub channel for incoming -// blocks, a want function, and a close function, -// and returns a channel of incoming blocks. -func AsyncGetBlocks(ctx context.Context, keys []cid.Cid, notif notifications.PubSub, want WantFunc, cwants func([]cid.Cid)) (<-chan blocks.Block, error) { +// blocks, a want function, and a close function, and returns a channel of +// incoming blocks. +func AsyncGetBlocks(ctx context.Context, sessctx context.Context, keys []cid.Cid, notif notifications.PubSub, + want WantFunc, cwants func([]cid.Cid)) (<-chan blocks.Block, error) { + + // If there are no keys supplied, just return a closed channel if len(keys) == 0 { out := make(chan blocks.Block) close(out) return out, nil } + // Use a PubSub notifier to listen for incoming blocks for each key remaining := cid.NewSet() promise := notif.Subscribe(ctx, keys...) for _, k := range keys { @@ -77,24 +81,36 @@ func AsyncGetBlocks(ctx context.Context, keys []cid.Cid, notif notifications.Pub remaining.Add(k) } + // Send the want request for the keys to the network want(ctx, keys) out := make(chan blocks.Block) - go handleIncoming(ctx, remaining, promise, out, cwants) + go handleIncoming(ctx, sessctx, remaining, promise, out, cwants) return out, nil } -func handleIncoming(ctx context.Context, remaining *cid.Set, in <-chan blocks.Block, out chan blocks.Block, cfun func([]cid.Cid)) { +// Listens for incoming blocks, passing them to the out channel. +// If the context is cancelled or the incoming channel closes, calls cfun with +// any keys corresponding to blocks that were never received. 
+func handleIncoming(ctx context.Context, sessctx context.Context, remaining *cid.Set, + in <-chan blocks.Block, out chan blocks.Block, cfun func([]cid.Cid)) { + ctx, cancel := context.WithCancel(ctx) + + // Clean up before exiting this function, and call the cancel function on + // any remaining keys defer func() { cancel() close(out) // can't just defer this call on its own, arguments are resolved *when* the defer is created cfun(remaining.Keys()) }() + for { select { case blk, ok := <-in: + // If the channel is closed, we're done (note that PubSub closes + // the channel once all the keys have been received) if !ok { return } @@ -104,9 +120,13 @@ func handleIncoming(ctx context.Context, remaining *cid.Set, in <-chan blocks.Bl case out <- blk: case <-ctx.Done(): return + case <-sessctx.Done(): + return } case <-ctx.Done(): return + case <-sessctx.Done(): + return } } } diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go index 0934fa5f5..7defea739 100644 --- a/bitswap/notifications/notifications.go +++ b/bitswap/notifications/notifications.go @@ -60,8 +60,8 @@ func (ps *impl) Shutdown() { } // Subscribe returns a channel of blocks for the given |keys|. |blockChannel| -// is closed if the |ctx| times out or is cancelled, or after sending len(keys) -// blocks. +// is closed if the |ctx| times out or is cancelled, or after receiving the blocks +// corresponding to |keys|. func (ps *impl) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Block { blocksCh := make(chan blocks.Block, len(keys)) @@ -82,6 +82,8 @@ func (ps *impl) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Bl default: } + // AddSubOnceEach listens for each key in the list, and closes the channel + // once all keys have been received ps.wrapped.AddSubOnceEach(valuesCh, toStrings(keys)...) 
go func() { defer func() { diff --git a/bitswap/session/session.go b/bitswap/session/session.go index f2455e7fc..886971c9f 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -182,7 +182,8 @@ func (s *Session) GetBlock(parent context.Context, k cid.Cid) (blocks.Block, err // guaranteed on the returned blocks. func (s *Session) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { ctx = logging.ContextWithLoggable(ctx, s.uuid) - return bsgetter.AsyncGetBlocks(ctx, keys, s.notif, + + return bsgetter.AsyncGetBlocks(ctx, s.ctx, keys, s.notif, func(ctx context.Context, keys []cid.Cid) { select { case s.newReqs <- keys: diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index 375b94afe..07b834a8d 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -416,3 +416,45 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { t.Fatal("Did not rebroadcast to find more peers") } } + +func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { + wantReqs := make(chan wantReq, 1) + cancelReqs := make(chan wantReq, 1) + fwm := &fakeWantManager{wantReqs, cancelReqs} + fpm := &fakePeerManager{} + frs := &fakeRequestSplitter{} + notif := notifications.New() + defer notif.Shutdown() + id := testutil.GenerateSessionID() + + // Create a new session with its own context + sessctx, sesscancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + session := New(sessctx, id, fwm, fpm, frs, notif, time.Second, delay.Fixed(time.Minute)) + + timerCtx, timerCancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer timerCancel() + + // Request a block with a new context + blockGenerator := blocksutil.NewBlockGenerator() + blks := blockGenerator.Blocks(1) + getctx, getcancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer getcancel() + + getBlocksCh, err := session.GetBlocks(getctx, []cid.Cid{blks[0].Cid()}) + if err != 
nil { + t.Fatal("error getting blocks") + } + + // Cancel the session context + sesscancel() + + // Expect the GetBlocks() channel to be closed + select { + case _, ok := <-getBlocksCh: + if ok { + t.Fatal("expected channel to be closed but was not closed") + } + case <-timerCtx.Done(): + t.Fatal("expected channel to be closed before timeout") + } +} From afbde62a85b5d8760095a3b82f001eeb9261cae3 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Tue, 20 Aug 2019 10:02:56 -0700 Subject: [PATCH 0798/1035] test: fix flakey session peer manager tests This commit was moved from ipfs/go-bitswap@da7f7eac3d4e5dd17908012ee34c2b110519d74f --- .../sessionpeermanager/sessionpeermanager_test.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index e6808307e..e7ca6ca96 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -150,7 +150,8 @@ func TestOrderingPeers(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 30*time.Millisecond) defer cancel() - peers := testutil.GeneratePeers(100) + peerCount := 100 + peers := testutil.GeneratePeers(peerCount) completed := make(chan struct{}) fpt := &fakePeerTagger{} fppf := &fakePeerProviderFinder{peers, completed} @@ -171,9 +172,10 @@ func TestOrderingPeers(t *testing.T) { sessionPeerManager.RecordPeerRequests(nil, c) // record receives - peer1 := peers[rand.Intn(100)] - peer2 := peers[rand.Intn(100)] - peer3 := peers[rand.Intn(100)] + randi := rand.Perm(peerCount) + peer1 := peers[randi[0]] + peer2 := peers[randi[1]] + peer3 := peers[randi[2]] time.Sleep(1 * time.Millisecond) sessionPeerManager.RecordPeerResponse(peer1, []cid.Cid{c[0]}) time.Sleep(5 * time.Millisecond) @@ -358,7 +360,7 @@ func TestTimeoutsAndCancels(t *testing.T) { func TestUntaggingPeers(t *testing.T) { ctx := 
context.Background() - ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + ctx, cancel := context.WithTimeout(ctx, 30*time.Millisecond) defer cancel() peers := testutil.GeneratePeers(5) completed := make(chan struct{}) @@ -375,7 +377,7 @@ func TestUntaggingPeers(t *testing.T) { case <-ctx.Done(): t.Fatal("Did not finish finding providers") } - time.Sleep(2 * time.Millisecond) + time.Sleep(15 * time.Millisecond) if fpt.count() != len(peers) { t.Fatal("Peers were not tagged!") From 0683b0f20e06bf135b946439d6a60f773f3df838 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Tue, 20 Aug 2019 10:15:03 -0700 Subject: [PATCH 0799/1035] test: fix flakey session peer manager ordering test This commit was moved from ipfs/go-bitswap@295cc213dbf81b87e4428d44cd5b0ef24253acff --- bitswap/sessionpeermanager/sessionpeermanager_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index e7ca6ca96..e02aa2491 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -166,7 +166,7 @@ func TestOrderingPeers(t *testing.T) { case <-ctx.Done(): t.Fatal("Did not finish finding providers") } - time.Sleep(2 * time.Millisecond) + time.Sleep(20 * time.Millisecond) // record broadcast sessionPeerManager.RecordPeerRequests(nil, c) @@ -176,11 +176,11 @@ func TestOrderingPeers(t *testing.T) { peer1 := peers[randi[0]] peer2 := peers[randi[1]] peer3 := peers[randi[2]] - time.Sleep(1 * time.Millisecond) + time.Sleep(10 * time.Millisecond) sessionPeerManager.RecordPeerResponse(peer1, []cid.Cid{c[0]}) - time.Sleep(5 * time.Millisecond) + time.Sleep(50 * time.Millisecond) sessionPeerManager.RecordPeerResponse(peer2, []cid.Cid{c[0]}) - time.Sleep(1 * time.Millisecond) + time.Sleep(10 * time.Millisecond) sessionPeerManager.RecordPeerResponse(peer3, []cid.Cid{c[0]}) sessionPeers := 
sessionPeerManager.GetOptimizedPeers() @@ -228,7 +228,7 @@ func TestOrderingPeers(t *testing.T) { // should sort by average latency if (nextSessionPeers[0].Peer != peer1) || (nextSessionPeers[1].Peer != peer3) || (nextSessionPeers[2].Peer != peer2) { - t.Fatal("Did not dedup peers which received multiple blocks") + t.Fatal("Did not correctly update order of peers sorted by average latency") } // should randomize other peers From 44a9ef4ee0b27e01af3c1a2485afac3311a19e69 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Tue, 20 Aug 2019 10:46:52 -0700 Subject: [PATCH 0800/1035] refactor: session peer manager ordering This commit was moved from ipfs/go-bitswap@a41460dcdfea0a7c39c33fe948c7166318f24061 --- .../sessionpeermanager/sessionpeermanager.go | 36 ++++++++++++------- .../sessionpeermanager_test.go | 13 +++++-- 2 files changed, 34 insertions(+), 15 deletions(-) diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index 93723c9ec..fe9a93a2d 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -277,30 +277,42 @@ type getPeersMessage struct { resp chan<- []bssd.OptimizedPeer } +// Get all optimized peers in order followed by randomly ordered unoptimized +// peers, with a limit of maxOptimizedPeers func (prm *getPeersMessage) handle(spm *SessionPeerManager) { - randomOrder := rand.Perm(len(spm.unoptimizedPeersArr)) + // Number of peers to get in total: unoptimized + optimized + // limited by maxOptimizedPeers maxPeers := len(spm.unoptimizedPeersArr) + len(spm.optimizedPeersArr) if maxPeers > maxOptimizedPeers { maxPeers = maxOptimizedPeers } + + // The best peer latency is 1 if we have recorded at least one peer's + // latency, 0 otherwise var bestPeerLatency float64 if len(spm.optimizedPeersArr) > 0 { bestPeerLatency = float64(spm.activePeers[spm.optimizedPeersArr[0]].latency) } else { bestPeerLatency = 0 } + optimizedPeers := 
make([]bssd.OptimizedPeer, 0, maxPeers) - for i := 0; i < maxPeers; i++ { - if i < len(spm.optimizedPeersArr) { - p := spm.optimizedPeersArr[i] - optimizedPeers = append(optimizedPeers, bssd.OptimizedPeer{ - Peer: p, - OptimizationRating: bestPeerLatency / float64(spm.activePeers[p].latency), - }) - } else { - p := spm.unoptimizedPeersArr[randomOrder[i-len(spm.optimizedPeersArr)]] - optimizedPeers = append(optimizedPeers, bssd.OptimizedPeer{Peer: p, OptimizationRating: 0.0}) - } + + // Add optimized peers in order + for i := 0; i < maxPeers && i < len(spm.optimizedPeersArr); i++ { + p := spm.optimizedPeersArr[i] + optimizedPeers = append(optimizedPeers, bssd.OptimizedPeer{ + Peer: p, + OptimizationRating: bestPeerLatency / float64(spm.activePeers[p].latency), + }) + } + + // Add unoptimized peers in random order + randomOrder := rand.Perm(len(spm.unoptimizedPeersArr)) + remaining := maxPeers - len(optimizedPeers) + for i := 0; i < remaining; i++ { + p := spm.unoptimizedPeersArr[randomOrder[i]] + optimizedPeers = append(optimizedPeers, bssd.OptimizedPeer{Peer: p, OptimizationRating: 0.0}) } prm.resp <- optimizedPeers } diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index e02aa2491..7e11ad751 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -2,6 +2,7 @@ package sessionpeermanager import ( "context" + "fmt" "math/rand" "sync" "testing" @@ -185,10 +186,13 @@ func TestOrderingPeers(t *testing.T) { sessionPeers := sessionPeerManager.GetOptimizedPeers() if len(sessionPeers) != maxOptimizedPeers { - t.Fatal("Should not return more than the max of optimized peers") + t.Fatal(fmt.Sprintf("Should not return more (%d) than the max of optimized peers (%d)", len(sessionPeers), maxOptimizedPeers)) } // should prioritize peers which are fastest + // peer1: ~10ms + // peer2: 10 + 50 = ~60ms + // peer3: 10 + 50 + 10 = ~70ms if 
(sessionPeers[0].Peer != peer1) || (sessionPeers[1].Peer != peer2) || (sessionPeers[2].Peer != peer3) { t.Fatal("Did not prioritize peers that received blocks") } @@ -204,7 +208,7 @@ func TestOrderingPeers(t *testing.T) { t.Fatal("Did not assign rating to other optimized peers correctly") } - // should other peers rating of zero + // should give other non-optimized peers rating of zero for i := 3; i < maxOptimizedPeers; i++ { if sessionPeers[i].OptimizationRating != 0.0 { t.Fatal("Did not assign rating to unoptimized peer correctly") @@ -222,10 +226,13 @@ func TestOrderingPeers(t *testing.T) { // call again nextSessionPeers := sessionPeerManager.GetOptimizedPeers() if len(nextSessionPeers) != maxOptimizedPeers { - t.Fatal("Should not return more than the max of optimized peers") + t.Fatal(fmt.Sprintf("Should not return more (%d) than the max of optimized peers (%d)", len(nextSessionPeers), maxOptimizedPeers)) } // should sort by average latency + // peer1: ~10ms + // peer3: (~70ms + ~0ms) / 2 = ~35ms + // peer2: ~60ms if (nextSessionPeers[0].Peer != peer1) || (nextSessionPeers[1].Peer != peer3) || (nextSessionPeers[2].Peer != peer2) { t.Fatal("Did not correctly update order of peers sorted by average latency") From cf79fb1f849324a30388c66dc684be419d298186 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Tue, 20 Aug 2019 11:02:03 -0700 Subject: [PATCH 0801/1035] fix: session peer manager ordering test timing This commit was moved from ipfs/go-bitswap@ae2753965030c116eba6c343400fa372cb902b3b --- .../sessionpeermanager_test.go | 22 +++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index 7e11ad751..5231434f7 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -177,22 +177,24 @@ func TestOrderingPeers(t *testing.T) { peer1 := peers[randi[0]] peer2 := 
peers[randi[1]] peer3 := peers[randi[2]] - time.Sleep(10 * time.Millisecond) + time.Sleep(5 * time.Millisecond) sessionPeerManager.RecordPeerResponse(peer1, []cid.Cid{c[0]}) - time.Sleep(50 * time.Millisecond) + time.Sleep(25 * time.Millisecond) sessionPeerManager.RecordPeerResponse(peer2, []cid.Cid{c[0]}) - time.Sleep(10 * time.Millisecond) + time.Sleep(5 * time.Millisecond) sessionPeerManager.RecordPeerResponse(peer3, []cid.Cid{c[0]}) + time.Sleep(5 * time.Millisecond) + sessionPeers := sessionPeerManager.GetOptimizedPeers() if len(sessionPeers) != maxOptimizedPeers { t.Fatal(fmt.Sprintf("Should not return more (%d) than the max of optimized peers (%d)", len(sessionPeers), maxOptimizedPeers)) } // should prioritize peers which are fastest - // peer1: ~10ms - // peer2: 10 + 50 = ~60ms - // peer3: 10 + 50 + 10 = ~70ms + // peer1: ~5ms + // peer2: 5 + 25 = ~30ms + // peer3: 5 + 25 + 5 = ~35ms if (sessionPeers[0].Peer != peer1) || (sessionPeers[1].Peer != peer2) || (sessionPeers[2].Peer != peer3) { t.Fatal("Did not prioritize peers that received blocks") } @@ -223,6 +225,8 @@ func TestOrderingPeers(t *testing.T) { // Receive a second time sessionPeerManager.RecordPeerResponse(peer3, []cid.Cid{c2[0]}) + time.Sleep(5 * time.Millisecond) + // call again nextSessionPeers := sessionPeerManager.GetOptimizedPeers() if len(nextSessionPeers) != maxOptimizedPeers { @@ -230,9 +234,9 @@ func TestOrderingPeers(t *testing.T) { } // should sort by average latency - // peer1: ~10ms - // peer3: (~70ms + ~0ms) / 2 = ~35ms - // peer2: ~60ms + // peer1: ~5ms + // peer3: (~35ms + ~5ms + ~5ms) / 2 = ~23ms + // peer2: ~30ms if (nextSessionPeers[0].Peer != peer1) || (nextSessionPeers[1].Peer != peer3) || (nextSessionPeers[2].Peer != peer2) { t.Fatal("Did not correctly update order of peers sorted by average latency") From 19192b33f3a7181d5fda648663fc2f8efe02e9ee Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Tue, 20 Aug 2019 11:09:07 -0700 Subject: [PATCH 0802/1035] fix: session peer 
manager ordering test timing (2) This commit was moved from ipfs/go-bitswap@6a1362ca6a40cdf17e63f13458d67d6567893df2 --- bitswap/sessionpeermanager/sessionpeermanager_test.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index 5231434f7..8c341a05c 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -149,7 +149,7 @@ func TestRecordingReceivedBlocks(t *testing.T) { func TestOrderingPeers(t *testing.T) { ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 30*time.Millisecond) + ctx, cancel := context.WithTimeout(ctx, 60*time.Millisecond) defer cancel() peerCount := 100 peers := testutil.GeneratePeers(peerCount) @@ -184,8 +184,6 @@ func TestOrderingPeers(t *testing.T) { time.Sleep(5 * time.Millisecond) sessionPeerManager.RecordPeerResponse(peer3, []cid.Cid{c[0]}) - time.Sleep(5 * time.Millisecond) - sessionPeers := sessionPeerManager.GetOptimizedPeers() if len(sessionPeers) != maxOptimizedPeers { t.Fatal(fmt.Sprintf("Should not return more (%d) than the max of optimized peers (%d)", len(sessionPeers), maxOptimizedPeers)) @@ -225,8 +223,6 @@ func TestOrderingPeers(t *testing.T) { // Receive a second time sessionPeerManager.RecordPeerResponse(peer3, []cid.Cid{c2[0]}) - time.Sleep(5 * time.Millisecond) - // call again nextSessionPeers := sessionPeerManager.GetOptimizedPeers() if len(nextSessionPeers) != maxOptimizedPeers { @@ -235,7 +231,7 @@ func TestOrderingPeers(t *testing.T) { // should sort by average latency // peer1: ~5ms - // peer3: (~35ms + ~5ms + ~5ms) / 2 = ~23ms + // peer3: (~35ms + ~5ms) / 2 = ~20ms // peer2: ~30ms if (nextSessionPeers[0].Peer != peer1) || (nextSessionPeers[1].Peer != peer3) || (nextSessionPeers[2].Peer != peer2) { From 8f1d37b79aa9c4b24bf9ba6cdf7f724dd0fd57b3 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: 
Tue, 20 Aug 2019 11:16:57 -0700 Subject: [PATCH 0803/1035] refactor: session peer manager ordering This commit was moved from ipfs/go-bitswap@64ecba67faa16cb5df04c9caec2c826ca409d0eb --- .../sessionpeermanager/sessionpeermanager.go | 35 +++++++++---------- .../sessionpeermanager_test.go | 2 +- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index fe9a93a2d..3c4e13749 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -280,6 +280,8 @@ type getPeersMessage struct { // Get all optimized peers in order followed by randomly ordered unoptimized // peers, with a limit of maxOptimizedPeers func (prm *getPeersMessage) handle(spm *SessionPeerManager) { + randomOrder := rand.Perm(len(spm.unoptimizedPeersArr)) + // Number of peers to get in total: unoptimized + optimized // limited by maxOptimizedPeers maxPeers := len(spm.unoptimizedPeersArr) + len(spm.optimizedPeersArr) @@ -287,8 +289,8 @@ func (prm *getPeersMessage) handle(spm *SessionPeerManager) { maxPeers = maxOptimizedPeers } - // The best peer latency is 1 if we have recorded at least one peer's - // latency, 0 otherwise + // The best peer latency is the first optimized peer's latency. + // If we haven't recorded any peer's latency, use 0. 
var bestPeerLatency float64 if len(spm.optimizedPeersArr) > 0 { bestPeerLatency = float64(spm.activePeers[spm.optimizedPeersArr[0]].latency) @@ -297,22 +299,19 @@ func (prm *getPeersMessage) handle(spm *SessionPeerManager) { } optimizedPeers := make([]bssd.OptimizedPeer, 0, maxPeers) - - // Add optimized peers in order - for i := 0; i < maxPeers && i < len(spm.optimizedPeersArr); i++ { - p := spm.optimizedPeersArr[i] - optimizedPeers = append(optimizedPeers, bssd.OptimizedPeer{ - Peer: p, - OptimizationRating: bestPeerLatency / float64(spm.activePeers[p].latency), - }) - } - - // Add unoptimized peers in random order - randomOrder := rand.Perm(len(spm.unoptimizedPeersArr)) - remaining := maxPeers - len(optimizedPeers) - for i := 0; i < remaining; i++ { - p := spm.unoptimizedPeersArr[randomOrder[i]] - optimizedPeers = append(optimizedPeers, bssd.OptimizedPeer{Peer: p, OptimizationRating: 0.0}) + for i := 0; i < maxPeers; i++ { + // First add optimized peers in order + if i < len(spm.optimizedPeersArr) { + p := spm.optimizedPeersArr[i] + optimizedPeers = append(optimizedPeers, bssd.OptimizedPeer{ + Peer: p, + OptimizationRating: bestPeerLatency / float64(spm.activePeers[p].latency), + }) + } else { + // Then add unoptimized peers in random order + p := spm.unoptimizedPeersArr[randomOrder[i-len(spm.optimizedPeersArr)]] + optimizedPeers = append(optimizedPeers, bssd.OptimizedPeer{Peer: p, OptimizationRating: 0.0}) + } } prm.resp <- optimizedPeers } diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/sessionpeermanager/sessionpeermanager_test.go index 8c341a05c..87262b69d 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/sessionpeermanager/sessionpeermanager_test.go @@ -167,7 +167,7 @@ func TestOrderingPeers(t *testing.T) { case <-ctx.Done(): t.Fatal("Did not finish finding providers") } - time.Sleep(20 * time.Millisecond) + time.Sleep(5 * time.Millisecond) // record broadcast 
sessionPeerManager.RecordPeerRequests(nil, c) From 035fa83ba55813807d5190b4163d2683a452f894 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Tue, 13 Aug 2019 15:04:41 -0400 Subject: [PATCH 0804/1035] fix: don't ignore received blocks for pending wants This commit was moved from ipfs/go-bitswap@e6b35e9731d0467330426870bf21ca20f57e8c74 --- bitswap/bitswap.go | 20 +++--- bitswap/bitswap_test.go | 65 +++++++++++++++++++ bitswap/sessionmanager/sessionmanager.go | 14 ++++ bitswap/sessionmanager/sessionmanager_test.go | 27 ++++++++ bitswap/wantmanager/wantmanager.go | 26 -------- 5 files changed, 116 insertions(+), 36 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index c7af851fd..29a377820 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -273,14 +273,14 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks // HasBlock announces the existence of a block to this bitswap service. The // service will potentially notify its peers. func (bs *Bitswap) HasBlock(blk blocks.Block) error { - return bs.receiveBlocksFrom("", []blocks.Block{blk}) + return bs.receiveBlocksFrom(nil, "", []blocks.Block{blk}) } // TODO: Some of this stuff really only needs to be done when adding a block // from the user, not when receiving it from the network. // In case you run `git blame` on this comment, I'll save you some time: ask // @whyrusleeping, I don't know the answers you seek. 
-func (bs *Bitswap) receiveBlocksFrom(from peer.ID, blks []blocks.Block) error { +func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []blocks.Block) error { select { case <-bs.process.Closing(): return errors.New("bitswap is closed") @@ -294,7 +294,7 @@ func (bs *Bitswap) receiveBlocksFrom(from peer.ID, blks []blocks.Block) error { // Split blocks into wanted blocks vs duplicates wanted = make([]blocks.Block, 0, len(blks)) for _, b := range blks { - if bs.wm.IsWanted(b.Cid()) { + if bs.sm.InterestedIn(b.Cid()) { wanted = append(wanted, b) } else { log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), from) @@ -354,6 +354,12 @@ func (bs *Bitswap) receiveBlocksFrom(from peer.ID, blks []blocks.Block) error { } } + if from != "" { + for _, b := range wanted { + log.Event(ctx, "Bitswap.GetBlockRequest.End", b.Cid()) + } + } + return nil } @@ -382,17 +388,11 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg } // Process blocks - err := bs.receiveBlocksFrom(p, iblocks) + err := bs.receiveBlocksFrom(ctx, p, iblocks) if err != nil { log.Warningf("ReceiveMessage recvBlockFrom error: %s", err) return } - - for _, b := range iblocks { - if bs.wm.IsWanted(b.Cid()) { - log.Event(ctx, "Bitswap.GetBlockRequest.End", b.Cid()) - } - } } func (bs *Bitswap) updateReceiveCounters(blocks []blocks.Block) { diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index c6c3c8b87..9b7571820 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -21,6 +21,7 @@ import ( blocksutil "github.com/ipfs/go-ipfs-blocksutil" delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" + peer "github.com/libp2p/go-libp2p-core/peer" p2ptestutil "github.com/libp2p/go-libp2p-netutil" travis "github.com/libp2p/go-libp2p-testing/ci/travis" tu "github.com/libp2p/go-libp2p-testing/etc" @@ -138,6 +139,8 @@ func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { } } +// Tests that a 
received block is not stored in the blockstore if the block was +// not requested by the client func TestUnwantedBlockNotAdded(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) @@ -170,6 +173,68 @@ func TestUnwantedBlockNotAdded(t *testing.T) { } } +// Tests that a received block is returned to the client and stored in the +// blockstore in the following scenario: +// - the want for the block has been requested by the client +// - the want for the block has not yet been sent out to a peer +// (because the live request queue is full) +func TestPendingBlockAdded(t *testing.T) { + ctx := context.Background() + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + bg := blocksutil.NewBlockGenerator() + sessionBroadcastWantCapacity := 4 + + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() + + instance := ig.Instances(1)[0] + defer instance.Exchange.Close() + + oneSecCtx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + // Request enough blocks to exceed the session's broadcast want list + // capacity (by one block). 
The session will put the remaining block + // into the "tofetch" queue + blks := bg.Blocks(sessionBroadcastWantCapacity + 1) + ks := make([]cid.Cid, 0, len(blks)) + for _, b := range blks { + ks = append(ks, b.Cid()) + } + outch, err := instance.Exchange.GetBlocks(ctx, ks) + if err != nil { + t.Fatal(err) + } + + // Wait a little while to make sure the session has time to process the wants + time.Sleep(time.Millisecond * 20) + + // Simulate receiving a message which contains the block in the "tofetch" queue + lastBlock := blks[len(blks)-1] + bsMessage := message.New(true) + bsMessage.AddBlock(lastBlock) + unknownPeer := peer.ID("QmUHfvCQrzyR6vFXmeyCptfCWedfcmfa12V6UuziDtrw23") + instance.Exchange.ReceiveMessage(oneSecCtx, unknownPeer, bsMessage) + + // Make sure Bitswap adds the block to the output channel + blkrecvd, ok := <-outch + if !ok { + t.Fatal("timed out waiting for block") + } + if !blkrecvd.Cid().Equals(lastBlock.Cid()) { + t.Fatal("received wrong block") + } + + // Make sure Bitswap adds the block to the blockstore + blockInStore, err := instance.Blockstore().Has(lastBlock.Cid()) + if err != nil { + t.Fatal(err) + } + if !blockInStore { + t.Fatal("Block was not added to block store") + } +} + func TestLargeSwarm(t *testing.T) { if testing.Short() { t.SkipNow() diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index d65b86f4a..a702e6d5f 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -131,3 +131,17 @@ func (sm *SessionManager) ReceiveFrom(from peer.ID, ks []cid.Cid) { s.session.ReceiveFrom(from, sessKs) } } + +// InterestedIn indicates whether any of the sessions are waiting to receive +// the block with the given CID. 
+func (sm *SessionManager) InterestedIn(cid cid.Cid) bool { + sm.sessLk.Lock() + defer sm.sessLk.Unlock() + + for _, s := range sm.sessions { + if s.session.InterestedIn(cid) { + return true + } + } + return false +} diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index 0d0c94d64..0522a5b02 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -176,6 +176,33 @@ func TestReceivingBlocksWhenNotInterested(t *testing.T) { } } +func TestInterestedIn(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory) + + blks := testutil.GenerateBlocksOfSize(4, 1024) + var cids []cid.Cid + for _, b := range blks { + cids = append(cids, b.Cid()) + } + + nextInterestedIn = []cid.Cid{cids[0], cids[1]} + _ = sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + nextInterestedIn = []cid.Cid{cids[0], cids[2]} + _ = sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + + if !sm.InterestedIn(cids[0]) || + !sm.InterestedIn(cids[1]) || + !sm.InterestedIn(cids[2]) { + t.Fatal("expected interest but session manager was not interested") + } + if sm.InterestedIn(cids[3]) { + t.Fatal("expected no interest but session manager was interested") + } +} + func TestRemovingPeersWhenManagerContextCancelled(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) diff --git a/bitswap/wantmanager/wantmanager.go b/bitswap/wantmanager/wantmanager.go index 2ed7082e4..f726d6843 100644 --- a/bitswap/wantmanager/wantmanager.go +++ b/bitswap/wantmanager/wantmanager.go @@ -80,22 +80,6 @@ func (wm *WantManager) CancelWants(ctx context.Context, ks []cid.Cid, peers []pe wm.addEntries(context.Background(), ks, peers, true, ses) } -// IsWanted returns whether a CID is currently wanted. 
-func (wm *WantManager) IsWanted(c cid.Cid) bool { - resp := make(chan bool, 1) - select { - case wm.wantMessages <- &isWantedMessage{c, resp}: - case <-wm.ctx.Done(): - return false - } - select { - case wanted := <-resp: - return wanted - case <-wm.ctx.Done(): - return false - } -} - // CurrentWants returns the list of current wants. func (wm *WantManager) CurrentWants() []wantlist.Entry { resp := make(chan []wantlist.Entry, 1) @@ -232,16 +216,6 @@ func (ws *wantSet) handle(wm *WantManager) { wm.peerHandler.SendMessage(ws.entries, ws.targets, ws.from) } -type isWantedMessage struct { - c cid.Cid - resp chan<- bool -} - -func (iwm *isWantedMessage) handle(wm *WantManager) { - _, isWanted := wm.wl.Contains(iwm.c) - iwm.resp <- isWanted -} - type currentWantsMessage struct { resp chan<- []wantlist.Entry } From e8a735e4cbddbf99898a9675dff341b003d2d2fc Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 16 Aug 2019 09:19:41 -0400 Subject: [PATCH 0805/1035] fix: use context.Background() instead of nil This commit was moved from ipfs/go-bitswap@38dcf8c329199e123d0b89de7ece3d61a8865eda --- bitswap/bitswap.go | 2 +- bitswap/sessionmanager/sessionmanager_test.go | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 29a377820..1bcf5e718 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -273,7 +273,7 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks // HasBlock announces the existence of a block to this bitswap service. The // service will potentially notify its peers. 
func (bs *Bitswap) HasBlock(blk blocks.Block) error { - return bs.receiveBlocksFrom(nil, "", []blocks.Block{blk}) + return bs.receiveBlocksFrom(context.Background(), "", []blocks.Block{blk}) } // TODO: Some of this stuff really only needs to be done when adding a block diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index 0522a5b02..2b303b6df 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -180,7 +180,9 @@ func TestInterestedIn(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory) + notif := notifications.New() + defer notif.Shutdown() + sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory, notif) blks := testutil.GenerateBlocksOfSize(4, 1024) var cids []cid.Cid From 6631d866f8277bfe82c58e49f1eced05dbfbad68 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Mon, 19 Aug 2019 22:47:37 -0700 Subject: [PATCH 0806/1035] refactor: use locks for session want management This commit was moved from ipfs/go-bitswap@56219bd23b1a02bcdf74590f396e8fb6427b59f7 --- bitswap/bitswap.go | 2 +- bitswap/session/session.go | 334 +++++++++--------- bitswap/session/session_test.go | 16 + bitswap/sessionmanager/sessionmanager.go | 7 +- bitswap/sessionmanager/sessionmanager_test.go | 11 +- 5 files changed, 201 insertions(+), 169 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 1bcf5e718..c42d80adc 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -294,7 +294,7 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []b // Split blocks into wanted blocks vs duplicates wanted = make([]blocks.Block, 0, len(blks)) for _, b := range blks { - if bs.sm.InterestedIn(b.Cid()) { + if bs.sm.IsWanted(b.Cid()) { wanted = append(wanted, b) } else { log.Debugf("[recv] block not in wantlist; 
cid=%s, peer=%s", b.Cid(), from) diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 886971c9f..76c8f3fd9 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -3,9 +3,9 @@ package session import ( "context" "math/rand" + "sync" "time" - lru "github.com/hashicorp/golang-lru" bsgetter "github.com/ipfs/go-bitswap/getter" notifications "github.com/ipfs/go-bitswap/notifications" bssd "github.com/ipfs/go-bitswap/sessiondata" @@ -47,16 +47,18 @@ type RequestSplitter interface { RecordUniqueBlock() } -type interestReq struct { - c cid.Cid - resp chan bool -} - type rcvFrom struct { from peer.ID ks []cid.Cid } +type sessionWants struct { + sync.RWMutex + toFetch *cidQueue + liveWants map[cid.Cid]time.Time + pastWants *cid.Set +} + // Session holds state for an individual bitswap transfer operation. // This allows bitswap to make smarter decisions about who to send wantlist // info to, and who to request blocks from. @@ -67,19 +69,16 @@ type Session struct { pm PeerManager srs RequestSplitter + sw sessionWants + // channels incoming chan rcvFrom newReqs chan []cid.Cid cancelKeys chan []cid.Cid - interestReqs chan interestReq latencyReqs chan chan time.Duration tickDelayReqs chan time.Duration // do not touch outside run loop - tofetch *cidQueue - interest *lru.Cache - pastWants *cidQueue - liveWants map[cid.Cid]time.Time idleTick *time.Timer periodicSearchTimer *time.Timer baseTickDelay time.Duration @@ -105,12 +104,13 @@ func New(ctx context.Context, initialSearchDelay time.Duration, periodicSearchDelay delay.D) *Session { s := &Session{ - liveWants: make(map[cid.Cid]time.Time), + sw: sessionWants{ + toFetch: newCidQueue(), + liveWants: make(map[cid.Cid]time.Time), + pastWants: cid.NewSet(), + }, newReqs: make(chan []cid.Cid), cancelKeys: make(chan []cid.Cid), - tofetch: newCidQueue(), - pastWants: newCidQueue(), - interestReqs: make(chan interestReq), latencyReqs: make(chan chan time.Duration), tickDelayReqs: make(chan 
time.Duration), ctx: ctx, @@ -126,9 +126,6 @@ func New(ctx context.Context, periodicSearchDelay: periodicSearchDelay, } - cache, _ := lru.New(2048) - s.interest = cache - go s.run(ctx) return s @@ -142,34 +139,20 @@ func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid) { } } -// InterestedIn returns true if this session is interested in the given Cid. +// IsWanted returns true if this session is waiting to receive the given Cid. +func (s *Session) IsWanted(c cid.Cid) bool { + s.sw.RLock() + defer s.sw.RUnlock() + + return s.unlockedIsWanted(c) +} + +// InterestedIn returns true if this session has ever requested the given Cid. func (s *Session) InterestedIn(c cid.Cid) bool { - if s.interest.Contains(c) { - return true - } - // TODO: PERF: this is using a channel to guard a map access against race - // conditions. This is definitely much slower than a mutex, though its unclear - // if it will actually induce any noticeable slowness. This is implemented this - // way to avoid adding a more complex set of mutexes around the liveWants map. - // note that in the average case (where this session *is* interested in the - // block we received) this function will not be called, as the cid will likely - // still be in the interest cache. - resp := make(chan bool, 1) - select { - case s.interestReqs <- interestReq{ - c: c, - resp: resp, - }: - case <-s.ctx.Done(): - return false - } + s.sw.RLock() + defer s.sw.RUnlock() - select { - case want := <-resp: - return want - case <-s.ctx.Done(): - return false - } + return s.unlockedIsWanted(c) || s.sw.pastWants.Has(c) } // GetBlock fetches a single block. 
@@ -233,23 +216,15 @@ func (s *Session) run(ctx context.Context) { for { select { case rcv := <-s.incoming: - s.cancelIncoming(ctx, rcv) - // Record statistics only if the blocks came from the network - // (blocks can also be received from the local node) - if rcv.from != "" { - s.updateReceiveCounters(ctx, rcv) - } s.handleIncoming(ctx, rcv) case keys := <-s.newReqs: - s.handleNewRequest(ctx, keys) + s.wantBlocks(ctx, keys) case keys := <-s.cancelKeys: s.handleCancel(keys) case <-s.idleTick.C: s.handleIdleTick(ctx) case <-s.periodicSearchTimer.C: s.handlePeriodicSearch(ctx) - case lwchk := <-s.interestReqs: - lwchk.resp <- s.cidIsWanted(lwchk.c) case resp := <-s.latencyReqs: resp <- s.averageLatency() case baseTickDelay := <-s.tickDelayReqs: @@ -261,59 +236,17 @@ func (s *Session) run(ctx context.Context) { } } -func (s *Session) cancelIncoming(ctx context.Context, rcv rcvFrom) { - // We've received the blocks so we can cancel any outstanding wants for them - wanted := make([]cid.Cid, 0, len(rcv.ks)) - for _, k := range rcv.ks { - if s.cidIsWanted(k) { - wanted = append(wanted, k) - } - } - s.pm.RecordCancels(wanted) - s.wm.CancelWants(s.ctx, wanted, nil, s.id) -} - -func (s *Session) handleIncoming(ctx context.Context, rcv rcvFrom) { - s.idleTick.Stop() - - // Process the received blocks - s.processIncoming(ctx, rcv.ks) - - s.resetIdleTick() -} - -func (s *Session) handleNewRequest(ctx context.Context, keys []cid.Cid) { - for _, k := range keys { - s.interest.Add(k, nil) - } - if toadd := s.wantBudget(); toadd > 0 { - if toadd > len(keys) { - toadd = len(keys) - } - - now := keys[:toadd] - keys = keys[toadd:] +func (s *Session) handleCancel(keys []cid.Cid) { + s.sw.Lock() + defer s.sw.Unlock() - s.wantBlocks(ctx, now) - } for _, k := range keys { - s.tofetch.Push(k) - } -} - -func (s *Session) handleCancel(keys []cid.Cid) { - for _, c := range keys { - s.tofetch.Remove(c) + s.sw.toFetch.Remove(k) } } func (s *Session) handleIdleTick(ctx context.Context) { - live 
:= make([]cid.Cid, 0, len(s.liveWants)) - now := time.Now() - for c := range s.liveWants { - live = append(live, c) - s.liveWants[c] = now - } + live := s.prepareBroadcast() // Broadcast these keys to everyone we're connected to s.pm.RecordPeerRequests(nil, live) @@ -326,11 +259,27 @@ func (s *Session) handleIdleTick(ctx context.Context) { } s.resetIdleTick() - if len(s.liveWants) > 0 { + s.sw.RLock() + defer s.sw.RUnlock() + + if len(s.sw.liveWants) > 0 { s.consecutiveTicks++ } } +func (s *Session) prepareBroadcast() []cid.Cid { + s.sw.Lock() + defer s.sw.Unlock() + + live := make([]cid.Cid, 0, len(s.sw.liveWants)) + now := time.Now() + for c := range s.sw.liveWants { + live = append(live, c) + s.sw.liveWants[c] = now + } + return live +} + func (s *Session) handlePeriodicSearch(ctx context.Context) { randomWant := s.randomLiveWant() if !randomWant.Defined() { @@ -346,12 +295,15 @@ func (s *Session) handlePeriodicSearch(ctx context.Context) { } func (s *Session) randomLiveWant() cid.Cid { - if len(s.liveWants) == 0 { + s.sw.RLock() + defer s.sw.RUnlock() + + if len(s.sw.liveWants) == 0 { return cid.Cid{} } - i := rand.Intn(len(s.liveWants)) + i := rand.Intn(len(s.sw.liveWants)) // picking a random live want - for k := range s.liveWants { + for k := range s.sw.liveWants { if i == 0 { return k } @@ -359,83 +311,127 @@ func (s *Session) randomLiveWant() cid.Cid { } return cid.Cid{} } + func (s *Session) handleShutdown() { s.idleTick.Stop() - live := make([]cid.Cid, 0, len(s.liveWants)) - for c := range s.liveWants { + live := s.liveWants() + s.wm.CancelWants(s.ctx, live, nil, s.id) +} + +func (s *Session) liveWants() []cid.Cid { + s.sw.RLock() + defer s.sw.RUnlock() + + live := make([]cid.Cid, 0, len(s.sw.liveWants)) + for c := range s.sw.liveWants { live = append(live, c) } - s.wm.CancelWants(s.ctx, live, nil, s.id) + return live } -func (s *Session) cidIsWanted(c cid.Cid) bool { - _, ok := s.liveWants[c] +func (s *Session) unlockedIsWanted(c cid.Cid) bool { + _, ok 
:= s.sw.liveWants[c] if !ok { - ok = s.tofetch.Has(c) + ok = s.sw.toFetch.Has(c) } return ok } -func (s *Session) processIncoming(ctx context.Context, ks []cid.Cid) { - for _, c := range ks { - if s.cidIsWanted(c) { - // If the block CID was in the live wants queue, remove it - tval, ok := s.liveWants[c] - if ok { - s.latTotal += time.Since(tval) - delete(s.liveWants, c) - } else { - // Otherwise remove it from the tofetch queue, if it was there - s.tofetch.Remove(c) - } - s.fetchcnt++ - - // We've received new wanted blocks, so reset the number of ticks - // that have occurred since the last new block - s.consecutiveTicks = 0 - - // Keep track of CIDs we've successfully fetched - s.pastWants.Push(c) - } +func (s *Session) handleIncoming(ctx context.Context, rcv rcvFrom) { + // Record statistics only if the blocks came from the network + // (blocks can also be received from the local node) + if rcv.from != "" { + s.updateReceiveCounters(ctx, rcv) } - // Transfer as many CIDs as possible from the tofetch queue into the - // live wants queue - toAdd := s.wantBudget() - if toAdd > s.tofetch.Len() { - toAdd = s.tofetch.Len() - } - if toAdd > 0 { - var keys []cid.Cid - for i := 0; i < toAdd; i++ { - keys = append(keys, s.tofetch.Pop()) - } - s.wantBlocks(ctx, keys) + // Update the want list + wanted, totalLatency := s.blocksReceived(rcv.ks) + if len(wanted) == 0 { + return } + + // We've received the blocks so we can cancel any outstanding wants for them + s.cancelIncoming(ctx, wanted) + + s.idleTick.Stop() + + // Process the received blocks + s.processIncoming(ctx, wanted, totalLatency) + + s.resetIdleTick() } func (s *Session) updateReceiveCounters(ctx context.Context, rcv rcvFrom) { - for _, k := range rcv.ks { - // Inform the request splitter of unique / duplicate blocks - if s.cidIsWanted(k) { + s.sw.RLock() + + for _, c := range rcv.ks { + if s.unlockedIsWanted(c) { s.srs.RecordUniqueBlock() - } else if s.pastWants.Has(k) { + } else if s.sw.pastWants.Has(c) { 
s.srs.RecordDuplicateBlock() } } + s.sw.RUnlock() + // Record response (to be able to time latency) if len(rcv.ks) > 0 { s.pm.RecordPeerResponse(rcv.from, rcv.ks) } } -func (s *Session) wantBlocks(ctx context.Context, ks []cid.Cid) { - now := time.Now() - for _, c := range ks { - s.liveWants[c] = now +func (s *Session) blocksReceived(cids []cid.Cid) ([]cid.Cid, time.Duration) { + s.sw.Lock() + defer s.sw.Unlock() + + totalLatency := time.Duration(0) + wanted := make([]cid.Cid, 0, len(cids)) + for _, c := range cids { + if s.unlockedIsWanted(c) { + wanted = append(wanted, c) + + // If the block CID was in the live wants queue, remove it + tval, ok := s.sw.liveWants[c] + if ok { + totalLatency += time.Since(tval) + delete(s.sw.liveWants, c) + } else { + // Otherwise remove it from the toFetch queue, if it was there + s.sw.toFetch.Remove(c) + } + + // Keep track of CIDs we've successfully fetched + s.sw.pastWants.Add(c) + } + } + + return wanted, totalLatency +} + +func (s *Session) cancelIncoming(ctx context.Context, ks []cid.Cid) { + s.pm.RecordCancels(ks) + s.wm.CancelWants(s.ctx, ks, nil, s.id) +} + +func (s *Session) processIncoming(ctx context.Context, ks []cid.Cid, totalLatency time.Duration) { + // Keep track of the total number of blocks received and total latency + s.fetchcnt += len(ks) + s.latTotal += totalLatency + + // We've received new wanted blocks, so reset the number of ticks + // that have occurred since the last new block + s.consecutiveTicks = 0 + + s.wantBlocks(ctx, nil) +} + +func (s *Session) wantBlocks(ctx context.Context, newks []cid.Cid) { + ks := s.getNextWants(s.wantLimit(), newks) + if len(ks) == 0 { + return } + peers := s.pm.GetOptimizedPeers() if len(peers) > 0 { splitRequests := s.srs.SplitRequest(peers, ks) @@ -449,6 +445,29 @@ func (s *Session) wantBlocks(ctx context.Context, ks []cid.Cid) { } } +func (s *Session) getNextWants(limit int, newWants []cid.Cid) []cid.Cid { + s.sw.Lock() + defer s.sw.Unlock() + + now := time.Now() + + 
for _, k := range newWants { + s.sw.toFetch.Push(k) + } + + currentLiveCount := len(s.sw.liveWants) + toAdd := limit - currentLiveCount + + var live []cid.Cid + for ; toAdd > 0 && s.sw.toFetch.Len() > 0; toAdd-- { + c := s.sw.toFetch.Pop() + live = append(live, c) + s.sw.liveWants[c] = now + } + + return live +} + func (s *Session) averageLatency() time.Duration { return s.latTotal / time.Duration(s.fetchcnt) } @@ -465,16 +484,9 @@ func (s *Session) resetIdleTick() { s.idleTick.Reset(tickDelay) } -func (s *Session) wantBudget() int { - live := len(s.liveWants) - var budget int +func (s *Session) wantLimit() int { if len(s.pm.GetOptimizedPeers()) > 0 { - budget = targetedLiveWantsLimit - live - } else { - budget = broadcastLiveWantsLimit - live - } - if budget < 0 { - budget = 0 + return targetedLiveWantsLimit } - return budget + return broadcastLiveWantsLimit } diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index 07b834a8d..3a52fbdfb 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -118,6 +118,14 @@ func TestSessionGetBlocks(t *testing.T) { if receivedWantReq.peers != nil { t.Fatal("first want request should be a broadcast") } + for _, c := range cids { + if !session.IsWanted(c) { + t.Fatal("expected session to want cids") + } + if !session.InterestedIn(c) { + t.Fatal("expected session to be interested in cids") + } + } // now receive the first set of blocks peers := testutil.GeneratePeers(broadcastLiveWantsLimit) @@ -211,6 +219,14 @@ func TestSessionGetBlocks(t *testing.T) { t.Fatal("received incorrect block") } } + for _, c := range cids { + if session.IsWanted(c) { + t.Fatal("expected session NOT to want cids") + } + if !session.InterestedIn(c) { + t.Fatal("expected session to still be interested in cids") + } + } } func TestSessionFindMorePeers(t *testing.T) { diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index a702e6d5f..7e73bfe47 100644 --- 
a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -19,6 +19,7 @@ type Session interface { exchange.Fetcher InterestedIn(cid.Cid) bool ReceiveFrom(peer.ID, []cid.Cid) + IsWanted(cid.Cid) bool } type sesTrk struct { @@ -132,14 +133,14 @@ func (sm *SessionManager) ReceiveFrom(from peer.ID, ks []cid.Cid) { } } -// InterestedIn indicates whether any of the sessions are waiting to receive +// IsWanted indicates whether any of the sessions are waiting to receive // the block with the given CID. -func (sm *SessionManager) InterestedIn(cid cid.Cid) bool { +func (sm *SessionManager) IsWanted(cid cid.Cid) bool { sm.sessLk.Lock() defer sm.sessLk.Unlock() for _, s := range sm.sessions { - if s.session.InterestedIn(cid) { + if s.session.IsWanted(cid) { return true } } diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index 2b303b6df..022b6c025 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -40,6 +40,9 @@ func (fs *fakeSession) InterestedIn(c cid.Cid) bool { } return false } +func (fs *fakeSession) IsWanted(c cid.Cid) bool { + return fs.InterestedIn(c) +} func (fs *fakeSession) ReceiveFrom(p peer.ID, ks []cid.Cid) { fs.ks = append(fs.ks, ks...) 
} @@ -195,12 +198,12 @@ func TestInterestedIn(t *testing.T) { nextInterestedIn = []cid.Cid{cids[0], cids[2]} _ = sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - if !sm.InterestedIn(cids[0]) || - !sm.InterestedIn(cids[1]) || - !sm.InterestedIn(cids[2]) { + if !sm.IsWanted(cids[0]) || + !sm.IsWanted(cids[1]) || + !sm.IsWanted(cids[2]) { t.Fatal("expected interest but session manager was not interested") } - if sm.InterestedIn(cids[3]) { + if sm.IsWanted(cids[3]) { t.Fatal("expected no interest but session manager was interested") } } From 5e1d9160c107b9f38292118f21c3853d7f9c9257 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Tue, 20 Aug 2019 08:54:33 -0700 Subject: [PATCH 0807/1035] test: better session manager test naming This commit was moved from ipfs/go-bitswap@7458eb8f2036347be0e83461e983204e0be4edde --- bitswap/sessionmanager/sessionmanager_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index 022b6c025..411aee702 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -179,7 +179,7 @@ func TestReceivingBlocksWhenNotInterested(t *testing.T) { } } -func TestInterestedIn(t *testing.T) { +func TestIsWanted(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -201,10 +201,10 @@ func TestInterestedIn(t *testing.T) { if !sm.IsWanted(cids[0]) || !sm.IsWanted(cids[1]) || !sm.IsWanted(cids[2]) { - t.Fatal("expected interest but session manager was not interested") + t.Fatal("expected unwanted but session manager did want cid") } if sm.IsWanted(cids[3]) { - t.Fatal("expected no interest but session manager was interested") + t.Fatal("expected wanted but session manager did not want cid") } } From 133e46ec74e346d02caaa1233ec0850ac962b135 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 22 Aug 2019 
13:16:31 -0700 Subject: [PATCH 0808/1035] refactor: session want management This commit was moved from ipfs/go-bitswap@e9661edcdb47ef54b26a34eea6e0a51a5f788803 --- bitswap/session/session.go | 171 ++-------------- bitswap/session/session_test.go | 6 - bitswap/session/sessionwants.go | 190 ++++++++++++++++++ bitswap/session/sessionwants_test.go | 152 ++++++++++++++ bitswap/sessionmanager/sessionmanager.go | 19 +- bitswap/sessionmanager/sessionmanager_test.go | 47 ++--- 6 files changed, 390 insertions(+), 195 deletions(-) create mode 100644 bitswap/session/sessionwants.go create mode 100644 bitswap/session/sessionwants_test.go diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 76c8f3fd9..d2263aa61 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -2,8 +2,6 @@ package session import ( "context" - "math/rand" - "sync" "time" bsgetter "github.com/ipfs/go-bitswap/getter" @@ -52,13 +50,6 @@ type rcvFrom struct { ks []cid.Cid } -type sessionWants struct { - sync.RWMutex - toFetch *cidQueue - liveWants map[cid.Cid]time.Time - pastWants *cid.Set -} - // Session holds state for an individual bitswap transfer operation. // This allows bitswap to make smarter decisions about who to send wantlist // info to, and who to request blocks from. @@ -133,26 +124,20 @@ func New(ctx context.Context, // ReceiveFrom receives incoming blocks from the given peer. func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid) { + interested := s.sw.FilterInteresting(ks) + if len(interested) == 0 { + return + } + select { - case s.incoming <- rcvFrom{from: from, ks: ks}: + case s.incoming <- rcvFrom{from: from, ks: interested}: case <-s.ctx.Done(): } } // IsWanted returns true if this session is waiting to receive the given Cid. func (s *Session) IsWanted(c cid.Cid) bool { - s.sw.RLock() - defer s.sw.RUnlock() - - return s.unlockedIsWanted(c) -} - -// InterestedIn returns true if this session has ever requested the given Cid. 
-func (s *Session) InterestedIn(c cid.Cid) bool { - s.sw.RLock() - defer s.sw.RUnlock() - - return s.unlockedIsWanted(c) || s.sw.pastWants.Has(c) + return s.sw.IsWanted(c) } // GetBlock fetches a single block. @@ -220,7 +205,7 @@ func (s *Session) run(ctx context.Context) { case keys := <-s.newReqs: s.wantBlocks(ctx, keys) case keys := <-s.cancelKeys: - s.handleCancel(keys) + s.sw.CancelPending(keys) case <-s.idleTick.C: s.handleIdleTick(ctx) case <-s.periodicSearchTimer.C: @@ -236,17 +221,8 @@ func (s *Session) run(ctx context.Context) { } } -func (s *Session) handleCancel(keys []cid.Cid) { - s.sw.Lock() - defer s.sw.Unlock() - - for _, k := range keys { - s.sw.toFetch.Remove(k) - } -} - func (s *Session) handleIdleTick(ctx context.Context) { - live := s.prepareBroadcast() + live := s.sw.PrepareBroadcast() // Broadcast these keys to everyone we're connected to s.pm.RecordPeerRequests(nil, live) @@ -259,29 +235,13 @@ func (s *Session) handleIdleTick(ctx context.Context) { } s.resetIdleTick() - s.sw.RLock() - defer s.sw.RUnlock() - - if len(s.sw.liveWants) > 0 { + if s.sw.HasLiveWants() { s.consecutiveTicks++ } } -func (s *Session) prepareBroadcast() []cid.Cid { - s.sw.Lock() - defer s.sw.Unlock() - - live := make([]cid.Cid, 0, len(s.sw.liveWants)) - now := time.Now() - for c := range s.sw.liveWants { - live = append(live, c) - s.sw.liveWants[c] = now - } - return live -} - func (s *Session) handlePeriodicSearch(ctx context.Context) { - randomWant := s.randomLiveWant() + randomWant := s.sw.RandomLiveWant() if !randomWant.Defined() { return } @@ -294,50 +254,13 @@ func (s *Session) handlePeriodicSearch(ctx context.Context) { s.periodicSearchTimer.Reset(s.periodicSearchDelay.NextWaitTime()) } -func (s *Session) randomLiveWant() cid.Cid { - s.sw.RLock() - defer s.sw.RUnlock() - - if len(s.sw.liveWants) == 0 { - return cid.Cid{} - } - i := rand.Intn(len(s.sw.liveWants)) - // picking a random live want - for k := range s.sw.liveWants { - if i == 0 { - return k - } - i-- 
- } - return cid.Cid{} -} - func (s *Session) handleShutdown() { s.idleTick.Stop() - live := s.liveWants() + live := s.sw.LiveWants() s.wm.CancelWants(s.ctx, live, nil, s.id) } -func (s *Session) liveWants() []cid.Cid { - s.sw.RLock() - defer s.sw.RUnlock() - - live := make([]cid.Cid, 0, len(s.sw.liveWants)) - for c := range s.sw.liveWants { - live = append(live, c) - } - return live -} - -func (s *Session) unlockedIsWanted(c cid.Cid) bool { - _, ok := s.sw.liveWants[c] - if !ok { - ok = s.sw.toFetch.Has(c) - } - return ok -} - func (s *Session) handleIncoming(ctx context.Context, rcv rcvFrom) { // Record statistics only if the blocks came from the network // (blocks can also be received from the local node) @@ -346,7 +269,7 @@ func (s *Session) handleIncoming(ctx context.Context, rcv rcvFrom) { } // Update the want list - wanted, totalLatency := s.blocksReceived(rcv.ks) + wanted, totalLatency := s.sw.BlocksReceived(rcv.ks) if len(wanted) == 0 { return } @@ -363,17 +286,8 @@ func (s *Session) handleIncoming(ctx context.Context, rcv rcvFrom) { } func (s *Session) updateReceiveCounters(ctx context.Context, rcv rcvFrom) { - s.sw.RLock() - - for _, c := range rcv.ks { - if s.unlockedIsWanted(c) { - s.srs.RecordUniqueBlock() - } else if s.sw.pastWants.Has(c) { - s.srs.RecordDuplicateBlock() - } - } - - s.sw.RUnlock() + // Record unique vs duplicate blocks + s.sw.ForEachUniqDup(rcv.ks, s.srs.RecordUniqueBlock, s.srs.RecordDuplicateBlock) // Record response (to be able to time latency) if len(rcv.ks) > 0 { @@ -381,34 +295,6 @@ func (s *Session) updateReceiveCounters(ctx context.Context, rcv rcvFrom) { } } -func (s *Session) blocksReceived(cids []cid.Cid) ([]cid.Cid, time.Duration) { - s.sw.Lock() - defer s.sw.Unlock() - - totalLatency := time.Duration(0) - wanted := make([]cid.Cid, 0, len(cids)) - for _, c := range cids { - if s.unlockedIsWanted(c) { - wanted = append(wanted, c) - - // If the block CID was in the live wants queue, remove it - tval, ok := s.sw.liveWants[c] 
- if ok { - totalLatency += time.Since(tval) - delete(s.sw.liveWants, c) - } else { - // Otherwise remove it from the toFetch queue, if it was there - s.sw.toFetch.Remove(c) - } - - // Keep track of CIDs we've successfully fetched - s.sw.pastWants.Add(c) - } - } - - return wanted, totalLatency -} - func (s *Session) cancelIncoming(ctx context.Context, ks []cid.Cid) { s.pm.RecordCancels(ks) s.wm.CancelWants(s.ctx, ks, nil, s.id) @@ -427,7 +313,9 @@ func (s *Session) processIncoming(ctx context.Context, ks []cid.Cid, totalLatenc } func (s *Session) wantBlocks(ctx context.Context, newks []cid.Cid) { - ks := s.getNextWants(s.wantLimit(), newks) + // Given the want limit and any newly received blocks, get as many wants as + // we can to send out + ks := s.sw.GetNextWants(s.wantLimit(), newks) if len(ks) == 0 { return } @@ -445,29 +333,6 @@ func (s *Session) wantBlocks(ctx context.Context, newks []cid.Cid) { } } -func (s *Session) getNextWants(limit int, newWants []cid.Cid) []cid.Cid { - s.sw.Lock() - defer s.sw.Unlock() - - now := time.Now() - - for _, k := range newWants { - s.sw.toFetch.Push(k) - } - - currentLiveCount := len(s.sw.liveWants) - toAdd := limit - currentLiveCount - - var live []cid.Cid - for ; toAdd > 0 && s.sw.toFetch.Len() > 0; toAdd-- { - c := s.sw.toFetch.Pop() - live = append(live, c) - s.sw.liveWants[c] = now - } - - return live -} - func (s *Session) averageLatency() time.Duration { return s.latTotal / time.Duration(s.fetchcnt) } diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index 3a52fbdfb..19266d1b4 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -122,9 +122,6 @@ func TestSessionGetBlocks(t *testing.T) { if !session.IsWanted(c) { t.Fatal("expected session to want cids") } - if !session.InterestedIn(c) { - t.Fatal("expected session to be interested in cids") - } } // now receive the first set of blocks @@ -223,9 +220,6 @@ func TestSessionGetBlocks(t *testing.T) { if 
session.IsWanted(c) { t.Fatal("expected session NOT to want cids") } - if !session.InterestedIn(c) { - t.Fatal("expected session to still be interested in cids") - } } } diff --git a/bitswap/session/sessionwants.go b/bitswap/session/sessionwants.go new file mode 100644 index 000000000..58684ae84 --- /dev/null +++ b/bitswap/session/sessionwants.go @@ -0,0 +1,190 @@ +package session + +import ( + "math/rand" + "sync" + "time" + + cid "github.com/ipfs/go-cid" +) + +type sessionWants struct { + sync.RWMutex + toFetch *cidQueue + liveWants map[cid.Cid]time.Time + pastWants *cid.Set +} + +// BlocksReceived moves received block CIDs from live to past wants and +// measures latency. It returns the CIDs of blocks that were actually wanted +// (as opposed to duplicates) and the total latency for all incoming blocks. +func (sw *sessionWants) BlocksReceived(cids []cid.Cid) ([]cid.Cid, time.Duration) { + sw.Lock() + defer sw.Unlock() + + totalLatency := time.Duration(0) + wanted := make([]cid.Cid, 0, len(cids)) + for _, c := range cids { + if sw.unlockedIsWanted(c) { + wanted = append(wanted, c) + + // If the block CID was in the live wants queue, remove it + tval, ok := sw.liveWants[c] + if ok { + totalLatency += time.Since(tval) + delete(sw.liveWants, c) + } else { + // Otherwise remove it from the toFetch queue, if it was there + sw.toFetch.Remove(c) + } + + // Keep track of CIDs we've successfully fetched + sw.pastWants.Add(c) + } + } + + return wanted, totalLatency +} + +// GetNextWants adds any new wants to the list of CIDs to fetch, then moves as +// many CIDs from the fetch queue to the live wants list as possible (given the +// limit). Returns the newly live wants. 
+func (sw *sessionWants) GetNextWants(limit int, newWants []cid.Cid) []cid.Cid { + now := time.Now() + + sw.Lock() + defer sw.Unlock() + + // Add new wants to the fetch queue + for _, k := range newWants { + sw.toFetch.Push(k) + } + + // Move CIDs from fetch queue to the live wants queue (up to the limit) + currentLiveCount := len(sw.liveWants) + toAdd := limit - currentLiveCount + + var live []cid.Cid + for ; toAdd > 0 && sw.toFetch.Len() > 0; toAdd-- { + c := sw.toFetch.Pop() + live = append(live, c) + sw.liveWants[c] = now + } + + return live +} + +// PrepareBroadcast saves the current time for each live want and returns the +// live want CIDs. +func (sw *sessionWants) PrepareBroadcast() []cid.Cid { + now := time.Now() + + sw.Lock() + defer sw.Unlock() + + live := make([]cid.Cid, 0, len(sw.liveWants)) + for c := range sw.liveWants { + live = append(live, c) + sw.liveWants[c] = now + } + return live +} + +// CancelPending removes the given CIDs from the fetch queue. +func (sw *sessionWants) CancelPending(keys []cid.Cid) { + sw.Lock() + defer sw.Unlock() + + for _, k := range keys { + sw.toFetch.Remove(k) + } +} + +// ForEachUniqDup iterates over each of the given CIDs and calls isUniqFn +// if the session is expecting a block for the CID, or isDupFn if the session +// has already received the block. 
+func (sw *sessionWants) ForEachUniqDup(ks []cid.Cid, isUniqFn, isDupFn func()) { + sw.RLock() + + for _, k := range ks { + if sw.unlockedIsWanted(k) { + isUniqFn() + } else if sw.pastWants.Has(k) { + isDupFn() + } + } + + sw.RUnlock() +} + +// LiveWants returns a list of live wants +func (sw *sessionWants) LiveWants() []cid.Cid { + sw.RLock() + defer sw.RUnlock() + + live := make([]cid.Cid, 0, len(sw.liveWants)) + for c := range sw.liveWants { + live = append(live, c) + } + return live +} + +// RandomLiveWant returns a randomly selected live want +func (sw *sessionWants) RandomLiveWant() cid.Cid { + sw.RLock() + defer sw.RUnlock() + + if len(sw.liveWants) == 0 { + return cid.Cid{} + } + i := rand.Intn(len(sw.liveWants)) + // picking a random live want + for k := range sw.liveWants { + if i == 0 { + return k + } + i-- + } + return cid.Cid{} +} + +// Has live wants indicates if there are any live wants +func (sw *sessionWants) HasLiveWants() bool { + sw.RLock() + defer sw.RUnlock() + + return len(sw.liveWants) > 0 +} + +// IsWanted indicates if the session is expecting to receive the block with the +// given CID +func (sw *sessionWants) IsWanted(c cid.Cid) bool { + sw.RLock() + defer sw.RUnlock() + + return sw.unlockedIsWanted(c) +} + +// FilterInteresting filters the list so that it only contains keys for +// blocks that the session is waiting to receive or has received in the past +func (sw *sessionWants) FilterInteresting(ks []cid.Cid) []cid.Cid { + sw.RLock() + defer sw.RUnlock() + + interested := make([]cid.Cid, 0, len(ks)) + for _, k := range ks { + if sw.unlockedIsWanted(k) || sw.pastWants.Has(k) { + interested = append(interested, k) + } + } + + return interested +} + +func (sw *sessionWants) unlockedIsWanted(c cid.Cid) bool { + _, ok := sw.liveWants[c] + if !ok { + ok = sw.toFetch.Has(c) + } + return ok +} diff --git a/bitswap/session/sessionwants_test.go b/bitswap/session/sessionwants_test.go new file mode 100644 index 000000000..879729242 --- /dev/null 
+++ b/bitswap/session/sessionwants_test.go @@ -0,0 +1,152 @@ +package session + +import ( + "testing" + "time" + + "github.com/ipfs/go-bitswap/testutil" + cid "github.com/ipfs/go-cid" +) + +func TestSessionWants(t *testing.T) { + sw := sessionWants{ + toFetch: newCidQueue(), + liveWants: make(map[cid.Cid]time.Time), + pastWants: cid.NewSet(), + } + cids := testutil.GenerateCids(10) + others := testutil.GenerateCids(1) + + // Expect these functions to return nothing on a new sessionWants + lws := sw.PrepareBroadcast() + if len(lws) > 0 { + t.Fatal("expected no broadcast wants") + } + lws = sw.LiveWants() + if len(lws) > 0 { + t.Fatal("expected no live wants") + } + if sw.HasLiveWants() { + t.Fatal("expected not to have live wants") + } + rw := sw.RandomLiveWant() + if rw.Defined() { + t.Fatal("expected no random want") + } + if sw.IsWanted(cids[0]) { + t.Fatal("expected cid to not be wanted") + } + if len(sw.FilterInteresting(cids)) > 0 { + t.Fatal("expected no interesting wants") + } + + // Add 10 new wants with a limit of 5 + // The first 5 cids should go into the toFetch queue + // The other 5 cids should go into the live want queue + // toFetch Live Past + // 98765 43210 + nextw := sw.GetNextWants(5, cids) + if len(nextw) != 5 { + t.Fatal("expected 5 next wants") + } + lws = sw.PrepareBroadcast() + if len(lws) != 5 { + t.Fatal("expected 5 broadcast wants") + } + lws = sw.LiveWants() + if len(lws) != 5 { + t.Fatal("expected 5 live wants") + } + if !sw.HasLiveWants() { + t.Fatal("expected to have live wants") + } + rw = sw.RandomLiveWant() + if !rw.Defined() { + t.Fatal("expected random want") + } + if !sw.IsWanted(cids[0]) { + t.Fatal("expected cid to be wanted") + } + if !sw.IsWanted(cids[9]) { + t.Fatal("expected cid to be wanted") + } + if len(sw.FilterInteresting([]cid.Cid{cids[0], cids[9], others[0]})) != 2 { + t.Fatal("expected 2 interesting wants") + } + + // Two wanted blocks and one other block are received. 
+ // The wanted blocks should be moved from the live wants queue + // to the past wants set (the other block CID should be ignored) + // toFetch Live Past + // 98765 432__ 10 + recvdCids := []cid.Cid{cids[0], cids[1], others[0]} + uniq := 0 + dup := 0 + sw.ForEachUniqDup(recvdCids, func() { uniq++ }, func() { dup++ }) + if uniq != 2 || dup != 0 { + t.Fatal("expected 2 uniqs / 0 dups", uniq, dup) + } + sw.BlocksReceived(recvdCids) + lws = sw.LiveWants() + if len(lws) != 3 { + t.Fatal("expected 3 live wants") + } + if sw.IsWanted(cids[0]) { + t.Fatal("expected cid to no longer be wanted") + } + if !sw.IsWanted(cids[9]) { + t.Fatal("expected cid to be wanted") + } + if len(sw.FilterInteresting([]cid.Cid{cids[0], cids[9], others[0]})) != 2 { + t.Fatal("expected 2 interesting wants") + } + + // Ask for next wants with a limit of 5 + // Should move 2 wants from toFetch queue to live wants + // toFetch Live Past + // 987__ 65432 10 + nextw = sw.GetNextWants(5, nil) + if len(nextw) != 2 { + t.Fatal("expected 2 next wants") + } + lws = sw.LiveWants() + if len(lws) != 5 { + t.Fatal("expected 5 live wants") + } + if !sw.IsWanted(cids[5]) { + t.Fatal("expected cid to be wanted") + } + + // One wanted block and one dup block are received. 
+ // The wanted block should be moved from the live wants queue + // to the past wants set + // toFetch Live Past + // 987 654_2 310 + recvdCids = []cid.Cid{cids[0], cids[3]} + uniq = 0 + dup = 0 + sw.ForEachUniqDup(recvdCids, func() { uniq++ }, func() { dup++ }) + if uniq != 1 || dup != 1 { + t.Fatal("expected 1 uniq / 1 dup", uniq, dup) + } + sw.BlocksReceived(recvdCids) + lws = sw.LiveWants() + if len(lws) != 4 { + t.Fatal("expected 4 live wants") + } + + // One block in the toFetch queue should be cancelled + // toFetch Live Past + // 9_7 654_2 310 + sw.CancelPending([]cid.Cid{cids[8]}) + lws = sw.LiveWants() + if len(lws) != 4 { + t.Fatal("expected 4 live wants") + } + if sw.IsWanted(cids[8]) { + t.Fatal("expected cid to no longer be wanted") + } + if len(sw.FilterInteresting([]cid.Cid{cids[0], cids[8]})) != 1 { + t.Fatal("expected 1 interesting wants") + } +} diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index 7e73bfe47..3ec30bbc0 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -17,7 +17,6 @@ import ( // Session is a session that is managed by the session manager type Session interface { exchange.Fetcher - InterestedIn(cid.Cid) bool ReceiveFrom(peer.ID, []cid.Cid) IsWanted(cid.Cid) bool } @@ -115,22 +114,20 @@ func (sm *SessionManager) GetNextSessionID() uint64 { return sm.sessID } -// ReceiveFrom receives blocks from a peer and dispatches to interested -// sessions. +// ReceiveFrom receives block CIDs from a peer and dispatches to sessions. 
func (sm *SessionManager) ReceiveFrom(from peer.ID, ks []cid.Cid) { sm.sessLk.Lock() defer sm.sessLk.Unlock() - // Only give each session the blocks / dups that it is interested in + var wg sync.WaitGroup for _, s := range sm.sessions { - sessKs := make([]cid.Cid, 0, len(ks)) - for _, k := range ks { - if s.session.InterestedIn(k) { - sessKs = append(sessKs, k) - } - } - s.session.ReceiveFrom(from, sessKs) + wg.Add(1) + go func() { + defer wg.Done() + s.session.ReceiveFrom(from, ks) + }() } + wg.Wait() } // IsWanted indicates whether any of the sessions are waiting to receive diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index 411aee702..2bd234cb5 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -18,12 +18,12 @@ import ( ) type fakeSession struct { - interested []cid.Cid - ks []cid.Cid - id uint64 - pm *fakePeerManager - srs *fakeRequestSplitter - notif notifications.PubSub + wanted []cid.Cid + ks []cid.Cid + id uint64 + pm *fakePeerManager + srs *fakeRequestSplitter + notif notifications.PubSub } func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { @@ -32,17 +32,14 @@ func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { func (*fakeSession) GetBlocks(context.Context, []cid.Cid) (<-chan blocks.Block, error) { return nil, nil } -func (fs *fakeSession) InterestedIn(c cid.Cid) bool { - for _, ic := range fs.interested { +func (fs *fakeSession) IsWanted(c cid.Cid) bool { + for _, ic := range fs.wanted { if c == ic { return true } } return false } -func (fs *fakeSession) IsWanted(c cid.Cid) bool { - return fs.InterestedIn(c) -} func (fs *fakeSession) ReceiveFrom(p peer.ID, ks []cid.Cid) { fs.ks = append(fs.ks, ks...) 
} @@ -66,7 +63,7 @@ func (frs *fakeRequestSplitter) SplitRequest(optimizedPeers []bssd.OptimizedPeer func (frs *fakeRequestSplitter) RecordDuplicateBlock() {} func (frs *fakeRequestSplitter) RecordUniqueBlock() {} -var nextInterestedIn []cid.Cid +var nextWanted []cid.Cid func sessionFactory(ctx context.Context, id uint64, @@ -76,11 +73,11 @@ func sessionFactory(ctx context.Context, provSearchDelay time.Duration, rebroadcastDelay delay.D) Session { return &fakeSession{ - interested: nextInterestedIn, - id: id, - pm: pm.(*fakePeerManager), - srs: srs.(*fakeRequestSplitter), - notif: notif, + wanted: nextWanted, + id: id, + pm: pm.(*fakePeerManager), + srs: srs.(*fakeRequestSplitter), + notif: notif, } } @@ -121,7 +118,7 @@ func TestAddingSessions(t *testing.T) { p := peer.ID(123) block := blocks.NewBlock([]byte("block")) // we'll be interested in all blocks for this test - nextInterestedIn = []cid.Cid{block.Cid()} + nextWanted = []cid.Cid{block.Cid()} currentID := sm.GetNextSessionID() firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) @@ -163,11 +160,11 @@ func TestReceivingBlocksWhenNotInterested(t *testing.T) { cids = append(cids, b.Cid()) } - nextInterestedIn = []cid.Cid{cids[0], cids[1]} + nextWanted = []cid.Cid{cids[0], cids[1]} firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - nextInterestedIn = []cid.Cid{cids[0]} + nextWanted = []cid.Cid{cids[0]} secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - nextInterestedIn = []cid.Cid{} + nextWanted = []cid.Cid{} thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) sm.ReceiveFrom(p, []cid.Cid{blks[0].Cid(), blks[1].Cid()}) @@ -193,9 +190,9 @@ func TestIsWanted(t *testing.T) { cids = append(cids, b.Cid()) } - nextInterestedIn = []cid.Cid{cids[0], cids[1]} + nextWanted = []cid.Cid{cids[0], cids[1]} _ = sm.NewSession(ctx, time.Second, 
delay.Fixed(time.Minute)).(*fakeSession) - nextInterestedIn = []cid.Cid{cids[0], cids[2]} + nextWanted = []cid.Cid{cids[0], cids[2]} _ = sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) if !sm.IsWanted(cids[0]) || @@ -218,7 +215,7 @@ func TestRemovingPeersWhenManagerContextCancelled(t *testing.T) { p := peer.ID(123) block := blocks.NewBlock([]byte("block")) // we'll be interested in all blocks for this test - nextInterestedIn = []cid.Cid{block.Cid()} + nextWanted = []cid.Cid{block.Cid()} firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) @@ -245,7 +242,7 @@ func TestRemovingPeersWhenSessionContextCancelled(t *testing.T) { p := peer.ID(123) block := blocks.NewBlock([]byte("block")) // we'll be interested in all blocks for this test - nextInterestedIn = []cid.Cid{block.Cid()} + nextWanted = []cid.Cid{block.Cid()} firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) sessionCtx, sessionCancel := context.WithCancel(ctx) secondSession := sm.NewSession(sessionCtx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) From 1e1cda6b937643df1a990487823f92f79822b5b2 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 22 Aug 2019 20:52:03 -0400 Subject: [PATCH 0809/1035] refactor: remove extraneous go routine This commit was moved from ipfs/go-bitswap@1e10d28b3d8a443f7010c9dc9b022091cfb21dac --- bitswap/sessionmanager/sessionmanager.go | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index 3ec30bbc0..cf3fe98d4 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -119,15 +119,9 @@ func (sm *SessionManager) ReceiveFrom(from peer.ID, ks []cid.Cid) 
{ sm.sessLk.Lock() defer sm.sessLk.Unlock() - var wg sync.WaitGroup for _, s := range sm.sessions { - wg.Add(1) - go func() { - defer wg.Done() - s.session.ReceiveFrom(from, ks) - }() + s.session.ReceiveFrom(from, ks) } - wg.Wait() } // IsWanted indicates whether any of the sessions are waiting to receive From cf6df497e14c9db661469a768da60cb83e98c179 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 22 Aug 2019 20:53:54 -0400 Subject: [PATCH 0810/1035] refactor: remove extraneous alloc This commit was moved from ipfs/go-bitswap@a2d6e30b10263d4dfd7f32c840eccf4f28af03ce --- bitswap/session/sessionwants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/session/sessionwants.go b/bitswap/session/sessionwants.go index 58684ae84..e32c34a7d 100644 --- a/bitswap/session/sessionwants.go +++ b/bitswap/session/sessionwants.go @@ -171,7 +171,7 @@ func (sw *sessionWants) FilterInteresting(ks []cid.Cid) []cid.Cid { sw.RLock() defer sw.RUnlock() - interested := make([]cid.Cid, 0, len(ks)) + var interested []cid.Cid for _, k := range ks { if sw.unlockedIsWanted(k) || sw.pastWants.Has(k) { interested = append(interested, k) From e305361fdc30568292ab9213553a15675e043b9c Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 22 Aug 2019 20:55:46 -0400 Subject: [PATCH 0811/1035] refactor: move timing outside lock This commit was moved from ipfs/go-bitswap@95de855189029bbcb8b8c0d02149616824a94af0 --- bitswap/session/sessionwants.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bitswap/session/sessionwants.go b/bitswap/session/sessionwants.go index e32c34a7d..fdf30cf31 100644 --- a/bitswap/session/sessionwants.go +++ b/bitswap/session/sessionwants.go @@ -19,6 +19,8 @@ type sessionWants struct { // measures latency. It returns the CIDs of blocks that were actually wanted // (as opposed to duplicates) and the total latency for all incoming blocks. 
func (sw *sessionWants) BlocksReceived(cids []cid.Cid) ([]cid.Cid, time.Duration) { + now := time.Now() + sw.Lock() defer sw.Unlock() @@ -31,7 +33,7 @@ func (sw *sessionWants) BlocksReceived(cids []cid.Cid) ([]cid.Cid, time.Duration // If the block CID was in the live wants queue, remove it tval, ok := sw.liveWants[c] if ok { - totalLatency += time.Since(tval) + totalLatency += now.Sub(tval) delete(sw.liveWants, c) } else { // Otherwise remove it from the toFetch queue, if it was there From ec1c50994f82cde31d8b204045ef7106c664120a Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 22 Aug 2019 21:10:37 -0400 Subject: [PATCH 0812/1035] refactor: move rand outside lock This commit was moved from ipfs/go-bitswap@84f61d6a980e13c07e4fd057613edf4746e0c1b8 --- bitswap/session/sessionwants.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bitswap/session/sessionwants.go b/bitswap/session/sessionwants.go index fdf30cf31..26eed8b93 100644 --- a/bitswap/session/sessionwants.go +++ b/bitswap/session/sessionwants.go @@ -1,6 +1,7 @@ package session import ( + "math" "math/rand" "sync" "time" @@ -133,13 +134,15 @@ func (sw *sessionWants) LiveWants() []cid.Cid { // RandomLiveWant returns a randomly selected live want func (sw *sessionWants) RandomLiveWant() cid.Cid { + r := rand.Float64() + sw.RLock() defer sw.RUnlock() if len(sw.liveWants) == 0 { return cid.Cid{} } - i := rand.Intn(len(sw.liveWants)) + i := math.Floor(r * float64(len(sw.liveWants))) // picking a random live want for k := range sw.liveWants { if i == 0 { From 1c929ce142ff93a1f38c2587055f78da9be8eb2b Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 22 Aug 2019 22:03:58 -0400 Subject: [PATCH 0813/1035] test: remove test that is no longer needed This commit was moved from ipfs/go-bitswap@ec9fb77f9698b7ed899c601595bc4da0f4e2facb --- bitswap/sessionmanager/sessionmanager_test.go | 31 ------------------- 1 file changed, 31 deletions(-) diff --git 
a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index 2bd234cb5..dfd3446c1 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -145,37 +145,6 @@ func TestAddingSessions(t *testing.T) { } } -func TestReceivingBlocksWhenNotInterested(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - notif := notifications.New() - defer notif.Shutdown() - sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory, notif) - - p := peer.ID(123) - blks := testutil.GenerateBlocksOfSize(3, 1024) - var cids []cid.Cid - for _, b := range blks { - cids = append(cids, b.Cid()) - } - - nextWanted = []cid.Cid{cids[0], cids[1]} - firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - nextWanted = []cid.Cid{cids[0]} - secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - nextWanted = []cid.Cid{} - thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - - sm.ReceiveFrom(p, []cid.Cid{blks[0].Cid(), blks[1].Cid()}) - - if !cmpSessionCids(firstSession, []cid.Cid{cids[0], cids[1]}) || - !cmpSessionCids(secondSession, []cid.Cid{cids[0]}) || - !cmpSessionCids(thirdSession, []cid.Cid{}) { - t.Fatal("did not receive correct blocks for sessions") - } -} - func TestIsWanted(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) From fa0a844b743ba104d21f46444a945b9a6f9e68cc Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 23 Aug 2019 09:32:04 -0400 Subject: [PATCH 0814/1035] refactor: cheaper rand want selection This commit was moved from ipfs/go-bitswap@6197217642d193a897065d86782ad3719c1021dc --- bitswap/session/sessionwants.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/bitswap/session/sessionwants.go b/bitswap/session/sessionwants.go index 
26eed8b93..aa487f121 100644 --- a/bitswap/session/sessionwants.go +++ b/bitswap/session/sessionwants.go @@ -1,7 +1,6 @@ package session import ( - "math" "math/rand" "sync" "time" @@ -134,7 +133,7 @@ func (sw *sessionWants) LiveWants() []cid.Cid { // RandomLiveWant returns a randomly selected live want func (sw *sessionWants) RandomLiveWant() cid.Cid { - r := rand.Float64() + i := rand.Uint64() sw.RLock() defer sw.RUnlock() @@ -142,7 +141,7 @@ func (sw *sessionWants) RandomLiveWant() cid.Cid { if len(sw.liveWants) == 0 { return cid.Cid{} } - i := math.Floor(r * float64(len(sw.liveWants))) + i %= uint64(len(sw.liveWants)) // picking a random live want for k := range sw.liveWants { if i == 0 { From e2ae1fe9bfad7aba71ee41ce691d563c91ce7a16 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 23 Aug 2019 09:34:04 -0400 Subject: [PATCH 0815/1035] refactor: remove unused code This commit was moved from ipfs/go-bitswap@312b40bae0b61bda59184475212f3ac4904079c8 --- bitswap/sessionmanager/sessionmanager_test.go | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index dfd3446c1..95c12b128 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -89,24 +89,6 @@ func requestSplitterFactory(ctx context.Context) bssession.RequestSplitter { return &fakeRequestSplitter{} } -func cmpSessionCids(s *fakeSession, cids []cid.Cid) bool { - if len(s.ks) != len(cids) { - return false - } - for _, bk := range s.ks { - has := false - for _, c := range cids { - if c == bk { - has = true - } - } - if !has { - return false - } - } - return true -} - func TestAddingSessions(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) From 8156534d98b0191d7d1292f2141443d012f9b378 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 27 Aug 2019 14:24:52 -0700 Subject: [PATCH 0816/1035] sessions: fix a 
small memory leak This commit was moved from ipfs/go-bitswap@863aa22c4d4931570483dc9362c5c4ec94b4f4cd --- bitswap/sessionmanager/sessionmanager.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index cf3fe98d4..f12896d9f 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -100,6 +100,7 @@ func (sm *SessionManager) removeSession(session sesTrk) { for i := 0; i < len(sm.sessions); i++ { if sm.sessions[i] == session { sm.sessions[i] = sm.sessions[len(sm.sessions)-1] + sm.sessions[len(sm.sessions)-1] = sesTrk{} // free memory. sm.sessions = sm.sessions[:len(sm.sessions)-1] return } From 3fe7ce14004b44ed7bb26d71ed4c4eb00c9c86ea Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 27 Aug 2019 14:25:52 -0700 Subject: [PATCH 0817/1035] sessionmanager: allow concurrent receive/wanted checks This commit was moved from ipfs/go-bitswap@1fd68ed72265140e16611e9e6fe1fca847235a85 --- bitswap/sessionmanager/sessionmanager.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index f12896d9f..c967a04a4 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -46,7 +46,7 @@ type SessionManager struct { notif notifications.PubSub // Sessions - sessLk sync.Mutex + sessLk sync.RWMutex sessions []sesTrk // Session Index @@ -117,8 +117,8 @@ func (sm *SessionManager) GetNextSessionID() uint64 { // ReceiveFrom receives block CIDs from a peer and dispatches to sessions. 
func (sm *SessionManager) ReceiveFrom(from peer.ID, ks []cid.Cid) { - sm.sessLk.Lock() - defer sm.sessLk.Unlock() + sm.sessLk.RLock() + defer sm.sessLk.RUnlock() for _, s := range sm.sessions { s.session.ReceiveFrom(from, ks) @@ -128,8 +128,8 @@ func (sm *SessionManager) ReceiveFrom(from peer.ID, ks []cid.Cid) { // IsWanted indicates whether any of the sessions are waiting to receive // the block with the given CID. func (sm *SessionManager) IsWanted(cid cid.Cid) bool { - sm.sessLk.Lock() - defer sm.sessLk.Unlock() + sm.sessLk.RLock() + defer sm.sessLk.RUnlock() for _, s := range sm.sessions { if s.session.IsWanted(cid) { From d5d60c9586aa7541f38d2acce6338a2ba87595f3 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 27 Aug 2019 14:28:23 -0700 Subject: [PATCH 0818/1035] session: buffer some request channels We're not using these synchronously so we can buffer them a bit to avoid blocking quite as much. This also combines all incoming channels into a single one to ensure all operations are processed in-order. This might be overkill bit it makes reasoning about this a bit simpler. This commit was moved from ipfs/go-bitswap@8454ba009515209fc7cc74e320a8a03ee993def4 --- bitswap/session/session.go | 64 ++++++++++++++++++++++---------------- 1 file changed, 37 insertions(+), 27 deletions(-) diff --git a/bitswap/session/session.go b/bitswap/session/session.go index d2263aa61..6c8363550 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -45,9 +45,18 @@ type RequestSplitter interface { RecordUniqueBlock() } -type rcvFrom struct { +type opType int + +const ( + opReceive opType = iota + opWant + opCancel +) + +type op struct { + op opType from peer.ID - ks []cid.Cid + keys []cid.Cid } // Session holds state for an individual bitswap transfer operation. 
@@ -63,9 +72,7 @@ type Session struct { sw sessionWants // channels - incoming chan rcvFrom - newReqs chan []cid.Cid - cancelKeys chan []cid.Cid + incoming chan op latencyReqs chan chan time.Duration tickDelayReqs chan time.Duration @@ -100,15 +107,13 @@ func New(ctx context.Context, liveWants: make(map[cid.Cid]time.Time), pastWants: cid.NewSet(), }, - newReqs: make(chan []cid.Cid), - cancelKeys: make(chan []cid.Cid), latencyReqs: make(chan chan time.Duration), tickDelayReqs: make(chan time.Duration), ctx: ctx, wm: wm, pm: pm, srs: srs, - incoming: make(chan rcvFrom), + incoming: make(chan op, 16), notif: notif, uuid: loggables.Uuid("GetBlockRequest"), baseTickDelay: time.Millisecond * 500, @@ -130,7 +135,7 @@ func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid) { } select { - case s.incoming <- rcvFrom{from: from, ks: interested}: + case s.incoming <- op{op: opReceive, from: from, keys: interested}: case <-s.ctx.Done(): } } @@ -154,14 +159,14 @@ func (s *Session) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks. 
return bsgetter.AsyncGetBlocks(ctx, s.ctx, keys, s.notif, func(ctx context.Context, keys []cid.Cid) { select { - case s.newReqs <- keys: + case s.incoming <- op{op: opWant, keys: keys}: case <-ctx.Done(): case <-s.ctx.Done(): } }, func(keys []cid.Cid) { select { - case s.cancelKeys <- keys: + case s.incoming <- op{op: opCancel, keys: keys}: case <-s.ctx.Done(): } }, @@ -200,12 +205,17 @@ func (s *Session) run(ctx context.Context) { s.periodicSearchTimer = time.NewTimer(s.periodicSearchDelay.NextWaitTime()) for { select { - case rcv := <-s.incoming: - s.handleIncoming(ctx, rcv) - case keys := <-s.newReqs: - s.wantBlocks(ctx, keys) - case keys := <-s.cancelKeys: - s.sw.CancelPending(keys) + case oper := <-s.incoming: + switch oper.op { + case opReceive: + s.handleReceive(ctx, oper.from, oper.keys) + case opWant: + s.wantBlocks(ctx, oper.keys) + case opCancel: + s.sw.CancelPending(oper.keys) + default: + panic("unhandled operation") + } case <-s.idleTick.C: s.handleIdleTick(ctx) case <-s.periodicSearchTimer.C: @@ -261,15 +271,15 @@ func (s *Session) handleShutdown() { s.wm.CancelWants(s.ctx, live, nil, s.id) } -func (s *Session) handleIncoming(ctx context.Context, rcv rcvFrom) { +func (s *Session) handleReceive(ctx context.Context, from peer.ID, keys []cid.Cid) { // Record statistics only if the blocks came from the network // (blocks can also be received from the local node) - if rcv.from != "" { - s.updateReceiveCounters(ctx, rcv) + if from != "" { + s.updateReceiveCounters(ctx, from, keys) } // Update the want list - wanted, totalLatency := s.sw.BlocksReceived(rcv.ks) + wanted, totalLatency := s.sw.BlocksReceived(keys) if len(wanted) == 0 { return } @@ -280,18 +290,18 @@ func (s *Session) handleIncoming(ctx context.Context, rcv rcvFrom) { s.idleTick.Stop() // Process the received blocks - s.processIncoming(ctx, wanted, totalLatency) + s.processReceive(ctx, wanted, totalLatency) s.resetIdleTick() } -func (s *Session) updateReceiveCounters(ctx context.Context, rcv 
rcvFrom) { +func (s *Session) updateReceiveCounters(ctx context.Context, from peer.ID, keys []cid.Cid) { // Record unique vs duplicate blocks - s.sw.ForEachUniqDup(rcv.ks, s.srs.RecordUniqueBlock, s.srs.RecordDuplicateBlock) + s.sw.ForEachUniqDup(keys, s.srs.RecordUniqueBlock, s.srs.RecordDuplicateBlock) // Record response (to be able to time latency) - if len(rcv.ks) > 0 { - s.pm.RecordPeerResponse(rcv.from, rcv.ks) + if len(keys) > 0 { + s.pm.RecordPeerResponse(from, keys) } } @@ -300,7 +310,7 @@ func (s *Session) cancelIncoming(ctx context.Context, ks []cid.Cid) { s.wm.CancelWants(s.ctx, ks, nil, s.id) } -func (s *Session) processIncoming(ctx context.Context, ks []cid.Cid, totalLatency time.Duration) { +func (s *Session) processReceive(ctx context.Context, ks []cid.Cid, totalLatency time.Duration) { // Keep track of the total number of blocks received and total latency s.fetchcnt += len(ks) s.latTotal += totalLatency From bc6030a211cb9aff9fbd70f3b9dfb836628cfa70 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 6 Sep 2019 17:03:06 -0700 Subject: [PATCH 0819/1035] engine: tag peers based on usefulness MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch tracks two usefulness metrics: short-term usefulness and long-term usefulness. Short-term usefulness is sampled frequently and highly weights new observations. Long-term usefulness is sampled less frequently and highly weights on long-term trends. In practice, we do this by keeping two EWMAs. If we see an interaction within the sampling period, we record the score, otherwise, we record a 0. The short-term one has a high alpha and is sampled every shortTerm period. The long-term one has a low alpha and is sampled every longTermRatio*shortTerm period. To calculate the final score, we sum the short-term and long-term scores then adjust it ±25% based on our debt ratio. Peers that have historically been more useful to us than we are to them get the highest score. 
This commit was moved from ipfs/go-bitswap@9d580a65c9baf698f32f6210c5a03787bbf1123f --- bitswap/decision/engine.go | 136 ++++++++++++++++++++++++++++++++++--- bitswap/decision/ewma.go | 5 ++ bitswap/decision/ledger.go | 25 +++++-- 3 files changed, 151 insertions(+), 15 deletions(-) create mode 100644 bitswap/decision/ewma.go diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 94b5ae5e5..ae4377921 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -57,11 +57,35 @@ const ( outboxChanBuffer = 0 // maxMessageSize is the maximum size of the batched payload maxMessageSize = 512 * 1024 - // tagPrefix is the tag given to peers associated an engine - tagPrefix = "bs-engine-%s" + // tagFormat is the tag given to peers associated an engine + tagFormat = "bs-engine-%s-%s" - // tagWeight is the default weight for peers associated with an engine - tagWeight = 5 + // queuedTagWeight is the default weight for peers that have work queued + // on their behalf. + queuedTagWeight = 10 + + // the alpha for the EWMA used to track short term usefulness + shortTermAlpha = 0.5 + + // the alpha for the EWMA used to track long term usefulness + longTermAlpha = 0.05 + + // long term ratio defines what "long term" means in terms of the + // shortTerm duration. Peers that interact once every longTermRatio are + // considered useful over the long term. + longTermRatio = 10 + + // long/short term scores for tagging peers + longTermScore = 10 // this is a high tag but it grows _very_ slowly. + shortTermScore = 10 // this is a high tag but it'll go away quickly if we aren't using the peer. +) + +var ( + // how frequently the engine should sample usefulness. Peers that + // interact every shortTerm time period are considered "active". + // + // this is only a variable to make testing easier. + shortTerm = 10 * time.Second ) // Envelope contains a message for a Peer. 
@@ -105,7 +129,8 @@ type Engine struct { peerTagger PeerTagger - tag string + tagQueued, tagUseful string + lock sync.Mutex // protects the fields immediatly below // ledgerMap lists Ledgers by their Partner key. ledgerMap map[peer.ID]*ledger @@ -123,18 +148,113 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger) workSignal: make(chan struct{}, 1), ticker: time.NewTicker(time.Millisecond * 100), } - e.tag = fmt.Sprintf(tagPrefix, uuid.New().String()) + e.tagQueued = fmt.Sprintf(tagFormat, "queued", uuid.New().String()) + e.tagUseful = fmt.Sprintf(tagFormat, "useful", uuid.New().String()) e.peerRequestQueue = peertaskqueue.New(peertaskqueue.OnPeerAddedHook(e.onPeerAdded), peertaskqueue.OnPeerRemovedHook(e.onPeerRemoved)) go e.taskWorker(ctx) + go e.scoreWorker(ctx) return e } +// scoreWorker keeps track of how "useful" our peers are, updating scores in the +// connection manager. +// +// It does this by tracking two scores: short-term usefulness and long-term +// usefulness. Short-term usefulness is sampled frequently and highly weights +// new observations. Long-term usefulness is sampled less frequently and highly +// weights on long-term trends. +// +// In practice, we do this by keeping two EWMAs. If we see an interaction +// within the sampling period, we record the score, otherwise, we record a 0. +// The short-term one has a high alpha and is sampled every shortTerm period. +// The long-term one has a low alpha and is sampled every +// longTermRatio*shortTerm period. +// +// To calculate the final score, we sum the short-term and long-term scores then +// adjust it ±25% based on our debt ratio. Peers that have historically been more useful to us than we are to them get the highest score. 
+func (e *Engine) scoreWorker(ctx context.Context) { + ticker := time.NewTicker(shortTerm) + defer ticker.Stop() + + type update struct { + peer peer.ID + score int + } + var ( + lastShortUpdate, lastLongUpdate time.Time + updates []update + ) + + for i := 0; ; i = (i + 1) % longTermRatio { + var now time.Time + select { + case now = <-ticker.C: + case <-ctx.Done(): + return + } + + // The long term update ticks every `longTermRatio` short + // intervals. + updateLong := i == 0 + + e.lock.Lock() + for _, ledger := range e.ledgerMap { + ledger.lk.Lock() + + // Update the short-term score. + if ledger.lastExchange.After(lastShortUpdate) { + ledger.shortScore = ewma(ledger.shortScore, shortTermScore, shortTermAlpha) + } else { + ledger.shortScore = ewma(ledger.shortScore, 0, shortTermAlpha) + } + + // Update the long-term score. + if updateLong { + if ledger.lastExchange.After(lastLongUpdate) { + ledger.longScore = ewma(ledger.longScore, longTermScore, longTermAlpha) + } else { + ledger.longScore = ewma(ledger.longScore, 0, longTermAlpha) + } + } + + // Calculate the new score. + score := int((ledger.shortScore + ledger.longScore) * ((ledger.Accounting.Score())*.5 + .75)) + + // Avoid updating the connection manager unless there's a change. This can be expensive. + if ledger.score != score { + // put these in a list so we can perform the updates outside _global_ the lock. + updates = append(updates, update{ledger.Partner, score}) + ledger.score = score + } + ledger.lk.Unlock() + } + e.lock.Unlock() + + // record the times. + lastShortUpdate = now + if updateLong { + lastLongUpdate = now + } + + // apply the updates + for _, update := range updates { + if update.score == 0 { + e.peerTagger.UntagPeer(update.peer, e.tagUseful) + } else { + e.peerTagger.TagPeer(update.peer, e.tagUseful, update.score) + } + } + // Keep the memory. It's not much and it saves us from having to allocate. 
+ updates = updates[:0] + } +} + func (e *Engine) onPeerAdded(p peer.ID) { - e.peerTagger.TagPeer(p, e.tag, tagWeight) + e.peerTagger.TagPeer(p, e.tagQueued, queuedTagWeight) } func (e *Engine) onPeerRemoved(p peer.ID) { - e.peerTagger.UntagPeer(p, e.tag) + e.peerTagger.UntagPeer(p, e.tagQueued) } // WantlistForPeer returns the currently understood want list for a given peer diff --git a/bitswap/decision/ewma.go b/bitswap/decision/ewma.go new file mode 100644 index 000000000..80d7d86b6 --- /dev/null +++ b/bitswap/decision/ewma.go @@ -0,0 +1,5 @@ +package decision + +func ewma(old, new, alpha float64) float64 { + return new*alpha + (1-alpha)*old +} diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 12eca63b3..277daaa2c 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -12,9 +12,8 @@ import ( func newLedger(p peer.ID) *ledger { return &ledger{ - wantList: wl.New(), - Partner: p, - sentToPeer: make(map[string]time.Time), + wantList: wl.New(), + Partner: p, } } @@ -30,16 +29,19 @@ type ledger struct { // lastExchange is the time of the last data exchange. lastExchange time.Time + // These scores keep track of how useful we think this peer is. Short + // tracks short-term usefulness and long tracks long-term usefulness. + shortScore, longScore float64 + // Score keeps track of the score used in the peer tagger. We track it + // here to avoid unnecessarily updating the tags in the connection manager. + score int + // exchangeCount is the number of exchanges with this peer exchangeCount uint64 // wantList is a (bounded, small) set of keys that Partner desires. 
wantList *wl.Wantlist - // sentToPeer is a set of keys to ensure we dont send duplicate blocks - // to a given peer - sentToPeer map[string]time.Time - // ref is the reference count for this ledger, its used to ensure we // don't drop the reference to this ledger in multi-connection scenarios ref int @@ -63,10 +65,19 @@ type debtRatio struct { BytesRecv uint64 } +// Value returns the debt ratio, sent:receive. func (dr *debtRatio) Value() float64 { return float64(dr.BytesSent) / float64(dr.BytesRecv+1) } +// Score returns the debt _score_ on a 0-1 scale. +func (dr *debtRatio) Score() float64 { + if dr.BytesRecv == 0 { + return 0 + } + return float64(dr.BytesRecv) / float64(dr.BytesRecv+dr.BytesSent) +} + func (l *ledger) SentBytes(n int) { l.exchangeCount++ l.lastExchange = time.Now() From c1199007752c83154f432d8d90eb577d48e6084e Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 6 Sep 2019 19:02:51 -0700 Subject: [PATCH 0820/1035] engine(test): make the test peer tagger more reliable This commit was moved from ipfs/go-bitswap@cdc87be03386742f05230bcd099abb9b0017068e --- bitswap/decision/engine_test.go | 63 +++++++++++++++++++++++---------- 1 file changed, 44 insertions(+), 19 deletions(-) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 5202ce631..22a30597d 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -19,38 +19,63 @@ import ( testutil "github.com/libp2p/go-libp2p-core/test" ) +type peerTag struct { + done chan struct{} + peers map[peer.ID]int +} + type fakePeerTagger struct { - lk sync.Mutex - wait sync.WaitGroup - taggedPeers []peer.ID + lk sync.Mutex + tags map[string]*peerTag } func (fpt *fakePeerTagger) TagPeer(p peer.ID, tag string, n int) { - fpt.wait.Add(1) - fpt.lk.Lock() defer fpt.lk.Unlock() - fpt.taggedPeers = append(fpt.taggedPeers, p) + if fpt.tags == nil { + fpt.tags = make(map[string]*peerTag, 1) + } + pt, ok := fpt.tags[tag] + if !ok { + pt = &peerTag{peers: 
make(map[peer.ID]int, 1), done: make(chan struct{})} + fpt.tags[tag] = pt + } + pt.peers[p] = n } func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) { - defer fpt.wait.Done() - fpt.lk.Lock() defer fpt.lk.Unlock() - for i := 0; i < len(fpt.taggedPeers); i++ { - if fpt.taggedPeers[i] == p { - fpt.taggedPeers[i] = fpt.taggedPeers[len(fpt.taggedPeers)-1] - fpt.taggedPeers = fpt.taggedPeers[:len(fpt.taggedPeers)-1] - return - } + pt := fpt.tags[tag] + if pt == nil { + return + } + delete(pt.peers, p) + if len(pt.peers) == 0 { + close(pt.done) + delete(fpt.tags, tag) } } -func (fpt *fakePeerTagger) count() int { +func (fpt *fakePeerTagger) count(tag string) int { fpt.lk.Lock() defer fpt.lk.Unlock() - return len(fpt.taggedPeers) + if pt, ok := fpt.tags[tag]; ok { + return len(pt.peers) + } + return 0 +} + +func (fpt *fakePeerTagger) wait(tag string) { + fpt.lk.Lock() + pt := fpt.tags[tag] + if pt == nil { + fpt.lk.Unlock() + return + } + doneCh := pt.done + fpt.lk.Unlock() + <-doneCh } type engineSet struct { @@ -241,13 +266,13 @@ func TestTaggingPeers(t *testing.T) { next := <-sanfrancisco.Engine.Outbox() envelope := <-next - if sanfrancisco.PeerTagger.count() != 1 { + if sanfrancisco.PeerTagger.count(sanfrancisco.Engine.tagQueued) != 1 { t.Fatal("Incorrect number of peers tagged") } envelope.Sent() <-sanfrancisco.Engine.Outbox() - sanfrancisco.PeerTagger.wait.Wait() - if sanfrancisco.PeerTagger.count() != 0 { + sanfrancisco.PeerTagger.wait(sanfrancisco.Engine.tagQueued) + if sanfrancisco.PeerTagger.count(sanfrancisco.Engine.tagQueued) != 0 { t.Fatal("Peers should be untagged but weren't") } } From 70aff8b3d94bb6ba592eff4c87e6ecc613d9b9e2 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 6 Sep 2019 19:02:54 -0700 Subject: [PATCH 0821/1035] engine(test): test peer usefulness tagging This commit was moved from ipfs/go-bitswap@1f09ef51e9b7d3f9329cffc2a23ec8537d0d9a04 --- bitswap/decision/engine_test.go | 40 +++++++++++++++++++++++++++++++++ 1 file changed, 40 
insertions(+) diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 22a30597d..d5adaa87e 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -276,6 +276,46 @@ func TestTaggingPeers(t *testing.T) { t.Fatal("Peers should be untagged but weren't") } } + +func TestTaggingUseful(t *testing.T) { + oldShortTerm := shortTerm + shortTerm = 1 * time.Millisecond + defer func() { shortTerm = oldShortTerm }() + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + me := newEngine(ctx, "engine") + friend := peer.ID("friend") + + block := blocks.NewBlock([]byte("foobar")) + msg := message.New(false) + msg.AddBlock(block) + + for i := 0; i < 3; i++ { + if me.PeerTagger.count(me.Engine.tagUseful) != 0 { + t.Fatal("Peers should be untagged but weren't") + } + me.Engine.MessageSent(friend, msg) + time.Sleep(shortTerm * 2) + if me.PeerTagger.count(me.Engine.tagUseful) != 1 { + t.Fatal("Peers should be tagged but weren't") + } + time.Sleep(shortTerm * 8) + } + + if me.PeerTagger.count(me.Engine.tagUseful) == 0 { + t.Fatal("peers should still be tagged due to long-term usefulness") + } + time.Sleep(shortTerm * 2) + if me.PeerTagger.count(me.Engine.tagUseful) == 0 { + t.Fatal("peers should still be tagged due to long-term usefulness") + } + time.Sleep(shortTerm * 10) + if me.PeerTagger.count(me.Engine.tagUseful) != 0 { + t.Fatal("peers should finally be untagged") + } +} + func partnerWants(e *Engine, keys []string, partner peer.ID) { add := message.New(false) for i, letter := range keys { From 430f48caf216ff43501759e34e0f6fa2ad9d53f1 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 9 Sep 2019 05:50:18 -0700 Subject: [PATCH 0822/1035] doc: add dirk as the lead maintainer (#190) * doc: add dirk as the lead maintainer One of my tasks this quarter is to get a lead maintainer for each repo. 
This commit was moved from ipfs/go-bitswap@5fa55e8ae371d16bceeb4300ce7b3222e50e6a06 --- bitswap/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bitswap/README.md b/bitswap/README.md index 062fbb625..63918cfd7 100644 --- a/bitswap/README.md +++ b/bitswap/README.md @@ -9,6 +9,9 @@ go-bitswap > An implementation of the bitswap protocol in go! +## Lead Maintainer + +[Dirk McCormick](https://github.com/dirkmc) ## Table of Contents From 620452fa0758ae9ae4909b5d9cf6a821bde7e421 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 9 Sep 2019 07:48:52 -0700 Subject: [PATCH 0823/1035] engine(doc): comment on why we have the score adjustment This commit was moved from ipfs/go-bitswap@fcb13fc986c1aacdecd51508938a489fefec307f --- bitswap/decision/engine.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index ae4377921..6532061a4 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -171,7 +171,8 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger) // longTermRatio*shortTerm period. // // To calculate the final score, we sum the short-term and long-term scores then -// adjust it ±25% based on our debt ratio. Peers that have historically been more useful to us than we are to them get the highest score. +// adjust it ±25% based on our debt ratio. Peers that have historically been +// more useful to us than we are to them get the highest score. func (e *Engine) scoreWorker(ctx context.Context) { ticker := time.NewTicker(shortTerm) defer ticker.Stop() @@ -218,6 +219,10 @@ func (e *Engine) scoreWorker(ctx context.Context) { } // Calculate the new score. + // + // The accounting score adjustment prefers peers _we_ + // need over peers that need us. This doesn't help with + // leeching. 
score := int((ledger.shortScore + ledger.longScore) * ((ledger.Accounting.Score())*.5 + .75)) // Avoid updating the connection manager unless there's a change. This can be expensive. From 34dda4c752c6a15db0db053ca1c486f8e706f3d7 Mon Sep 17 00:00:00 2001 From: swedneck <40505480+swedneck@users.noreply.github.com> Date: Tue, 24 Sep 2019 17:40:07 +0200 Subject: [PATCH 0824/1035] Add bridged chats This commit was moved from ipfs/go-bitswap@fd79e68d6d6fdd13c1eecae82d6cf2a3e5889281 --- bitswap/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bitswap/README.md b/bitswap/README.md index 63918cfd7..28f07ff98 100644 --- a/bitswap/README.md +++ b/bitswap/README.md @@ -3,7 +3,9 @@ go-bitswap [![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) [![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) -[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![Matrix](https://img.shields.io/badge/matrix-%23ipfs%3Amatrix.org-blue.svg?style=flat-square)](https://matrix.to/#/#ipfs:matrix.org) +[![IRC](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![Discord](https://img.shields.io/discord/475789330380488707?color=blueviolet&label=discord&style=flat-square)](https://discord.gg/24fmuwR) [![Coverage Status](https://codecov.io/gh/ipfs/go-bitswap/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs/go-bitswap/branch/master) [![Build Status](https://circleci.com/gh/ipfs/go-bitswap.svg?style=svg)](https://circleci.com/gh/ipfs/go-bitswap) From 218206573ca42bb0d093d008d34a7f78bc1daa56 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 15 Oct 2019 22:51:50 +0900 Subject: [PATCH 0825/1035] chore(proto): regenerate protobuf code This commit was moved from ipfs/go-bitswap@7bf5678860cf89c66d475e017e3af726a1eb371e --- 
bitswap/message/pb/message.pb.go | 254 +++++++++++++++---------------- 1 file changed, 127 insertions(+), 127 deletions(-) diff --git a/bitswap/message/pb/message.pb.go b/bitswap/message/pb/message.pb.go index 34eacb298..adf14da87 100644 --- a/bitswap/message/pb/message.pb.go +++ b/bitswap/message/pb/message.pb.go @@ -9,6 +9,7 @@ import ( proto "github.com/gogo/protobuf/proto" io "io" math "math" + math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. @@ -20,7 +21,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type Message struct { Wantlist Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist,proto3" json:"wantlist"` @@ -42,7 +43,7 @@ func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Message.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -101,7 +102,7 @@ func (m *Message_Wantlist) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return xxx_messageInfo_Message_Wantlist.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -154,7 +155,7 @@ func (m *Message_Wantlist_Entry) XXX_Marshal(b []byte, deterministic bool) ([]by return xxx_messageInfo_Message_Wantlist_Entry.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -213,7 +214,7 @@ func (m *Message_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return xxx_messageInfo_Message_Block.Marshal(b, m, 
deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -283,7 +284,7 @@ var fileDescriptor_33c57e4bae7b9afd = []byte{ func (m *Message) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -291,45 +292,55 @@ func (m *Message) Marshal() (dAtA []byte, err error) { } func (m *Message) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintMessage(dAtA, i, uint64(m.Wantlist.Size())) - n1, err := m.Wantlist.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Payload) > 0 { + for iNdEx := len(m.Payload) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Payload[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - i += n1 if len(m.Blocks) > 0 { - for _, b := range m.Blocks { + for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Blocks[iNdEx]) + copy(dAtA[i:], m.Blocks[iNdEx]) + i = encodeVarintMessage(dAtA, i, uint64(len(m.Blocks[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintMessage(dAtA, i, uint64(len(b))) - i += copy(dAtA[i:], b) } } - if len(m.Payload) > 0 { - for _, msg := range m.Payload { - dAtA[i] = 0x1a - i++ - i = encodeVarintMessage(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.Wantlist.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintMessage(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m 
*Message_Wantlist) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -337,39 +348,46 @@ func (m *Message_Wantlist) Marshal() (dAtA []byte, err error) { } func (m *Message_Wantlist) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_Wantlist) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Entries) > 0 { - for _, msg := range m.Entries { - dAtA[i] = 0xa - i++ - i = encodeVarintMessage(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } if m.Full { - dAtA[i] = 0x10 - i++ + i-- if m.Full { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x10 + } + if len(m.Entries) > 0 { + for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - return i, nil + return len(dAtA) - i, nil } func (m *Message_Wantlist_Entry) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -377,38 +395,44 @@ func (m *Message_Wantlist_Entry) Marshal() (dAtA []byte, err error) { } func (m *Message_Wantlist_Entry) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_Wantlist_Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Block) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintMessage(dAtA, i, uint64(len(m.Block))) - i += copy(dAtA[i:], m.Block) - } - if m.Priority != 0 { - 
dAtA[i] = 0x10 - i++ - i = encodeVarintMessage(dAtA, i, uint64(m.Priority)) - } if m.Cancel { - dAtA[i] = 0x18 - i++ + i-- if m.Cancel { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x18 + } + if m.Priority != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x10 + } + if len(m.Block) > 0 { + i -= len(m.Block) + copy(dAtA[i:], m.Block) + i = encodeVarintMessage(dAtA, i, uint64(len(m.Block))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *Message_Block) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -416,33 +440,42 @@ func (m *Message_Block) Marshal() (dAtA []byte, err error) { } func (m *Message_Block) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_Block) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Prefix) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintMessage(dAtA, i, uint64(len(m.Prefix))) - i += copy(dAtA[i:], m.Prefix) - } if len(m.Data) > 0 { - dAtA[i] = 0x12 - i++ + i -= len(m.Data) + copy(dAtA[i:], m.Data) i = encodeVarintMessage(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) + i-- + dAtA[i] = 0x12 + } + if len(m.Prefix) > 0 { + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = encodeVarintMessage(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintMessage(dAtA []byte, offset int, v uint64) int { + offset -= sovMessage(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Message) Size() (n int) { if m == nil { @@ -522,14 +555,7 @@ func (m *Message_Block) Size() (n int) { } func sovMessage(x uint64) (n int) { - for 
{ - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozMessage(x uint64) (n int) { return sovMessage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1043,6 +1069,7 @@ func (m *Message_Block) Unmarshal(dAtA []byte) error { func skipMessage(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1074,10 +1101,8 @@ func skipMessage(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1098,55 +1123,30 @@ func skipMessage(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthMessage } iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthMessage - } - return iNdEx, nil case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMessage - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipMessage(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthMessage - } - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMessage + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthMessage + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthMessage = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMessage = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthMessage = 
fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMessage = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMessage = fmt.Errorf("proto: unexpected end of group") ) From b387ab5ce62cb77de619a200f855f3dd49fcc423 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Mon, 28 Oct 2019 15:11:57 -0400 Subject: [PATCH 0826/1035] Merge PR Parallelize engine reads (#216) * feat: parallelize reads * feat: concurent engine task workers and concurrent bstore reads * fix: lint * fix: address review comments * refactor: in BlockstoreManager wait for process.Closing() instead of Context.Done() * fix: use channel size 0 for BlockstoreManager reads * fix: change blockstore error logs from warnings to errors * fix: flaky test * fix: lint This commit was moved from ipfs/go-bitswap@dcbe1f29c433e1c85705f7239d189e9aed910f96 --- bitswap/bitswap.go | 6 +- bitswap/decision/blockstoremanager.go | 118 ++++++++++ bitswap/decision/blockstoremanager_test.go | 251 +++++++++++++++++++++ bitswap/decision/engine.go | 95 +++++--- bitswap/decision/engine_test.go | 24 +- 5 files changed, 456 insertions(+), 38 deletions(-) create mode 100644 bitswap/decision/blockstoremanager.go create mode 100644 bitswap/decision/blockstoremanager_test.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index c42d80adc..93759802b 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -130,9 +130,10 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, } notif := notifications.New() + engine := decision.NewEngine(ctx, bstore, network.ConnectionManager()) // TODO close the engine with Close() method bs := &Bitswap{ blockstore: bstore, - engine: decision.NewEngine(ctx, bstore, network.ConnectionManager()), // TODO close the engine with Close() method + engine: engine, network: network, process: px, newBlocks: make(chan cid.Cid, HasBlockBufferSize), @@ -161,6 +162,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, // Start up bitswaps async worker 
routines bs.startWorkers(ctx, px) + engine.StartWorkers(ctx, px) // bind the context and process. // do it over here to avoid closing before all setup is done. @@ -372,7 +374,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg // This call records changes to wantlists, blocks received, // and number of bytes transfered. - bs.engine.MessageReceived(p, incoming) + bs.engine.MessageReceived(ctx, p, incoming) // TODO: this is bad, and could be easily abused. // Should only track *useful* messages in ledger diff --git a/bitswap/decision/blockstoremanager.go b/bitswap/decision/blockstoremanager.go new file mode 100644 index 000000000..e97bbdda5 --- /dev/null +++ b/bitswap/decision/blockstoremanager.go @@ -0,0 +1,118 @@ +package decision + +import ( + "context" + "sync" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + bstore "github.com/ipfs/go-ipfs-blockstore" + process "github.com/jbenet/goprocess" +) + +// blockstoreManager maintains a pool of workers that make requests to the blockstore. 
+type blockstoreManager struct { + bs bstore.Blockstore + workerCount int + jobs chan func() + px process.Process +} + +// newBlockstoreManager creates a new blockstoreManager with the given context +// and number of workers +func newBlockstoreManager(ctx context.Context, bs bstore.Blockstore, workerCount int) *blockstoreManager { + return &blockstoreManager{ + bs: bs, + workerCount: workerCount, + jobs: make(chan func()), + } +} + +func (bsm *blockstoreManager) start(px process.Process) { + bsm.px = px + + // Start up workers + for i := 0; i < bsm.workerCount; i++ { + px.Go(func(px process.Process) { + bsm.worker() + }) + } +} + +func (bsm *blockstoreManager) worker() { + for { + select { + case <-bsm.px.Closing(): + return + case job := <-bsm.jobs: + job() + } + } +} + +func (bsm *blockstoreManager) addJob(ctx context.Context, job func()) { + select { + case <-ctx.Done(): + case <-bsm.px.Closing(): + case bsm.jobs <- job: + } +} + +func (bsm *blockstoreManager) getBlockSizes(ctx context.Context, ks []cid.Cid) map[cid.Cid]int { + res := make(map[cid.Cid]int) + if len(ks) == 0 { + return res + } + + var lk sync.Mutex + bsm.jobPerKey(ctx, ks, func(c cid.Cid) { + size, err := bsm.bs.GetSize(c) + if err != nil { + if err != bstore.ErrNotFound { + log.Errorf("blockstore.GetSize(%s) error: %s", c, err) + } + } else { + lk.Lock() + res[c] = size + lk.Unlock() + } + }) + + return res +} + +func (bsm *blockstoreManager) getBlocks(ctx context.Context, ks []cid.Cid) map[cid.Cid]blocks.Block { + res := make(map[cid.Cid]blocks.Block) + if len(ks) == 0 { + return res + } + + var lk sync.Mutex + bsm.jobPerKey(ctx, ks, func(c cid.Cid) { + blk, err := bsm.bs.Get(c) + if err != nil { + if err != bstore.ErrNotFound { + log.Errorf("blockstore.Get(%s) error: %s", c, err) + } + } else { + lk.Lock() + res[c] = blk + lk.Unlock() + } + }) + + return res +} + +func (bsm *blockstoreManager) jobPerKey(ctx context.Context, ks []cid.Cid, jobFn func(c cid.Cid)) { + wg := sync.WaitGroup{} + for 
_, k := range ks { + c := k + wg.Add(1) + bsm.addJob(ctx, func() { + jobFn(c) + wg.Done() + }) + } + wg.Wait() +} diff --git a/bitswap/decision/blockstoremanager_test.go b/bitswap/decision/blockstoremanager_test.go new file mode 100644 index 000000000..a5fee74e0 --- /dev/null +++ b/bitswap/decision/blockstoremanager_test.go @@ -0,0 +1,251 @@ +package decision + +import ( + "context" + "crypto/rand" + "errors" + "sync" + "testing" + "time" + + "github.com/ipfs/go-bitswap/testutil" + cid "github.com/ipfs/go-cid" + + blocks "github.com/ipfs/go-block-format" + ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/delayed" + ds_sync "github.com/ipfs/go-datastore/sync" + blockstore "github.com/ipfs/go-ipfs-blockstore" + delay "github.com/ipfs/go-ipfs-delay" + process "github.com/jbenet/goprocess" +) + +func TestBlockstoreManagerNotFoundKey(t *testing.T) { + ctx := context.Background() + bsdelay := delay.Fixed(3 * time.Millisecond) + dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) + bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) + + bsm := newBlockstoreManager(ctx, bstore, 5) + bsm.start(process.WithTeardown(func() error { return nil })) + + cids := testutil.GenerateCids(4) + sizes := bsm.getBlockSizes(ctx, cids) + if len(sizes) != 0 { + t.Fatal("Wrong response length") + } + + for _, c := range cids { + if _, ok := sizes[c]; ok { + t.Fatal("Non-existent block should have no size") + } + } + + blks := bsm.getBlocks(ctx, cids) + if len(blks) != 0 { + t.Fatal("Wrong response length") + } + + for _, c := range cids { + if _, ok := blks[c]; ok { + t.Fatal("Non-existent block should have no size") + } + } +} + +func TestBlockstoreManager(t *testing.T) { + ctx := context.Background() + bsdelay := delay.Fixed(3 * time.Millisecond) + dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) + bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) + + bsm := newBlockstoreManager(ctx, bstore, 5) + 
bsm.start(process.WithTeardown(func() error { return nil })) + + exp := make(map[cid.Cid]blocks.Block) + var blks []blocks.Block + for i := 0; i < 32; i++ { + buf := make([]byte, 1024*(i+1)) + _, _ = rand.Read(buf) + b := blocks.NewBlock(buf) + blks = append(blks, b) + exp[b.Cid()] = b + } + + // Put all blocks in the blockstore except the last one + if err := bstore.PutMany(blks[:len(blks)-1]); err != nil { + t.Fatal(err) + } + + var cids []cid.Cid + for _, b := range blks { + cids = append(cids, b.Cid()) + } + + sizes := bsm.getBlockSizes(ctx, cids) + if len(sizes) != len(blks)-1 { + t.Fatal("Wrong response length") + } + + for _, c := range cids { + expSize := len(exp[c].RawData()) + size, ok := sizes[c] + + // Only the last key should be missing + if c.Equals(cids[len(cids)-1]) { + if ok { + t.Fatal("Non-existent block should not be in sizes map") + } + } else { + if !ok { + t.Fatal("Block should be in sizes map") + } + if size != expSize { + t.Fatal("Block has wrong size") + } + } + } + + fetched := bsm.getBlocks(ctx, cids) + if len(fetched) != len(blks)-1 { + t.Fatal("Wrong response length") + } + + for _, c := range cids { + blk, ok := fetched[c] + + // Only the last key should be missing + if c.Equals(cids[len(cids)-1]) { + if ok { + t.Fatal("Non-existent block should not be in blocks map") + } + } else { + if !ok { + t.Fatal("Block should be in blocks map") + } + if !blk.Cid().Equals(c) { + t.Fatal("Block has wrong cid") + } + } + } +} + +func TestBlockstoreManagerConcurrency(t *testing.T) { + ctx := context.Background() + bsdelay := delay.Fixed(3 * time.Millisecond) + dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) + bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) + + workerCount := 5 + bsm := newBlockstoreManager(ctx, bstore, workerCount) + bsm.start(process.WithTeardown(func() error { return nil })) + + blkSize := int64(8 * 1024) + blks := testutil.GenerateBlocksOfSize(32, blkSize) + var ks []cid.Cid + for _, b := 
range blks { + ks = append(ks, b.Cid()) + } + + err := bstore.PutMany(blks) + if err != nil { + t.Fatal(err) + } + + // Create more concurrent requests than the number of workers + wg := sync.WaitGroup{} + for i := 0; i < 16; i++ { + wg.Add(1) + + go func(t *testing.T) { + defer wg.Done() + + sizes := bsm.getBlockSizes(ctx, ks) + if len(sizes) != len(blks) { + err = errors.New("Wrong response length") + } + }(t) + } + wg.Wait() + + if err != nil { + t.Fatal(err) + } +} + +func TestBlockstoreManagerClose(t *testing.T) { + ctx := context.Background() + delayTime := 20 * time.Millisecond + bsdelay := delay.Fixed(delayTime) + dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) + bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) + + bsm := newBlockstoreManager(ctx, bstore, 3) + px := process.WithTeardown(func() error { return nil }) + bsm.start(px) + + blks := testutil.GenerateBlocksOfSize(3, 1024) + var ks []cid.Cid + for _, b := range blks { + ks = append(ks, b.Cid()) + } + + err := bstore.PutMany(blks) + if err != nil { + t.Fatal(err) + } + + go px.Close() + + time.Sleep(5 * time.Millisecond) + + fnCallDone := make(chan struct{}) + go func() { + bsm.getBlockSizes(ctx, ks) + fnCallDone <- struct{}{} + }() + + select { + case <-fnCallDone: + t.Fatal("call to BlockstoreManager should be cancelled") + case <-px.Closed(): + } +} + +func TestBlockstoreManagerCtxDone(t *testing.T) { + delayTime := 20 * time.Millisecond + ctx := context.Background() + ctx, cancel := context.WithTimeout(context.Background(), delayTime/2) + defer cancel() + bsdelay := delay.Fixed(delayTime) + + dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) + bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) + + bsm := newBlockstoreManager(ctx, bstore, 3) + proc := process.WithTeardown(func() error { return nil }) + bsm.start(proc) + + blks := testutil.GenerateBlocksOfSize(3, 1024) + var ks []cid.Cid + for _, b := range blks { + ks = 
append(ks, b.Cid()) + } + + err := bstore.PutMany(blks) + if err != nil { + t.Fatal(err) + } + + fnCallDone := make(chan struct{}) + go func() { + bsm.getBlockSizes(ctx, ks) + fnCallDone <- struct{}{} + }() + + select { + case <-fnCallDone: + t.Fatal("call to BlockstoreManager should be cancelled") + case <-ctx.Done(): + } +} diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 6532061a4..3154b5e5f 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -15,6 +15,7 @@ import ( logging "github.com/ipfs/go-log" "github.com/ipfs/go-peertaskqueue" "github.com/ipfs/go-peertaskqueue/peertask" + process "github.com/jbenet/goprocess" peer "github.com/libp2p/go-libp2p-core/peer" ) @@ -55,6 +56,8 @@ var log = logging.Logger("engine") const ( // outboxChanBuffer must be 0 to prevent stale messages from being sent outboxChanBuffer = 0 + // Number of concurrent workers that pull tasks off the request queue + taskWorkerCount = 8 // maxMessageSize is the maximum size of the batched payload maxMessageSize = 512 * 1024 // tagFormat is the tag given to peers associated an engine @@ -78,6 +81,9 @@ const ( // long/short term scores for tagging peers longTermScore = 10 // this is a high tag but it grows _very_ slowly. shortTermScore = 10 // this is a high tag but it'll go away quickly if we aren't using the peer. 
+ + // Number of concurrent workers that process requests to the blockstore + blockstoreWorkerCount = 128 ) var ( @@ -125,7 +131,7 @@ type Engine struct { // taskWorker goroutine outbox chan (<-chan *Envelope) - bs bstore.Blockstore + bsm *blockstoreManager peerTagger PeerTagger @@ -136,26 +142,43 @@ type Engine struct { ledgerMap map[peer.ID]*ledger ticker *time.Ticker + + taskWorkerLock sync.Mutex + taskWorkerCount int } // NewEngine creates a new block sending engine for the given block store func NewEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger) *Engine { e := &Engine{ - ledgerMap: make(map[peer.ID]*ledger), - bs: bs, - peerTagger: peerTagger, - outbox: make(chan (<-chan *Envelope), outboxChanBuffer), - workSignal: make(chan struct{}, 1), - ticker: time.NewTicker(time.Millisecond * 100), + ledgerMap: make(map[peer.ID]*ledger), + bsm: newBlockstoreManager(ctx, bs, blockstoreWorkerCount), + peerTagger: peerTagger, + outbox: make(chan (<-chan *Envelope), outboxChanBuffer), + workSignal: make(chan struct{}, 1), + ticker: time.NewTicker(time.Millisecond * 100), + taskWorkerCount: taskWorkerCount, } e.tagQueued = fmt.Sprintf(tagFormat, "queued", uuid.New().String()) e.tagUseful = fmt.Sprintf(tagFormat, "useful", uuid.New().String()) - e.peerRequestQueue = peertaskqueue.New(peertaskqueue.OnPeerAddedHook(e.onPeerAdded), peertaskqueue.OnPeerRemovedHook(e.onPeerRemoved)) - go e.taskWorker(ctx) + e.peerRequestQueue = peertaskqueue.New( + peertaskqueue.OnPeerAddedHook(e.onPeerAdded), + peertaskqueue.OnPeerRemovedHook(e.onPeerRemoved)) go e.scoreWorker(ctx) return e } +// Start up workers to handle requests from other nodes for the data on this node +func (e *Engine) StartWorkers(ctx context.Context, px process.Process) { + // Start up blockstore manager + e.bsm.start(px) + + for i := 0; i < e.taskWorkerCount; i++ { + px.Go(func(px process.Process) { + e.taskWorker(ctx) + }) + } +} + // scoreWorker keeps track of how "useful" our peers are, 
updating scores in the // connection manager. // @@ -287,8 +310,11 @@ func (e *Engine) LedgerForPeer(p peer.ID) *Receipt { } } +// Each taskWorker pulls items off the request queue up and adds them to an +// envelope. The envelope is passed off to the bitswap workers, which send +// the message to the network. func (e *Engine) taskWorker(ctx context.Context) { - defer close(e.outbox) // because taskWorker uses the channel exclusively + defer e.taskWorkerExit() for { oneTimeUse := make(chan *Envelope, 1) // buffer to prevent blocking select { @@ -308,6 +334,17 @@ func (e *Engine) taskWorker(ctx context.Context) { } } +// taskWorkerExit handles cleanup of task workers +func (e *Engine) taskWorkerExit() { + e.taskWorkerLock.Lock() + defer e.taskWorkerLock.Unlock() + + e.taskWorkerCount-- + if e.taskWorkerCount == 0 { + close(e.outbox) + } +} + // nextEnvelope runs in the taskWorker goroutine. Returns an error if the // context is cancelled before the next Envelope can be created. func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { @@ -326,14 +363,15 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { } // with a task in hand, we're ready to prepare the envelope... + blockCids := cid.NewSet() + for _, t := range nextTask.Tasks { + blockCids.Add(t.Identifier.(cid.Cid)) + } + blks := e.bsm.getBlocks(ctx, blockCids.Keys()) + msg := bsmsg.New(true) - for _, entry := range nextTask.Tasks { - block, err := e.bs.Get(entry.Identifier.(cid.Cid)) - if err != nil { - log.Errorf("tried to execute a task and errored fetching block: %s", err) - continue - } - msg.AddBlock(block) + for _, b := range blks { + msg.AddBlock(b) } if msg.Empty() { @@ -379,7 +417,7 @@ func (e *Engine) Peers() []peer.ID { // MessageReceived performs book-keeping. Returns error if passed invalid // arguments. 
-func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) { +func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) { if m.Empty() { log.Debugf("received empty message from %s", p) } @@ -391,6 +429,16 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) { } }() + // Get block sizes + entries := m.Wantlist() + wantKs := cid.NewSet() + for _, entry := range entries { + if !entry.Cancel { + wantKs.Add(entry.Cid) + } + } + blockSizes := e.bsm.getBlockSizes(ctx, wantKs.Keys()) + l := e.findOrCreate(p) l.lk.Lock() defer l.lk.Unlock() @@ -408,13 +456,8 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) { } else { log.Debugf("wants %s - %d", entry.Cid, entry.Priority) l.Wants(entry.Cid, entry.Priority) - blockSize, err := e.bs.GetSize(entry.Cid) - if err != nil { - if err == bstore.ErrNotFound { - continue - } - log.Error(err) - } else { + blockSize, ok := blockSizes[entry.Cid] + if ok { // we have the block newWorkExists = true if msgSize+blockSize > maxMessageSize { @@ -484,9 +527,7 @@ func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) { for _, block := range m.Blocks() { l.SentBytes(len(block.RawData())) l.wantList.Remove(block.Cid()) - e.peerRequestQueue.Remove(block.Cid(), p) } - } // PeerConnected is called when a new peer connects, meaning we should start diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index d5adaa87e..09962e1e9 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -15,6 +15,7 @@ import ( ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" blockstore "github.com/ipfs/go-ipfs-blockstore" + process "github.com/jbenet/goprocess" peer "github.com/libp2p/go-libp2p-core/peer" testutil "github.com/libp2p/go-libp2p-core/test" ) @@ -88,13 +89,14 @@ type engineSet struct { func newEngine(ctx context.Context, idStr string) engineSet { fpt := &fakePeerTagger{} bs := 
blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + e := NewEngine(ctx, bs, fpt) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) return engineSet{ Peer: peer.ID(idStr), //Strategy: New(true), PeerTagger: fpt, Blockstore: bs, - Engine: NewEngine(ctx, - bs, fpt), + Engine: e, } } @@ -112,7 +114,7 @@ func TestConsistentAccounting(t *testing.T) { m.AddBlock(blocks.NewBlock([]byte(strings.Join(content, " ")))) sender.Engine.MessageSent(receiver.Peer, m) - receiver.Engine.MessageReceived(sender.Peer, m) + receiver.Engine.MessageReceived(ctx, sender.Peer, m) } // Ensure sender records the change @@ -142,7 +144,7 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { m := message.New(true) sanfrancisco.Engine.MessageSent(seattle.Peer, m) - seattle.Engine.MessageReceived(sanfrancisco.Peer, m) + seattle.Engine.MessageReceived(ctx, sanfrancisco.Peer, m) if seattle.Peer == sanfrancisco.Peer { t.Fatal("Sanity Check: Peers have same Key!") @@ -172,8 +174,10 @@ func peerIsPartner(p peer.ID, e *Engine) bool { } func TestOutboxClosedWhenEngineClosed(t *testing.T) { + ctx := context.Background() t.SkipNow() // TODO implement *Engine.Close - e := NewEngine(context.Background(), blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}) + e := NewEngine(ctx, blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) var wg sync.WaitGroup wg.Add(1) go func() { @@ -228,9 +232,11 @@ func TestPartnerWantsThenCancels(t *testing.T) { } } + ctx := context.Background() for i := 0; i < numRounds; i++ { expected := make([][]string, 0, len(testcases)) - e := NewEngine(context.Background(), bs, &fakePeerTagger{}) + e := NewEngine(ctx, bs, &fakePeerTagger{}) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) for _, testcase := range testcases { set := testcase[0] cancels := testcase[1] @@ -310,7 
+316,7 @@ func TestTaggingUseful(t *testing.T) { if me.PeerTagger.count(me.Engine.tagUseful) == 0 { t.Fatal("peers should still be tagged due to long-term usefulness") } - time.Sleep(shortTerm * 10) + time.Sleep(shortTerm * 20) if me.PeerTagger.count(me.Engine.tagUseful) != 0 { t.Fatal("peers should finally be untagged") } @@ -322,7 +328,7 @@ func partnerWants(e *Engine, keys []string, partner peer.ID) { block := blocks.NewBlock([]byte(letter)) add.AddEntry(block.Cid(), len(keys)-i) } - e.MessageReceived(partner, add) + e.MessageReceived(context.Background(), partner, add) } func partnerCancels(e *Engine, keys []string, partner peer.ID) { @@ -331,7 +337,7 @@ func partnerCancels(e *Engine, keys []string, partner peer.ID) { block := blocks.NewBlock([]byte(k)) cancels.Cancel(block.Cid()) } - e.MessageReceived(partner, cancels) + e.MessageReceived(context.Background(), partner, cancels) } func checkHandledInOrder(t *testing.T, e *Engine, expected [][]string) error { From b264719a8491b67531145de7431d54519c841bc3 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 5 Dec 2019 13:07:54 -0500 Subject: [PATCH 0827/1035] fix: wait until we finish connecting before we cancel the context This is an interesting bug because changes to the DHT have suddenly started triggering it. I'm not sure _why_ we weren't hitting it before now. We may have been ignoring a context somewhere? 
This commit was moved from ipfs/go-bitswap@2e76860da585f95f07079ce6423a5fb03ae6e808 --- bitswap/providerquerymanager/providerquerymanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/providerquerymanager/providerquerymanager.go b/bitswap/providerquerymanager/providerquerymanager.go index e1f77edf6..d47ffdb5a 100644 --- a/bitswap/providerquerymanager/providerquerymanager.go +++ b/bitswap/providerquerymanager/providerquerymanager.go @@ -252,8 +252,8 @@ func (pqm *ProviderQueryManager) findProviderWorker() { } }(p) } - cancel() wg.Wait() + cancel() select { case pqm.providerQueryMessages <- &finishedProviderQueryMessage{ k: k, From 126ffed6ca6a14384bfe1855f8bc4bb78f6c6c3a Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 22 Jan 2020 11:55:24 -0800 Subject: [PATCH 0828/1035] fix: abort when the context is canceled while getting blocks This commit was moved from ipfs/go-bitswap@0bc3d5a46ff1b736fbb405b6a6220d9c30da0af0 --- bitswap/decision/blockstoremanager.go | 34 ++++++---- bitswap/decision/blockstoremanager_test.go | 79 ++++++++++++---------- bitswap/decision/engine.go | 12 +++- 3 files changed, 74 insertions(+), 51 deletions(-) diff --git a/bitswap/decision/blockstoremanager.go b/bitswap/decision/blockstoremanager.go index e97bbdda5..8d880a6c4 100644 --- a/bitswap/decision/blockstoremanager.go +++ b/bitswap/decision/blockstoremanager.go @@ -2,6 +2,7 @@ package decision import ( "context" + "fmt" "sync" blocks "github.com/ipfs/go-block-format" @@ -50,25 +51,29 @@ func (bsm *blockstoreManager) worker() { } } -func (bsm *blockstoreManager) addJob(ctx context.Context, job func()) { +func (bsm *blockstoreManager) addJob(ctx context.Context, job func()) error { select { case <-ctx.Done(): + return ctx.Err() case <-bsm.px.Closing(): + return fmt.Errorf("shutting down") case bsm.jobs <- job: + return nil } } -func (bsm *blockstoreManager) getBlockSizes(ctx context.Context, ks []cid.Cid) map[cid.Cid]int { +func (bsm *blockstoreManager) 
getBlockSizes(ctx context.Context, ks []cid.Cid) (map[cid.Cid]int, error) { res := make(map[cid.Cid]int) if len(ks) == 0 { - return res + return res, nil } var lk sync.Mutex - bsm.jobPerKey(ctx, ks, func(c cid.Cid) { + return res, bsm.jobPerKey(ctx, ks, func(c cid.Cid) { size, err := bsm.bs.GetSize(c) if err != nil { if err != bstore.ErrNotFound { + // Note: this isn't a fatal error. We shouldn't abort the request log.Errorf("blockstore.GetSize(%s) error: %s", c, err) } } else { @@ -77,21 +82,20 @@ func (bsm *blockstoreManager) getBlockSizes(ctx context.Context, ks []cid.Cid) m lk.Unlock() } }) - - return res } -func (bsm *blockstoreManager) getBlocks(ctx context.Context, ks []cid.Cid) map[cid.Cid]blocks.Block { +func (bsm *blockstoreManager) getBlocks(ctx context.Context, ks []cid.Cid) (map[cid.Cid]blocks.Block, error) { res := make(map[cid.Cid]blocks.Block) if len(ks) == 0 { - return res + return res, nil } var lk sync.Mutex - bsm.jobPerKey(ctx, ks, func(c cid.Cid) { + return res, bsm.jobPerKey(ctx, ks, func(c cid.Cid) { blk, err := bsm.bs.Get(c) if err != nil { if err != bstore.ErrNotFound { + // Note: this isn't a fatal error. 
We shouldn't abort the request log.Errorf("blockstore.Get(%s) error: %s", c, err) } } else { @@ -100,19 +104,23 @@ func (bsm *blockstoreManager) getBlocks(ctx context.Context, ks []cid.Cid) map[c lk.Unlock() } }) - - return res } -func (bsm *blockstoreManager) jobPerKey(ctx context.Context, ks []cid.Cid, jobFn func(c cid.Cid)) { +func (bsm *blockstoreManager) jobPerKey(ctx context.Context, ks []cid.Cid, jobFn func(c cid.Cid)) error { + var err error wg := sync.WaitGroup{} for _, k := range ks { c := k wg.Add(1) - bsm.addJob(ctx, func() { + err = bsm.addJob(ctx, func() { jobFn(c) wg.Done() }) + if err != nil { + wg.Done() + break + } } wg.Wait() + return err } diff --git a/bitswap/decision/blockstoremanager_test.go b/bitswap/decision/blockstoremanager_test.go index a5fee74e0..c57c48929 100644 --- a/bitswap/decision/blockstoremanager_test.go +++ b/bitswap/decision/blockstoremanager_test.go @@ -3,7 +3,6 @@ package decision import ( "context" "crypto/rand" - "errors" "sync" "testing" "time" @@ -30,7 +29,10 @@ func TestBlockstoreManagerNotFoundKey(t *testing.T) { bsm.start(process.WithTeardown(func() error { return nil })) cids := testutil.GenerateCids(4) - sizes := bsm.getBlockSizes(ctx, cids) + sizes, err := bsm.getBlockSizes(ctx, cids) + if err != nil { + t.Fatal(err) + } if len(sizes) != 0 { t.Fatal("Wrong response length") } @@ -41,7 +43,10 @@ func TestBlockstoreManagerNotFoundKey(t *testing.T) { } } - blks := bsm.getBlocks(ctx, cids) + blks, err := bsm.getBlocks(ctx, cids) + if err != nil { + t.Fatal(err) + } if len(blks) != 0 { t.Fatal("Wrong response length") } @@ -82,7 +87,10 @@ func TestBlockstoreManager(t *testing.T) { cids = append(cids, b.Cid()) } - sizes := bsm.getBlockSizes(ctx, cids) + sizes, err := bsm.getBlockSizes(ctx, cids) + if err != nil { + t.Fatal(err) + } if len(sizes) != len(blks)-1 { t.Fatal("Wrong response length") } @@ -106,7 +114,10 @@ func TestBlockstoreManager(t *testing.T) { } } - fetched := bsm.getBlocks(ctx, cids) + fetched, err := 
bsm.getBlocks(ctx, cids) + if err != nil { + t.Fatal(err) + } if len(fetched) != len(blks)-1 { t.Fatal("Wrong response length") } @@ -160,17 +171,16 @@ func TestBlockstoreManagerConcurrency(t *testing.T) { go func(t *testing.T) { defer wg.Done() - sizes := bsm.getBlockSizes(ctx, ks) + sizes, err := bsm.getBlockSizes(ctx, ks) + if err != nil { + t.Error(err) + } if len(sizes) != len(blks) { - err = errors.New("Wrong response length") + t.Error("Wrong response length") } }(t) } wg.Wait() - - if err != nil { - t.Fatal(err) - } } func TestBlockstoreManagerClose(t *testing.T) { @@ -184,7 +194,7 @@ func TestBlockstoreManagerClose(t *testing.T) { px := process.WithTeardown(func() error { return nil }) bsm.start(px) - blks := testutil.GenerateBlocksOfSize(3, 1024) + blks := testutil.GenerateBlocksOfSize(10, 1024) var ks []cid.Cid for _, b := range blks { ks = append(ks, b.Cid()) @@ -199,34 +209,29 @@ func TestBlockstoreManagerClose(t *testing.T) { time.Sleep(5 * time.Millisecond) - fnCallDone := make(chan struct{}) - go func() { - bsm.getBlockSizes(ctx, ks) - fnCallDone <- struct{}{} - }() - - select { - case <-fnCallDone: - t.Fatal("call to BlockstoreManager should be cancelled") - case <-px.Closed(): + before := time.Now() + _, err = bsm.getBlockSizes(ctx, ks) + if err == nil { + t.Error("expected an error") + } + // would expect to wait delayTime*10 if we didn't cancel. 
+ if time.Since(before) > delayTime*2 { + t.Error("expected a fast timeout") } } func TestBlockstoreManagerCtxDone(t *testing.T) { delayTime := 20 * time.Millisecond - ctx := context.Background() - ctx, cancel := context.WithTimeout(context.Background(), delayTime/2) - defer cancel() bsdelay := delay.Fixed(delayTime) dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) - bsm := newBlockstoreManager(ctx, bstore, 3) + bsm := newBlockstoreManager(context.Background(), bstore, 3) proc := process.WithTeardown(func() error { return nil }) bsm.start(proc) - blks := testutil.GenerateBlocksOfSize(3, 1024) + blks := testutil.GenerateBlocksOfSize(10, 1024) var ks []cid.Cid for _, b := range blks { ks = append(ks, b.Cid()) @@ -237,15 +242,17 @@ func TestBlockstoreManagerCtxDone(t *testing.T) { t.Fatal(err) } - fnCallDone := make(chan struct{}) - go func() { - bsm.getBlockSizes(ctx, ks) - fnCallDone <- struct{}{} - }() + ctx, cancel := context.WithTimeout(context.Background(), delayTime/2) + defer cancel() + + before := time.Now() + _, err = bsm.getBlockSizes(ctx, ks) + if err == nil { + t.Error("expected an error") + } - select { - case <-fnCallDone: - t.Fatal("call to BlockstoreManager should be cancelled") - case <-ctx.Done(): + // would expect to wait delayTime*10 if we didn't cancel. + if time.Since(before) > delayTime*2 { + t.Error("expected a fast timeout") } } diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 3154b5e5f..7a58bb3f6 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -367,7 +367,11 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { for _, t := range nextTask.Tasks { blockCids.Add(t.Identifier.(cid.Cid)) } - blks := e.bsm.getBlocks(ctx, blockCids.Keys()) + blks, err := e.bsm.getBlocks(ctx, blockCids.Keys()) + if err != nil { + // we're dropping the envelope but that's not an issue in practice. 
+ return nil, err + } msg := bsmsg.New(true) for _, b := range blks { @@ -437,7 +441,11 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap wantKs.Add(entry.Cid) } } - blockSizes := e.bsm.getBlockSizes(ctx, wantKs.Keys()) + blockSizes, err := e.bsm.getBlockSizes(ctx, wantKs.Keys()) + if err != nil { + log.Info("aborting message processing", err) + return + } l := e.findOrCreate(p) l.lk.Lock() From 5c55d2d62633f7e01fa2ca0ebb166b25186059ed Mon Sep 17 00:00:00 2001 From: dirkmc Date: Thu, 30 Jan 2020 15:32:21 -0800 Subject: [PATCH 0829/1035] feat: bitswap protocol extensions This commit extends the bitswap protocol with two additional wantlist properties: * WANT_HAVE/HAVE: Instead of asking for a block, a node can specify that they want to know if any peers "have" the block. * WANT_HAVE_NOT/HAVE_NOT: Instead of waiting for a timeout, a node can explicitly request to be told immediately if their peers don't currently have the given block. Additionally, nodes now tell their peers how much data they have queued to send them when sending messages. This allows peers to better distribute requests, keeping all peers busy but not overloaded. 
Changes in this PR are described in: https://github.com/ipfs/go-bitswap/issues/186 This commit was moved from ipfs/go-bitswap@b3a47bcf5080c734346fc39400802c35aedd6428 --- bitswap/benchmarks_test.go | 434 ++++++++-- bitswap/bitswap.go | 125 +-- bitswap/bitswap_test.go | 25 +- bitswap/bitswap_with_sessions_test.go | 87 ++ .../blockpresencemanager.go | 111 +++ .../blockpresencemanager_test.go | 239 ++++++ bitswap/decision/engine.go | 398 +++++++-- bitswap/decision/engine_test.go | 798 +++++++++++++++++- bitswap/decision/ledger.go | 11 +- bitswap/decision/taskmerger.go | 87 ++ bitswap/decision/taskmerger_test.go | 357 ++++++++ bitswap/logutil/logutil.go | 26 + bitswap/message/message.go | 239 +++++- bitswap/message/message_test.go | 113 ++- bitswap/message/pb/message.pb.go | 561 +++++++++++- bitswap/message/pb/message.proto | 17 + bitswap/messagequeue/messagequeue.go | 391 +++++++-- bitswap/messagequeue/messagequeue_test.go | 473 ++++++++++- bitswap/network/interface.go | 16 +- bitswap/network/ipfs_impl.go | 81 +- bitswap/network/ipfs_impl_test.go | 74 +- bitswap/network/options.go | 9 +- bitswap/peermanager/peermanager.go | 161 +++- bitswap/peermanager/peermanager_test.go | 295 +++++-- bitswap/peermanager/peerwantmanager.go | 206 +++++ bitswap/peermanager/peerwantmanager_test.go | 292 +++++++ bitswap/session/cidqueue.go | 17 + bitswap/session/peeravailabilitymanager.go | 57 ++ .../session/peeravailabilitymanager_test.go | 74 ++ bitswap/session/peerresponsetracker.go | 68 ++ bitswap/session/peerresponsetracker_test.go | 117 +++ bitswap/session/sentwantblockstracker.go | 33 + bitswap/session/sentwantblockstracker_test.go | 28 + bitswap/session/session.go | 340 ++++---- bitswap/session/session_test.go | 372 ++++---- bitswap/session/sessionwants.go | 148 ++-- bitswap/session/sessionwants_test.go | 108 +-- bitswap/session/sessionwantsender.go | 605 +++++++++++++ bitswap/session/sessionwantsender_test.go | 348 ++++++++ bitswap/session/wantinfo_test.go | 80 ++ 
.../sessioninterestmanager.go | 73 ++ .../sessioninterestmanager_test.go | 182 ++++ bitswap/sessionmanager/sessionmanager.go | 92 +- bitswap/sessionmanager/sessionmanager_test.go | 182 ++-- .../sessionpeermanager/sessionpeermanager.go | 25 +- bitswap/sessionwantlist/sessionwantlist.go | 126 +++ .../sessionwantlist/sessionwantlist_test.go | 258 ++++++ bitswap/testinstance/testinstance.go | 3 +- bitswap/testnet/interface.go | 4 +- bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 6 +- bitswap/testnet/virtual.go | 42 +- bitswap/testutil/testutil.go | 51 +- bitswap/wantlist/wantlist.go | 154 +--- bitswap/wantlist/wantlist_test.go | 165 +++- bitswap/wantmanager/wantmanager.go | 288 ++----- bitswap/wantmanager/wantmanager_test.go | 343 ++++---- bitswap/workers.go | 26 +- 58 files changed, 8205 insertions(+), 1838 deletions(-) create mode 100644 bitswap/blockpresencemanager/blockpresencemanager.go create mode 100644 bitswap/blockpresencemanager/blockpresencemanager_test.go create mode 100644 bitswap/decision/taskmerger.go create mode 100644 bitswap/decision/taskmerger_test.go create mode 100644 bitswap/logutil/logutil.go create mode 100644 bitswap/peermanager/peerwantmanager.go create mode 100644 bitswap/peermanager/peerwantmanager_test.go create mode 100644 bitswap/session/peeravailabilitymanager.go create mode 100644 bitswap/session/peeravailabilitymanager_test.go create mode 100644 bitswap/session/peerresponsetracker.go create mode 100644 bitswap/session/peerresponsetracker_test.go create mode 100644 bitswap/session/sentwantblockstracker.go create mode 100644 bitswap/session/sentwantblockstracker_test.go create mode 100644 bitswap/session/sessionwantsender.go create mode 100644 bitswap/session/sessionwantsender_test.go create mode 100644 bitswap/session/wantinfo_test.go create mode 100644 bitswap/sessioninterestmanager/sessioninterestmanager.go create mode 100644 bitswap/sessioninterestmanager/sessioninterestmanager_test.go create mode 100644 
bitswap/sessionwantlist/sessionwantlist.go create mode 100644 bitswap/sessionwantlist/sessionwantlist_test.go diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index 1671b9bbb..501488ded 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -3,7 +3,9 @@ package bitswap_test import ( "context" "encoding/json" + "fmt" "io/ioutil" + "math" "math/rand" "os" "strconv" @@ -19,7 +21,6 @@ import ( testinstance "github.com/ipfs/go-bitswap/testinstance" tn "github.com/ipfs/go-bitswap/testnet" cid "github.com/ipfs/go-cid" - blocksutil "github.com/ipfs/go-ipfs-blocksutil" delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" ) @@ -29,89 +30,114 @@ type fetchFunc func(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) type distFunc func(b *testing.B, provs []testinstance.Instance, blocks []blocks.Block) type runStats struct { - Dups uint64 - MsgSent uint64 - MsgRecd uint64 - Time time.Duration - Name string + DupsRcvd uint64 + BlksRcvd uint64 + MsgSent uint64 + MsgRecd uint64 + Time time.Duration + Name string } var benchmarkLog []runStats -func BenchmarkDups2Nodes(b *testing.B) { +type bench struct { + name string + nodeCount int + blockCount int + distFn distFunc + fetchFn fetchFunc +} + +var benches = []bench{ + // Fetch from two seed nodes that both have all 100 blocks + // - request one at a time, in series + bench{"3Nodes-AllToAll-OneAtATime", 3, 100, allToAll, oneAtATime}, + // - request all 100 with a single GetBlocks() call + bench{"3Nodes-AllToAll-BigBatch", 3, 100, allToAll, batchFetchAll}, + + // Fetch from two seed nodes, one at a time, where: + // - node A has blocks 0 - 74 + // - node B has blocks 25 - 99 + bench{"3Nodes-Overlap1-OneAtATime", 3, 100, overlap1, oneAtATime}, + + // Fetch from two seed nodes, where: + // - node A has even blocks + // - node B has odd blocks + // - both nodes have every third block + + // - request one at a time, in series + bench{"3Nodes-Overlap3-OneAtATime", 
3, 100, overlap2, oneAtATime}, + // - request 10 at a time, in series + bench{"3Nodes-Overlap3-BatchBy10", 3, 100, overlap2, batchFetchBy10}, + // - request all 100 in parallel as individual GetBlock() calls + bench{"3Nodes-Overlap3-AllConcurrent", 3, 100, overlap2, fetchAllConcurrent}, + // - request all 100 with a single GetBlocks() call + bench{"3Nodes-Overlap3-BigBatch", 3, 100, overlap2, batchFetchAll}, + // - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file) + bench{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch}, + + // Fetch from nine seed nodes, all nodes have all blocks + // - request one at a time, in series + bench{"10Nodes-AllToAll-OneAtATime", 10, 100, allToAll, oneAtATime}, + // - request 10 at a time, in series + bench{"10Nodes-AllToAll-BatchFetchBy10", 10, 100, allToAll, batchFetchBy10}, + // - request all 100 with a single GetBlocks() call + bench{"10Nodes-AllToAll-BigBatch", 10, 100, allToAll, batchFetchAll}, + // - request all 100 in parallel as individual GetBlock() calls + bench{"10Nodes-AllToAll-AllConcurrent", 10, 100, allToAll, fetchAllConcurrent}, + // - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file) + bench{"10Nodes-AllToAll-UnixfsFetch", 10, 100, allToAll, unixfsFileFetch}, + // - follow a typical IPFS request pattern for 1000 blocks + bench{"10Nodes-AllToAll-UnixfsFetchLarge", 10, 1000, allToAll, unixfsFileFetchLarge}, + + // Fetch from nine seed nodes, blocks are distributed randomly across all nodes (no dups) + // - request one at a time, in series + bench{"10Nodes-OnePeerPerBlock-OneAtATime", 10, 100, onePeerPerBlock, oneAtATime}, + // - request all 100 with a single GetBlocks() call + bench{"10Nodes-OnePeerPerBlock-BigBatch", 10, 100, onePeerPerBlock, batchFetchAll}, + // - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file) + bench{"10Nodes-OnePeerPerBlock-UnixfsFetch", 10, 100, onePeerPerBlock, unixfsFileFetch}, + + // Fetch from 199 
seed nodes, all nodes have all blocks, fetch all 20 blocks with a single GetBlocks() call + bench{"200Nodes-AllToAll-BigBatch", 200, 20, allToAll, batchFetchAll}, +} + +func BenchmarkFixedDelay(b *testing.B) { benchmarkLog = nil fixedDelay := delay.Fixed(10 * time.Millisecond) - b.Run("AllToAll-OneAtATime", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, allToAll, oneAtATime) - }) - b.Run("AllToAll-BigBatch", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, allToAll, batchFetchAll) - }) + bstoreLatency := time.Duration(0) - b.Run("Overlap1-OneAtATime", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap1, oneAtATime) - }) + for _, bch := range benches { + b.Run(bch.name, func(b *testing.B) { + subtestDistributeAndFetch(b, bch.nodeCount, bch.blockCount, fixedDelay, bstoreLatency, bch.distFn, bch.fetchFn) + }) + } - b.Run("Overlap3-OneAtATime", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap2, oneAtATime) - }) - b.Run("Overlap3-BatchBy10", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap2, batchFetchBy10) - }) - b.Run("Overlap3-AllConcurrent", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap2, fetchAllConcurrent) - }) - b.Run("Overlap3-BigBatch", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap2, batchFetchAll) - }) - b.Run("Overlap3-UnixfsFetch", func(b *testing.B) { - subtestDistributeAndFetch(b, 3, 100, fixedDelay, overlap2, unixfsFileFetch) - }) - b.Run("10Nodes-AllToAll-OneAtATime", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, fixedDelay, allToAll, oneAtATime) - }) - b.Run("10Nodes-AllToAll-BatchFetchBy10", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, fixedDelay, allToAll, batchFetchBy10) - }) - b.Run("10Nodes-AllToAll-BigBatch", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, fixedDelay, allToAll, batchFetchAll) - }) - 
b.Run("10Nodes-AllToAll-AllConcurrent", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, fixedDelay, allToAll, fetchAllConcurrent) - }) - b.Run("10Nodes-AllToAll-UnixfsFetch", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, fixedDelay, allToAll, unixfsFileFetch) - }) - b.Run("10Nodes-OnePeerPerBlock-OneAtATime", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, fixedDelay, onePeerPerBlock, oneAtATime) - }) - b.Run("10Nodes-OnePeerPerBlock-BigBatch", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, fixedDelay, onePeerPerBlock, batchFetchAll) - }) - b.Run("10Nodes-OnePeerPerBlock-UnixfsFetch", func(b *testing.B) { - subtestDistributeAndFetch(b, 10, 100, fixedDelay, onePeerPerBlock, unixfsFileFetch) - }) - b.Run("200Nodes-AllToAll-BigBatch", func(b *testing.B) { - subtestDistributeAndFetch(b, 200, 20, fixedDelay, allToAll, batchFetchAll) - }) out, _ := json.MarshalIndent(benchmarkLog, "", " ") _ = ioutil.WriteFile("tmp/benchmark.json", out, 0666) + printResults(benchmarkLog) } +const datacenterSpeed = 5 * time.Millisecond const fastSpeed = 60 * time.Millisecond const mediumSpeed = 200 * time.Millisecond const slowSpeed = 800 * time.Millisecond const superSlowSpeed = 4000 * time.Millisecond +const datacenterDistribution = 3 * time.Millisecond const distribution = 20 * time.Millisecond +const datacenterBandwidth = 125000000.0 +const datacenterBandwidthDeviation = 3000000.0 const fastBandwidth = 1250000.0 const fastBandwidthDeviation = 300000.0 const mediumBandwidth = 500000.0 const mediumBandwidthDeviation = 80000.0 const slowBandwidth = 100000.0 const slowBandwidthDeviation = 16500.0 +const rootBlockSize = 800 const stdBlockSize = 8000 +const largeBlockSize = int64(256 * 1024) -func BenchmarkDupsManyNodesRealWorldNetwork(b *testing.B) { +func BenchmarkRealWorld(b *testing.B) { benchmarkLog = nil benchmarkSeed, err := strconv.ParseInt(os.Getenv("BENCHMARK_SEED"), 10, 64) var randomGen *rand.Rand = nil @@ -134,67 +160,198 
@@ func BenchmarkDupsManyNodesRealWorldNetwork(b *testing.B) { 0.3, 0.3, distribution, randomGen) slowNetworkDelay := delay.Delay(fastSpeed, slowNetworkDelayGenerator) slowBandwidthGenerator := tn.VariableRateLimitGenerator(slowBandwidth, slowBandwidthDeviation, randomGen) + bstoreLatency := time.Duration(0) b.Run("200Nodes-AllToAll-BigBatch-FastNetwork", func(b *testing.B) { - subtestDistributeAndFetchRateLimited(b, 300, 200, fastNetworkDelay, fastBandwidthGenerator, stdBlockSize, allToAll, batchFetchAll) + subtestDistributeAndFetchRateLimited(b, 300, 200, fastNetworkDelay, fastBandwidthGenerator, stdBlockSize, bstoreLatency, allToAll, batchFetchAll) }) b.Run("200Nodes-AllToAll-BigBatch-AverageVariableSpeedNetwork", func(b *testing.B) { - subtestDistributeAndFetchRateLimited(b, 300, 200, averageNetworkDelay, averageBandwidthGenerator, stdBlockSize, allToAll, batchFetchAll) + subtestDistributeAndFetchRateLimited(b, 300, 200, averageNetworkDelay, averageBandwidthGenerator, stdBlockSize, bstoreLatency, allToAll, batchFetchAll) }) b.Run("200Nodes-AllToAll-BigBatch-SlowVariableSpeedNetwork", func(b *testing.B) { - subtestDistributeAndFetchRateLimited(b, 300, 200, slowNetworkDelay, slowBandwidthGenerator, stdBlockSize, allToAll, batchFetchAll) + subtestDistributeAndFetchRateLimited(b, 300, 200, slowNetworkDelay, slowBandwidthGenerator, stdBlockSize, bstoreLatency, allToAll, batchFetchAll) }) out, _ := json.MarshalIndent(benchmarkLog, "", " ") _ = ioutil.WriteFile("tmp/rw-benchmark.json", out, 0666) + printResults(benchmarkLog) +} + +func BenchmarkDatacenter(b *testing.B) { + benchmarkLog = nil + benchmarkSeed, err := strconv.ParseInt(os.Getenv("BENCHMARK_SEED"), 10, 64) + var randomGen *rand.Rand = nil + if err == nil { + randomGen = rand.New(rand.NewSource(benchmarkSeed)) + } + + datacenterNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( + fastSpeed-datacenterSpeed, (fastSpeed-datacenterSpeed)/2, + 0.0, 0.0, datacenterDistribution, randomGen) + 
datacenterNetworkDelay := delay.Delay(datacenterSpeed, datacenterNetworkDelayGenerator) + datacenterBandwidthGenerator := tn.VariableRateLimitGenerator(datacenterBandwidth, datacenterBandwidthDeviation, randomGen) + bstoreLatency := time.Millisecond * 25 + + b.Run("3Nodes-Overlap3-UnixfsFetch", func(b *testing.B) { + subtestDistributeAndFetchRateLimited(b, 3, 100, datacenterNetworkDelay, datacenterBandwidthGenerator, largeBlockSize, bstoreLatency, allToAll, unixfsFileFetch) + }) + out, _ := json.MarshalIndent(benchmarkLog, "", " ") + _ = ioutil.WriteFile("tmp/rb-benchmark.json", out, 0666) + printResults(benchmarkLog) +} + +func BenchmarkDatacenterMultiLeechMultiSeed(b *testing.B) { + benchmarkLog = nil + benchmarkSeed, err := strconv.ParseInt(os.Getenv("BENCHMARK_SEED"), 10, 64) + var randomGen *rand.Rand = nil + if err == nil { + randomGen = rand.New(rand.NewSource(benchmarkSeed)) + } + + datacenterNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( + fastSpeed-datacenterSpeed, (fastSpeed-datacenterSpeed)/2, + 0.0, 0.0, datacenterDistribution, randomGen) + datacenterNetworkDelay := delay.Delay(datacenterSpeed, datacenterNetworkDelayGenerator) + datacenterBandwidthGenerator := tn.VariableRateLimitGenerator(datacenterBandwidth, datacenterBandwidthDeviation, randomGen) + bstoreLatency := time.Millisecond * 25 + + b.Run("3Leech3Seed-AllToAll-UnixfsFetch", func(b *testing.B) { + d := datacenterNetworkDelay + rateLimitGenerator := datacenterBandwidthGenerator + blockSize := largeBlockSize + df := allToAll + ff := unixfsFileFetchLarge + numnodes := 6 + numblks := 1000 + + for i := 0; i < b.N; i++ { + net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) + + ig := testinstance.NewTestInstanceGenerator(net) + defer ig.Close() + + instances := ig.Instances(numnodes) + blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) + runDistributionMulti(b, instances, 3, blocks, bstoreLatency, df, ff) + } + }) + + out, _ := 
json.MarshalIndent(benchmarkLog, "", " ") + _ = ioutil.WriteFile("tmp/rb-benchmark.json", out, 0666) + printResults(benchmarkLog) } -func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, df distFunc, ff fetchFunc) { +func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { for i := 0; i < b.N; i++ { - start := time.Now() net := tn.VirtualNetwork(mockrouting.NewServer(), d) ig := testinstance.NewTestInstanceGenerator(net) - defer ig.Close() - - bg := blocksutil.NewBlockGenerator() instances := ig.Instances(numnodes) - blocks := bg.Blocks(numblks) - runDistribution(b, instances, blocks, df, ff, start) + rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) + blocks := testutil.GenerateBlocksOfSize(numblks, stdBlockSize) + blocks[0] = rootBlock[0] + runDistribution(b, instances, blocks, bstoreLatency, df, ff) + ig.Close() + // panic("done") } } -func subtestDistributeAndFetchRateLimited(b *testing.B, numnodes, numblks int, d delay.D, rateLimitGenerator tn.RateLimitGenerator, blockSize int64, df distFunc, ff fetchFunc) { +func subtestDistributeAndFetchRateLimited(b *testing.B, numnodes, numblks int, d delay.D, rateLimitGenerator tn.RateLimitGenerator, blockSize int64, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { for i := 0; i < b.N; i++ { - - start := time.Now() net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) ig := testinstance.NewTestInstanceGenerator(net) defer ig.Close() instances := ig.Instances(numnodes) + rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) - - runDistribution(b, instances, blocks, df, ff, start) + blocks[0] = rootBlock[0] + runDistribution(b, instances, blocks, bstoreLatency, df, ff) } } -func runDistribution(b *testing.B, instances []testinstance.Instance, blocks []blocks.Block, df distFunc, ff fetchFunc, start 
time.Time) { - +func runDistributionMulti(b *testing.B, instances []testinstance.Instance, numFetchers int, blocks []blocks.Block, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { numnodes := len(instances) + fetchers := instances[numnodes-numFetchers:] + + // Distribute blocks to seed nodes + seeds := instances[:numnodes-numFetchers] + df(b, seeds, blocks) + + // Set the blockstore latency on seed nodes + if bstoreLatency > 0 { + for _, i := range seeds { + i.SetBlockstoreLatency(bstoreLatency) + } + } + + // Fetch blocks (from seed nodes to leech nodes) + var ks []cid.Cid + for _, blk := range blocks { + ks = append(ks, blk.Cid()) + } + + start := time.Now() + var wg sync.WaitGroup + for _, fetcher := range fetchers { + wg.Add(1) + + go func(ftchr testinstance.Instance) { + defer wg.Done() + + ff(b, ftchr.Exchange, ks) + }(fetcher) + } + wg.Wait() + + // Collect statistics + fetcher := fetchers[0] + st, err := fetcher.Exchange.Stat() + if err != nil { + b.Fatal(err) + } + + for _, fetcher := range fetchers { + nst := fetcher.Adapter.Stats() + stats := runStats{ + Time: time.Since(start), + MsgRecd: nst.MessagesRecvd, + MsgSent: nst.MessagesSent, + DupsRcvd: st.DupBlksReceived, + BlksRcvd: st.BlocksReceived, + Name: b.Name(), + } + benchmarkLog = append(benchmarkLog, stats) + } + // b.Logf("send/recv: %d / %d (dups: %d)", nst.MessagesSent, nst.MessagesRecvd, st.DupBlksReceived) +} +func runDistribution(b *testing.B, instances []testinstance.Instance, blocks []blocks.Block, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { + numnodes := len(instances) fetcher := instances[numnodes-1] - df(b, instances[:numnodes-1], blocks) + // Distribute blocks to seed nodes + seeds := instances[:numnodes-1] + df(b, seeds, blocks) + // Set the blockstore latency on seed nodes + if bstoreLatency > 0 { + for _, i := range seeds { + i.SetBlockstoreLatency(bstoreLatency) + } + } + + // Fetch blocks (from seed nodes to leech nodes) var ks []cid.Cid for _, blk := range 
blocks { ks = append(ks, blk.Cid()) } + start := time.Now() ff(b, fetcher.Exchange, ks) + // Collect statistics st, err := fetcher.Exchange.Stat() if err != nil { b.Fatal(err) @@ -202,14 +359,15 @@ func runDistribution(b *testing.B, instances []testinstance.Instance, blocks []b nst := fetcher.Adapter.Stats() stats := runStats{ - Time: time.Since(start), - MsgRecd: nst.MessagesRecvd, - MsgSent: nst.MessagesSent, - Dups: st.DupBlksReceived, - Name: b.Name(), + Time: time.Since(start), + MsgRecd: nst.MessagesRecvd, + MsgSent: nst.MessagesSent, + DupsRcvd: st.DupBlksReceived, + BlksRcvd: st.BlocksReceived, + Name: b.Name(), } benchmarkLog = append(benchmarkLog, stats) - b.Logf("send/recv: %d / %d", nst.MessagesSent, nst.MessagesRecvd) + // b.Logf("send/recv: %d / %d (dups: %d)", nst.MessagesSent, nst.MessagesRecvd, st.DupBlksReceived) } func allToAll(b *testing.B, provs []testinstance.Instance, blocks []blocks.Block) { @@ -282,7 +440,7 @@ func oneAtATime(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { b.Fatal(err) } } - b.Logf("Session fetch latency: %s", ses.GetAverageLatency()) + // b.Logf("Session fetch latency: %s", ses.GetAverageLatency()) } // fetch data in batches, 10 at a time @@ -348,3 +506,111 @@ func unixfsFileFetch(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { for range out { } } + +func unixfsFileFetchLarge(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { + ses := bs.NewSession(context.Background()) + _, err := ses.GetBlock(context.Background(), ks[0]) + if err != nil { + b.Fatal(err) + } + + out, err := ses.GetBlocks(context.Background(), ks[1:11]) + if err != nil { + b.Fatal(err) + } + for range out { + } + + out, err = ses.GetBlocks(context.Background(), ks[11:100]) + if err != nil { + b.Fatal(err) + } + for range out { + } + + rest := ks[100:] + for len(rest) > 0 { + var batch [][]cid.Cid + for i := 0; i < 5 && len(rest) > 0; i++ { + cnt := 10 + if len(rest) < 10 { + cnt = len(rest) + } + group := rest[:cnt] + rest = rest[cnt:] + batch = 
append(batch, group) + } + + var anyErr error + var wg sync.WaitGroup + for _, group := range batch { + wg.Add(1) + go func(grp []cid.Cid) { + defer wg.Done() + + out, err = ses.GetBlocks(context.Background(), grp) + if err != nil { + anyErr = err + } + for range out { + } + }(group) + } + wg.Wait() + + // Note: b.Fatal() cannot be called from within a go-routine + if anyErr != nil { + b.Fatal(anyErr) + } + } +} + +func printResults(rs []runStats) { + nameOrder := make([]string, 0) + names := make(map[string]struct{}) + for i := 0; i < len(rs); i++ { + if _, ok := names[rs[i].Name]; !ok { + nameOrder = append(nameOrder, rs[i].Name) + names[rs[i].Name] = struct{}{} + } + } + + for i := 0; i < len(names); i++ { + name := nameOrder[i] + count := 0 + sent := 0.0 + rcvd := 0.0 + dups := 0.0 + blks := 0.0 + elpd := 0.0 + for i := 0; i < len(rs); i++ { + if rs[i].Name == name { + count++ + sent += float64(rs[i].MsgSent) + rcvd += float64(rs[i].MsgRecd) + dups += float64(rs[i].DupsRcvd) + blks += float64(rs[i].BlksRcvd) + elpd += float64(rs[i].Time) + } + } + sent /= float64(count) + rcvd /= float64(count) + dups /= float64(count) + blks /= float64(count) + + label := fmt.Sprintf("%s (%d runs / %.2fs):", name, count, elpd/1000000000.0) + fmt.Printf("%-75s %s: sent %d, recv %d, dups %d / %d\n", + label, + fmtDuration(time.Duration(int64(math.Round(elpd/float64(count))))), + int64(math.Round(sent)), int64(math.Round(rcvd)), + int64(math.Round(dups)), int64(math.Round(blks))) + } +} + +func fmtDuration(d time.Duration) string { + d = d.Round(time.Millisecond) + s := d / time.Second + d -= s * time.Second + ms := d / time.Millisecond + return fmt.Sprintf("%d.%03ds", s, ms) +} diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 93759802b..d607274df 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -5,12 +5,13 @@ package bitswap import ( "context" "errors" + "sync" "time" - bssrs "github.com/ipfs/go-bitswap/sessionrequestsplitter" delay 
"github.com/ipfs/go-ipfs-delay" + bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" decision "github.com/ipfs/go-bitswap/decision" bsgetter "github.com/ipfs/go-bitswap/getter" bsmsg "github.com/ipfs/go-bitswap/message" @@ -20,6 +21,7 @@ import ( bspm "github.com/ipfs/go-bitswap/peermanager" bspqm "github.com/ipfs/go-bitswap/providerquerymanager" bssession "github.com/ipfs/go-bitswap/session" + bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" bssm "github.com/ipfs/go-bitswap/sessionmanager" bsspm "github.com/ipfs/go-bitswap/sessionpeermanager" bswm "github.com/ipfs/go-bitswap/wantmanager" @@ -113,24 +115,30 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, return bsmq.New(ctx, p, network) } - wm := bswm.New(ctx, bspm.New(ctx, peerQueueFactory)) + sim := bssim.New() + bpm := bsbpm.New() + pm := bspm.New(ctx, peerQueueFactory, network.Self()) + wm := bswm.New(ctx, pm, sim, bpm) pqm := bspqm.New(ctx, network) - sessionFactory := func(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter, + sessionFactory := func(ctx context.Context, id uint64, spm bssession.SessionPeerManager, + sim *bssim.SessionInterestManager, + pm bssession.PeerManager, + bpm *bsbpm.BlockPresenceManager, notif notifications.PubSub, provSearchDelay time.Duration, - rebroadcastDelay delay.D) bssm.Session { - return bssession.New(ctx, id, wm, pm, srs, notif, provSearchDelay, rebroadcastDelay) + rebroadcastDelay delay.D, + self peer.ID) bssm.Session { + return bssession.New(ctx, id, wm, spm, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) } - sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.PeerManager { + sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.SessionPeerManager { return bsspm.New(ctx, id, network.ConnectionManager(), pqm) } - sessionRequestSplitterFactory := func(ctx context.Context) bssession.RequestSplitter { - return bssrs.New(ctx) - } notif := 
notifications.New() + sm := bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) + wm.SetSessionManager(sm) + engine := decision.NewEngine(ctx, bstore, network.ConnectionManager(), network.Self()) - engine := decision.NewEngine(ctx, bstore, network.ConnectionManager()) // TODO close the engine with Close() method bs := &Bitswap{ blockstore: bstore, engine: engine, @@ -139,8 +147,10 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, newBlocks: make(chan cid.Cid, HasBlockBufferSize), provideKeys: make(chan cid.Cid, provideKeysBufferSize), wm: wm, + pm: pm, pqm: pqm, - sm: bssm.New(ctx, sessionFactory, sessionPeerManagerFactory, sessionRequestSplitterFactory, notif), + sm: sm, + sim: sim, notif: notif, counters: new(counters), dupMetric: dupHist, @@ -156,7 +166,6 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, option(bs) } - bs.wm.Startup() bs.pqm.Startup() network.SetDelegate(bs) @@ -181,6 +190,8 @@ type Bitswap struct { // the wantlist tracks global wants for bitswap wm *bswm.WantManager + pm *bspm.PeerManager + // the provider query manager manages requests to find providers pqm *bspqm.ProviderQueryManager @@ -215,9 +226,13 @@ type Bitswap struct { allMetric metrics.Histogram sentHistogram metrics.Histogram - // the sessionmanager manages tracking sessions + // the SessionManager routes requests to interested sessions sm *bssm.SessionManager + // the SessionInterestManager keeps track of which sessions are interested + // in which CIDs + sim *bssim.SessionInterestManager + // whether or not to make provide announcements provideEnabled bool @@ -275,14 +290,14 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks // HasBlock announces the existence of a block to this bitswap service. The // service will potentially notify its peers. 
func (bs *Bitswap) HasBlock(blk blocks.Block) error { - return bs.receiveBlocksFrom(context.Background(), "", []blocks.Block{blk}) + return bs.receiveBlocksFrom(context.Background(), "", []blocks.Block{blk}, nil, nil) } // TODO: Some of this stuff really only needs to be done when adding a block // from the user, not when receiving it from the network. // In case you run `git blame` on this comment, I'll save you some time: ask // @whyrusleeping, I don't know the answers you seek. -func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []blocks.Block) error { +func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []blocks.Block, haves []cid.Cid, dontHaves []cid.Cid) error { select { case <-bs.process.Closing(): return errors.New("bitswap is closed") @@ -293,22 +308,20 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []b // If blocks came from the network if from != "" { - // Split blocks into wanted blocks vs duplicates - wanted = make([]blocks.Block, 0, len(blks)) - for _, b := range blks { - if bs.sm.IsWanted(b.Cid()) { - wanted = append(wanted, b) - } else { - log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), from) - } + var notWanted []blocks.Block + wanted, notWanted = bs.sim.SplitWantedUnwanted(blks) + for _, b := range notWanted { + log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), from) } } // Put wanted blocks into blockstore - err := bs.blockstore.PutMany(wanted) - if err != nil { - log.Errorf("Error writing %d blocks to datastore: %s", len(wanted), err) - return err + if len(wanted) > 0 { + err := bs.blockstore.PutMany(wanted) + if err != nil { + log.Errorf("Error writing %d blocks to datastore: %s", len(wanted), err) + return err + } } // NOTE: There exists the possiblity for a race condition here. 
If a user @@ -322,23 +335,15 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []b allKs = append(allKs, b.Cid()) } - wantedKs := allKs - if len(blks) != len(wanted) { - wantedKs = make([]cid.Cid, 0, len(wanted)) - for _, b := range wanted { - wantedKs = append(wantedKs, b.Cid()) - } - } - // Send all block keys (including duplicates) to any sessions that want them. // (The duplicates are needed by sessions for accounting purposes) - bs.sm.ReceiveFrom(from, allKs) + bs.wm.ReceiveFrom(ctx, from, allKs, haves, dontHaves) - // Send wanted block keys to decision engine - bs.engine.AddBlocks(wantedKs) + // Send wanted blocks to decision engine + bs.engine.ReceiveFrom(from, wanted, haves) // Publish the block to any Bitswap clients that had requested blocks. - // (the sessions use this pubsub mechanism to inform clients of received + // (the sessions use this pubsub mechanism to inform clients of incoming // blocks) for _, b := range wanted { bs.notif.Publish(b) @@ -346,9 +351,9 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []b // If the reprovider is enabled, send wanted blocks to reprovider if bs.provideEnabled { - for _, k := range wantedKs { + for _, blk := range wanted { select { - case bs.newBlocks <- k: + case bs.newBlocks <- blk.Cid(): // send block off to be reprovided case <-bs.process.Closing(): return bs.process.Close() @@ -380,20 +385,22 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg iblocks := incoming.Blocks() - if len(iblocks) == 0 { - return - } - - bs.updateReceiveCounters(iblocks) - for _, b := range iblocks { - log.Debugf("[recv] block; cid=%s, peer=%s", b.Cid(), p) + if len(iblocks) > 0 { + bs.updateReceiveCounters(iblocks) + for _, b := range iblocks { + log.Debugf("[recv] block; cid=%s, peer=%s", b.Cid(), p) + } } - // Process blocks - err := bs.receiveBlocksFrom(ctx, p, iblocks) - if err != nil { - log.Warningf("ReceiveMessage recvBlockFrom error: 
%s", err) - return + haves := incoming.Haves() + dontHaves := incoming.DontHaves() + if len(iblocks) > 0 || len(haves) > 0 || len(dontHaves) > 0 { + // Process blocks + err := bs.receiveBlocksFrom(ctx, p, iblocks, haves, dontHaves) + if err != nil { + log.Warningf("ReceiveMessage recvBlockFrom error: %s", err) + return + } } } @@ -479,12 +486,12 @@ func (bs *Bitswap) Close() error { // GetWantlist returns the current local wantlist. func (bs *Bitswap) GetWantlist() []cid.Cid { - entries := bs.wm.CurrentWants() - out := make([]cid.Cid, 0, len(entries)) - for _, e := range entries { - out = append(out, e.Cid) - } - return out + return bs.pm.CurrentWants() +} + +// GetWanthaves returns the current list of want-haves. +func (bs *Bitswap) GetWantHaves() []cid.Cid { + return bs.pm.CurrentWantHaves() } // IsOnline is needed to match go-ipfs-exchange-interface diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 9b7571820..965c94ed6 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -571,8 +571,9 @@ func TestWantlistCleanup(t *testing.T) { defer ig.Close() bg := blocksutil.NewBlockGenerator() - instances := ig.Instances(1)[0] - bswap := instances.Exchange + instances := ig.Instances(2) + instance := instances[0] + bswap := instance.Exchange blocks := bg.Blocks(20) var keys []cid.Cid @@ -580,6 +581,7 @@ func TestWantlistCleanup(t *testing.T) { keys = append(keys, b.Cid()) } + // Once context times out, key should be removed from wantlist ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) defer cancel() _, err := bswap.GetBlock(ctx, keys[0]) @@ -589,10 +591,11 @@ func TestWantlistCleanup(t *testing.T) { time.Sleep(time.Millisecond * 50) - if len(bswap.GetWantlist()) > 0 { + if len(bswap.GetWantHaves()) > 0 { t.Fatal("should not have anyting in wantlist") } + // Once context times out, keys should be removed from wantlist ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50) defer cancel() _, 
err = bswap.GetBlocks(ctx, keys[:10]) @@ -603,29 +606,37 @@ func TestWantlistCleanup(t *testing.T) { <-ctx.Done() time.Sleep(time.Millisecond * 50) - if len(bswap.GetWantlist()) > 0 { + if len(bswap.GetWantHaves()) > 0 { t.Fatal("should not have anyting in wantlist") } + // Send want for single block, with no timeout _, err = bswap.GetBlocks(context.Background(), keys[:1]) if err != nil { t.Fatal(err) } + // Send want for 10 blocks ctx, cancel = context.WithCancel(context.Background()) _, err = bswap.GetBlocks(ctx, keys[10:]) if err != nil { t.Fatal(err) } + // Even after 50 milli-seconds we haven't explicitly cancelled anything + // and no timeouts have expired, so we should have 11 want-haves time.Sleep(time.Millisecond * 50) - if len(bswap.GetWantlist()) != 5 { - t.Fatal("should have 5 keys in wantlist") + if len(bswap.GetWantHaves()) != 11 { + t.Fatal("should have 11 keys in wantlist") } + // Cancel the timeout for the request for 10 blocks. This should remove + // the want-haves cancel() + + // Once the cancel is processed, we are left with the request for 1 block time.Sleep(time.Millisecond * 50) - if !(len(bswap.GetWantlist()) == 1 && bswap.GetWantlist()[0] == keys[0]) { + if !(len(bswap.GetWantHaves()) == 1 && bswap.GetWantHaves()[0] == keys[0]) { t.Fatal("should only have keys[0] in wantlist") } } diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/bitswap_with_sessions_test.go index db7255c80..77ad03b2e 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -30,12 +30,15 @@ func TestBasicSessions(t *testing.T) { a := inst[0] b := inst[1] + // Add a block to Peer B if err := b.Blockstore().Put(block); err != nil { t.Fatal(err) } + // Create a session on Peer A sesa := a.Exchange.NewSession(ctx) + // Get the block blkout, err := sesa.GetBlock(ctx, block.Cid()) if err != nil { t.Fatal(err) @@ -74,6 +77,7 @@ func TestSessionBetweenPeers(t *testing.T) { inst := ig.Instances(10) + // Add 101 blocks to Peer A 
blks := bgen.Blocks(101) if err := inst[0].Blockstore().PutMany(blks); err != nil { t.Fatal(err) @@ -84,6 +88,7 @@ func TestSessionBetweenPeers(t *testing.T) { cids = append(cids, blk.Cid()) } + // Create a session on Peer B ses := inst[1].Exchange.NewSession(ctx) if _, err := ses.GetBlock(ctx, cids[0]); err != nil { t.Fatal(err) @@ -91,6 +96,7 @@ func TestSessionBetweenPeers(t *testing.T) { blks = blks[1:] cids = cids[1:] + // Fetch blocks with the session, 10 at a time for i := 0; i < 10; i++ { ch, err := ses.GetBlocks(ctx, cids[i*10:(i+1)*10]) if err != nil { @@ -127,6 +133,7 @@ func TestSessionSplitFetch(t *testing.T) { inst := ig.Instances(11) + // Add 10 distinct blocks to each of 10 peers blks := bgen.Blocks(100) for i := 0; i < 10; i++ { if err := inst[i].Blockstore().PutMany(blks[i*10 : (i+1)*10]); err != nil { @@ -139,6 +146,7 @@ func TestSessionSplitFetch(t *testing.T) { cids = append(cids, blk.Cid()) } + // Create a session on the remaining peer and fetch all the blocks 10 at a time ses := inst[10].Exchange.NewSession(ctx).(*bssession.Session) ses.SetBaseTickDelay(time.Millisecond * 10) @@ -169,6 +177,7 @@ func TestFetchNotConnected(t *testing.T) { other := ig.Next() + // Provide 10 blocks on Peer A blks := bgen.Blocks(10) for _, block := range blks { if err := other.Exchange.HasBlock(block); err != nil { @@ -181,6 +190,9 @@ func TestFetchNotConnected(t *testing.T) { cids = append(cids, blk.Cid()) } + // Request blocks with Peer B + // Note: Peer A and Peer B are not initially connected, so this tests + // that Peer B will search for and find Peer A thisNode := ig.Next() ses := thisNode.Exchange.NewSession(ctx).(*bssession.Session) ses.SetBaseTickDelay(time.Millisecond * 10) @@ -198,6 +210,81 @@ func TestFetchNotConnected(t *testing.T) { t.Fatal(err) } } + +func TestFetchAfterDisconnect(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + vnet := getVirtualNetwork() + ig := 
testinstance.NewTestInstanceGenerator(vnet, bitswap.ProviderSearchDelay(10*time.Millisecond)) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + inst := ig.Instances(2) + peerA := inst[0] + peerB := inst[1] + + // Provide 5 blocks on Peer A + blks := bgen.Blocks(10) + var cids []cid.Cid + for _, blk := range blks { + cids = append(cids, blk.Cid()) + } + + firstBlks := blks[:5] + for _, block := range firstBlks { + if err := peerA.Exchange.HasBlock(block); err != nil { + t.Fatal(err) + } + } + + // Request all blocks with Peer B + ses := peerB.Exchange.NewSession(ctx).(*bssession.Session) + ses.SetBaseTickDelay(time.Millisecond * 10) + + ch, err := ses.GetBlocks(ctx, cids) + if err != nil { + t.Fatal(err) + } + + // Should get first 5 blocks + var got []blocks.Block + for i := 0; i < 5; i++ { + b := <-ch + got = append(got, b) + } + + if err := assertBlockLists(got, blks[:5]); err != nil { + t.Fatal(err) + } + + // Break connection + err = peerA.Adapter.DisconnectFrom(ctx, peerB.Peer) + if err != nil { + t.Fatal(err) + } + + // Provide remaining blocks + lastBlks := blks[5:] + for _, block := range lastBlks { + if err := peerA.Exchange.HasBlock(block); err != nil { + t.Fatal(err) + } + } + + // Peer B should call FindProviders() and find Peer A + + // Should get last 5 blocks + for i := 0; i < 5; i++ { + b := <-ch + got = append(got, b) + } + + if err := assertBlockLists(got, blks); err != nil { + t.Fatal(err) + } +} + func TestInterestCacheOverflow(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/bitswap/blockpresencemanager/blockpresencemanager.go b/bitswap/blockpresencemanager/blockpresencemanager.go new file mode 100644 index 000000000..87821f2f8 --- /dev/null +++ b/bitswap/blockpresencemanager/blockpresencemanager.go @@ -0,0 +1,111 @@ +package blockpresencemanager + +import ( + "sync" + + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +// BlockPresenceManager keeps 
track of which peers have indicated that they +// have or explicitly don't have a block +type BlockPresenceManager struct { + sync.RWMutex + presence map[cid.Cid]map[peer.ID]bool +} + +func New() *BlockPresenceManager { + return &BlockPresenceManager{ + presence: make(map[cid.Cid]map[peer.ID]bool), + } +} + +// ReceiveFrom is called when a peer sends us information about which blocks +// it has and does not have +func (bpm *BlockPresenceManager) ReceiveFrom(p peer.ID, haves []cid.Cid, dontHaves []cid.Cid) { + bpm.Lock() + defer bpm.Unlock() + + for _, c := range haves { + bpm.updateBlockPresence(p, c, true) + } + for _, c := range dontHaves { + bpm.updateBlockPresence(p, c, false) + } +} + +func (bpm *BlockPresenceManager) updateBlockPresence(p peer.ID, c cid.Cid, present bool) { + _, ok := bpm.presence[c] + if !ok { + bpm.presence[c] = make(map[peer.ID]bool) + } + + // Make sure not to change HAVE to DONT_HAVE + has, pok := bpm.presence[c][p] + if pok && has { + return + } + bpm.presence[c][p] = present +} + +// PeerHasBlock indicates whether the given peer has sent a HAVE for the given +// cid +func (bpm *BlockPresenceManager) PeerHasBlock(p peer.ID, c cid.Cid) bool { + bpm.RLock() + defer bpm.RUnlock() + + return bpm.presence[c][p] +} + +// PeerDoesNotHaveBlock indicates whether the given peer has sent a DONT_HAVE +// for the given cid +func (bpm *BlockPresenceManager) PeerDoesNotHaveBlock(p peer.ID, c cid.Cid) bool { + bpm.RLock() + defer bpm.RUnlock() + + have, known := bpm.presence[c][p] + return known && !have +} + +// Filters the keys such that all the given peers have received a DONT_HAVE +// for a key. +// This allows us to know if we've exhausted all possibilities of finding +// the key with the peers we know about. 
+func (bpm *BlockPresenceManager) AllPeersDoNotHaveBlock(peers []peer.ID, ks []cid.Cid) []cid.Cid { + bpm.RLock() + defer bpm.RUnlock() + + var res []cid.Cid + for _, c := range ks { + if bpm.allDontHave(peers, c) { + res = append(res, c) + } + } + return res +} + +func (bpm *BlockPresenceManager) allDontHave(peers []peer.ID, c cid.Cid) bool { + // Check if we know anything about the cid's block presence + ps, cok := bpm.presence[c] + if !cok { + return false + } + + // Check if we explicitly know that all the given peers do not have the cid + for _, p := range peers { + if has, pok := ps[p]; !pok || has { + return false + } + } + return true +} + +// RemoveKeys cleans up the given keys from the block presence map +func (bpm *BlockPresenceManager) RemoveKeys(ks []cid.Cid) { + bpm.Lock() + defer bpm.Unlock() + + for _, c := range ks { + delete(bpm.presence, c) + } +} diff --git a/bitswap/blockpresencemanager/blockpresencemanager_test.go b/bitswap/blockpresencemanager/blockpresencemanager_test.go new file mode 100644 index 000000000..6154f4dff --- /dev/null +++ b/bitswap/blockpresencemanager/blockpresencemanager_test.go @@ -0,0 +1,239 @@ +package blockpresencemanager + +import ( + "fmt" + "testing" + + "github.com/ipfs/go-bitswap/testutil" + peer "github.com/libp2p/go-libp2p-core/peer" + + cid "github.com/ipfs/go-cid" +) + +const ( + expHasFalseMsg = "Expected PeerHasBlock to return false" + expHasTrueMsg = "Expected PeerHasBlock to return true" + expDoesNotHaveFalseMsg = "Expected PeerDoesNotHaveBlock to return false" + expDoesNotHaveTrueMsg = "Expected PeerDoesNotHaveBlock to return true" +) + +func TestBlockPresenceManager(t *testing.T) { + bpm := New() + + p := testutil.GeneratePeers(1)[0] + cids := testutil.GenerateCids(2) + c0 := cids[0] + c1 := cids[1] + + // Nothing stored yet, both PeerHasBlock and PeerDoesNotHaveBlock should + // return false + if bpm.PeerHasBlock(p, c0) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c0) { + 
t.Fatal(expDoesNotHaveFalseMsg) + } + + // HAVE cid0 / DONT_HAVE cid1 + bpm.ReceiveFrom(p, []cid.Cid{c0}, []cid.Cid{c1}) + + // Peer has received HAVE for cid0 + if !bpm.PeerHasBlock(p, c0) { + t.Fatal(expHasTrueMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c0) { + t.Fatal(expDoesNotHaveFalseMsg) + } + + // Peer has received DONT_HAVE for cid1 + if !bpm.PeerDoesNotHaveBlock(p, c1) { + t.Fatal(expDoesNotHaveTrueMsg) + } + if bpm.PeerHasBlock(p, c1) { + t.Fatal(expHasFalseMsg) + } + + // HAVE cid1 / DONT_HAVE cid0 + bpm.ReceiveFrom(p, []cid.Cid{c1}, []cid.Cid{c0}) + + // DONT_HAVE cid0 should NOT over-write earlier HAVE cid0 + if bpm.PeerDoesNotHaveBlock(p, c0) { + t.Fatal(expDoesNotHaveFalseMsg) + } + if !bpm.PeerHasBlock(p, c0) { + t.Fatal(expHasTrueMsg) + } + + // HAVE cid1 should over-write earlier DONT_HAVE cid1 + if !bpm.PeerHasBlock(p, c1) { + t.Fatal(expHasTrueMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c1) { + t.Fatal(expDoesNotHaveFalseMsg) + } + + // Remove cid0 + bpm.RemoveKeys([]cid.Cid{c0}) + + // Nothing stored, both PeerHasBlock and PeerDoesNotHaveBlock should + // return false + if bpm.PeerHasBlock(p, c0) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c0) { + t.Fatal(expDoesNotHaveFalseMsg) + } + + // Remove cid1 + bpm.RemoveKeys([]cid.Cid{c1}) + + // Nothing stored, both PeerHasBlock and PeerDoesNotHaveBlock should + // return false + if bpm.PeerHasBlock(p, c1) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c1) { + t.Fatal(expDoesNotHaveFalseMsg) + } +} + +func TestAddRemoveMulti(t *testing.T) { + bpm := New() + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + cids := testutil.GenerateCids(3) + c0 := cids[0] + c1 := cids[1] + c2 := cids[2] + + // p0: HAVE cid0, cid1 / DONT_HAVE cid1, cid2 + // p1: HAVE cid1, cid2 / DONT_HAVE cid0 + bpm.ReceiveFrom(p0, []cid.Cid{c0, c1}, []cid.Cid{c1, c2}) + bpm.ReceiveFrom(p1, []cid.Cid{c1, c2}, []cid.Cid{c0}) + + // Peer 0 should end up with + // - HAVE cid0 + 
// - HAVE cid1 + // - DONT_HAVE cid2 + if !bpm.PeerHasBlock(p0, c0) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerHasBlock(p0, c1) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerDoesNotHaveBlock(p0, c2) { + t.Fatal(expDoesNotHaveTrueMsg) + } + + // Peer 1 should end up with + // - HAVE cid1 + // - HAVE cid2 + // - DONT_HAVE cid0 + if !bpm.PeerHasBlock(p1, c1) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerHasBlock(p1, c2) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerDoesNotHaveBlock(p1, c0) { + t.Fatal(expDoesNotHaveTrueMsg) + } + + // Remove cid1 and cid2. Should end up with + // Peer 0: HAVE cid0 + // Peer 1: DONT_HAVE cid0 + bpm.RemoveKeys([]cid.Cid{c1, c2}) + if !bpm.PeerHasBlock(p0, c0) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerDoesNotHaveBlock(p1, c0) { + t.Fatal(expDoesNotHaveTrueMsg) + } + + // The other keys should have been cleared, so both HasBlock() and + // DoesNotHaveBlock() should return false + if bpm.PeerHasBlock(p0, c1) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p0, c1) { + t.Fatal(expDoesNotHaveFalseMsg) + } + if bpm.PeerHasBlock(p0, c2) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p0, c2) { + t.Fatal(expDoesNotHaveFalseMsg) + } + if bpm.PeerHasBlock(p1, c1) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p1, c1) { + t.Fatal(expDoesNotHaveFalseMsg) + } + if bpm.PeerHasBlock(p1, c2) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p1, c2) { + t.Fatal(expDoesNotHaveFalseMsg) + } +} + +func TestAllPeersDoNotHaveBlock(t *testing.T) { + bpm := New() + + peers := testutil.GeneratePeers(3) + p0 := peers[0] + p1 := peers[1] + p2 := peers[2] + + cids := testutil.GenerateCids(3) + c0 := cids[0] + c1 := cids[1] + c2 := cids[2] + + // c0 c1 c2 + // p0 ? N N + // p1 N Y ? 
+ // p2 Y Y N + bpm.ReceiveFrom(p0, []cid.Cid{}, []cid.Cid{c1, c2}) + bpm.ReceiveFrom(p1, []cid.Cid{c1}, []cid.Cid{c0}) + bpm.ReceiveFrom(p2, []cid.Cid{c0, c1}, []cid.Cid{c2}) + + type testcase struct { + peers []peer.ID + ks []cid.Cid + exp []cid.Cid + } + + testcases := []testcase{ + testcase{[]peer.ID{p0}, []cid.Cid{c0}, []cid.Cid{}}, + testcase{[]peer.ID{p1}, []cid.Cid{c0}, []cid.Cid{c0}}, + testcase{[]peer.ID{p2}, []cid.Cid{c0}, []cid.Cid{}}, + + testcase{[]peer.ID{p0}, []cid.Cid{c1}, []cid.Cid{c1}}, + testcase{[]peer.ID{p1}, []cid.Cid{c1}, []cid.Cid{}}, + testcase{[]peer.ID{p2}, []cid.Cid{c1}, []cid.Cid{}}, + + testcase{[]peer.ID{p0}, []cid.Cid{c2}, []cid.Cid{c2}}, + testcase{[]peer.ID{p1}, []cid.Cid{c2}, []cid.Cid{}}, + testcase{[]peer.ID{p2}, []cid.Cid{c2}, []cid.Cid{c2}}, + + // p0 recieved DONT_HAVE for c1 & c2 (but not for c0) + testcase{[]peer.ID{p0}, []cid.Cid{c0, c1, c2}, []cid.Cid{c1, c2}}, + testcase{[]peer.ID{p0, p1}, []cid.Cid{c0, c1, c2}, []cid.Cid{}}, + // Both p0 and p2 received DONT_HAVE for c2 + testcase{[]peer.ID{p0, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{c2}}, + testcase{[]peer.ID{p0, p1, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{}}, + } + + for i, tc := range testcases { + if !testutil.MatchKeysIgnoreOrder( + bpm.AllPeersDoNotHaveBlock(tc.peers, tc.ks), + tc.exp, + ) { + t.Fatal(fmt.Sprintf("test case %d failed: expected matching keys", i)) + } + } +} diff --git a/bitswap/decision/engine.go b/bitswap/decision/engine.go index 7a58bb3f6..2e183b067 100644 --- a/bitswap/decision/engine.go +++ b/bitswap/decision/engine.go @@ -8,8 +8,11 @@ import ( "time" "github.com/google/uuid" + bsmsg "github.com/ipfs/go-bitswap/message" + pb "github.com/ipfs/go-bitswap/message/pb" wl "github.com/ipfs/go-bitswap/wantlist" + blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" bstore "github.com/ipfs/go-ipfs-blockstore" logging "github.com/ipfs/go-log" @@ -56,10 +59,10 @@ var log = logging.Logger("engine") const ( // outboxChanBuffer must be 0 to 
prevent stale messages from being sent outboxChanBuffer = 0 - // Number of concurrent workers that pull tasks off the request queue - taskWorkerCount = 8 - // maxMessageSize is the maximum size of the batched payload - maxMessageSize = 512 * 1024 + // targetMessageSize is the ideal size of the batched payload. We try to + // pop this much data off the request queue, but it may be a little more + // or less depending on what's in the queue. + targetMessageSize = 16 * 1024 // tagFormat is the tag given to peers associated an engine tagFormat = "bs-engine-%s-%s" @@ -82,6 +85,13 @@ const ( longTermScore = 10 // this is a high tag but it grows _very_ slowly. shortTermScore = 10 // this is a high tag but it'll go away quickly if we aren't using the peer. + // maxBlockSizeReplaceHasWithBlock is the maximum size of the block in + // bytes up to which we will replace a want-have with a want-block + maxBlockSizeReplaceHasWithBlock = 1024 + + // Number of concurrent workers that pull tasks off the request queue + taskWorkerCount = 8 + // Number of concurrent workers that process requests to the blockstore blockstoreWorkerCount = 128 ) @@ -137,7 +147,8 @@ type Engine struct { tagQueued, tagUseful string - lock sync.Mutex // protects the fields immediatly below + lock sync.RWMutex // protects the fields immediatly below + // ledgerMap lists Ledgers by their Partner key. 
ledgerMap map[peer.ID]*ledger @@ -145,24 +156,39 @@ type Engine struct { taskWorkerLock sync.Mutex taskWorkerCount int + + // maxBlockSizeReplaceHasWithBlock is the maximum size of the block in + // bytes up to which we will replace a want-have with a want-block + maxBlockSizeReplaceHasWithBlock int + + self peer.ID } // NewEngine creates a new block sending engine for the given block store -func NewEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger) *Engine { +func NewEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, self peer.ID) *Engine { + return newEngine(ctx, bs, peerTagger, self, maxBlockSizeReplaceHasWithBlock) +} + +// This constructor is used by the tests +func newEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, self peer.ID, maxReplaceSize int) *Engine { e := &Engine{ - ledgerMap: make(map[peer.ID]*ledger), - bsm: newBlockstoreManager(ctx, bs, blockstoreWorkerCount), - peerTagger: peerTagger, - outbox: make(chan (<-chan *Envelope), outboxChanBuffer), - workSignal: make(chan struct{}, 1), - ticker: time.NewTicker(time.Millisecond * 100), - taskWorkerCount: taskWorkerCount, + ledgerMap: make(map[peer.ID]*ledger), + bsm: newBlockstoreManager(ctx, bs, blockstoreWorkerCount), + peerTagger: peerTagger, + outbox: make(chan (<-chan *Envelope), outboxChanBuffer), + workSignal: make(chan struct{}, 1), + ticker: time.NewTicker(time.Millisecond * 100), + maxBlockSizeReplaceHasWithBlock: maxReplaceSize, + taskWorkerCount: taskWorkerCount, + self: self, } e.tagQueued = fmt.Sprintf(tagFormat, "queued", uuid.New().String()) e.tagUseful = fmt.Sprintf(tagFormat, "useful", uuid.New().String()) e.peerRequestQueue = peertaskqueue.New( peertaskqueue.OnPeerAddedHook(e.onPeerAdded), - peertaskqueue.OnPeerRemovedHook(e.onPeerRemoved)) + peertaskqueue.OnPeerRemovedHook(e.onPeerRemoved), + peertaskqueue.TaskMerger(newTaskMerger()), + peertaskqueue.IgnoreFreezing(true)) go e.scoreWorker(ctx) return e } @@ -310,9 
+336,9 @@ func (e *Engine) LedgerForPeer(p peer.ID) *Receipt { } } -// Each taskWorker pulls items off the request queue up and adds them to an -// envelope. The envelope is passed off to the bitswap workers, which send -// the message to the network. +// Each taskWorker pulls items off the request queue up to the maximum size +// and adds them to an envelope that is passed off to the bitswap workers, +// which send the message to the network. func (e *Engine) taskWorker(ctx context.Context) { defer e.taskWorkerExit() for { @@ -349,53 +375,91 @@ func (e *Engine) taskWorkerExit() { // context is cancelled before the next Envelope can be created. func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { for { - nextTask := e.peerRequestQueue.PopBlock() - for nextTask == nil { + // Pop some tasks off the request queue + p, nextTasks, pendingBytes := e.peerRequestQueue.PopTasks(targetMessageSize) + for len(nextTasks) == 0 { select { case <-ctx.Done(): return nil, ctx.Err() case <-e.workSignal: - nextTask = e.peerRequestQueue.PopBlock() + p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(targetMessageSize) case <-e.ticker.C: + // When a task is cancelled, the queue may be "frozen" for a + // period of time. We periodically "thaw" the queue to make + // sure it doesn't get stuck in a frozen state. e.peerRequestQueue.ThawRound() - nextTask = e.peerRequestQueue.PopBlock() + p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(targetMessageSize) } } - // with a task in hand, we're ready to prepare the envelope... 
- blockCids := cid.NewSet() - for _, t := range nextTask.Tasks { - blockCids.Add(t.Identifier.(cid.Cid)) + // Create a new message + msg := bsmsg.New(true) + + // log.Debugf(" %s got %d tasks", lu.P(e.self), len(nextTasks)) + + // Amount of data in the request queue still waiting to be popped + msg.SetPendingBytes(int32(pendingBytes)) + + // Split out want-blocks, want-haves and DONT_HAVEs + blockCids := make([]cid.Cid, 0, len(nextTasks)) + blockTasks := make(map[cid.Cid]*taskData, len(nextTasks)) + for _, t := range nextTasks { + c := t.Topic.(cid.Cid) + td := t.Data.(*taskData) + if td.HaveBlock { + if td.IsWantBlock { + blockCids = append(blockCids, c) + blockTasks[c] = td + } else { + // Add HAVES to the message + msg.AddHave(c) + } + } else { + // Add DONT_HAVEs to the message + msg.AddDontHave(c) + } } - blks, err := e.bsm.getBlocks(ctx, blockCids.Keys()) + + // Fetch blocks from datastore + blks, err := e.bsm.getBlocks(ctx, blockCids) if err != nil { // we're dropping the envelope but that's not an issue in practice. return nil, err } - msg := bsmsg.New(true) - for _, b := range blks { - msg.AddBlock(b) + for c, t := range blockTasks { + blk := blks[c] + // If the block was not found (it has been removed) + if blk == nil { + // If the client requested DONT_HAVE, add DONT_HAVE to the message + if t.SendDontHave { + // log.Debugf(" make evlp %s->%s DONT_HAVE (expected block) %s", lu.P(e.self), lu.P(p), lu.C(c)) + msg.AddDontHave(c) + } + } else { + // Add the block to the message + // log.Debugf(" make evlp %s->%s block: %s (%d bytes)", lu.P(e.self), lu.P(p), lu.C(c), len(blk.RawData())) + msg.AddBlock(blk) + } } + // If there's nothing in the message, bail out if msg.Empty() { - // If we don't have the block, don't hold that against the peer - // make sure to update that the task has been 'completed' - nextTask.Done(nextTask.Tasks) + e.peerRequestQueue.TasksDone(p, nextTasks...) 
continue } + // log.Debugf(" sending message %s->%s (%d blks / %d presences / %d bytes)\n", lu.P(e.self), lu.P(p), blkCount, presenceCount, msg.Size()) return &Envelope{ - Peer: nextTask.Target, + Peer: p, Message: msg, Sent: func() { - nextTask.Done(nextTask.Tasks) - select { - case e.workSignal <- struct{}{}: - // work completing may mean that our queue will provide new - // work to be done. - default: - } + // Once the message has been sent, signal the request queue so + // it can be cleared from the queue + e.peerRequestQueue.TasksDone(p, nextTasks...) + + // Signal the worker to check for more work + e.signalNewWork() }, }, nil } @@ -408,8 +472,8 @@ func (e *Engine) Outbox() <-chan (<-chan *Envelope) { // Peers returns a slice of Peers with whom the local node has active sessions. func (e *Engine) Peers() []peer.ID { - e.lock.Lock() - defer e.lock.Unlock() + e.lock.RLock() + defer e.lock.RUnlock() response := make([]peer.ID, 0, len(e.ledgerMap)) @@ -419,9 +483,25 @@ func (e *Engine) Peers() []peer.ID { return response } -// MessageReceived performs book-keeping. Returns error if passed invalid -// arguments. +// MessageReceived is called when a message is received from a remote peer. 
+// For each item in the wantlist, add a want-have or want-block entry to the +// request queue (this is later popped off by the workerTasks) func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) { + entries := m.Wantlist() + + // if len(entries) > 0 { + // log.Debugf("engine-%s received message from %s with %d entries\n", lu.P(e.self), lu.P(p), len(entries)) + // for _, et := range entries { + // if !et.Cancel { + // if et.WantType == pb.Message_Wantlist_Have { + // log.Debugf(" recv %s<-%s: want-have %s\n", lu.P(e.self), lu.P(p), lu.C(et.Cid)) + // } else { + // log.Debugf(" recv %s<-%s: want-block %s\n", lu.P(e.self), lu.P(p), lu.C(et.Cid)) + // } + // } + // } + // } + if m.Empty() { log.Debugf("received empty message from %s", p) } @@ -434,12 +514,10 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap }() // Get block sizes - entries := m.Wantlist() + wants, cancels := e.splitWantsCancels(entries) wantKs := cid.NewSet() - for _, entry := range entries { - if !entry.Cancel { - wantKs.Add(entry.Cid) - } + for _, entry := range wants { + wantKs.Add(entry.Cid) } blockSizes, err := e.bsm.getBlockSizes(ctx, wantKs.Keys()) if err != nil { @@ -447,78 +525,186 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap return } + // Get the ledger for the peer l := e.findOrCreate(p) l.lk.Lock() defer l.lk.Unlock() + + // Record how many bytes were received in the ledger + blks := m.Blocks() + for _, block := range blks { + log.Debugf("got block %s %d bytes", block, len(block.RawData())) + l.ReceivedBytes(len(block.RawData())) + } + + // If the peer sent a full wantlist, replace the ledger's wantlist if m.Full() { l.wantList = wl.New() } - var msgSize int var activeEntries []peertask.Task - for _, entry := range m.Wantlist() { - if entry.Cancel { - log.Debugf("%s cancel %s", p, entry.Cid) - l.CancelWant(entry.Cid) + + // Remove cancelled blocks from the queue + for _, entry := 
range cancels { + // log.Debugf("%s<-%s cancel %s", lu.P(e.self), lu.P(p), lu.C(entry.Cid)) + if l.CancelWant(entry.Cid) { e.peerRequestQueue.Remove(entry.Cid, p) - } else { - log.Debugf("wants %s - %d", entry.Cid, entry.Priority) - l.Wants(entry.Cid, entry.Priority) - blockSize, ok := blockSizes[entry.Cid] - if ok { - // we have the block + } + } + + // For each want-have / want-block + for _, entry := range wants { + c := entry.Cid + blockSize, found := blockSizes[entry.Cid] + + // Add each want-have / want-block to the ledger + l.Wants(c, entry.Priority, entry.WantType) + + // If the block was not found + if !found { + // Only add the task to the queue if the requester wants a DONT_HAVE + if entry.SendDontHave { newWorkExists = true - if msgSize+blockSize > maxMessageSize { - e.peerRequestQueue.PushBlock(p, activeEntries...) - activeEntries = []peertask.Task{} - msgSize = 0 + isWantBlock := false + if entry.WantType == pb.Message_Wantlist_Block { + isWantBlock = true } - activeEntries = append(activeEntries, peertask.Task{Identifier: entry.Cid, Priority: entry.Priority}) - msgSize += blockSize + + // if isWantBlock { + // log.Debugf(" put rq %s->%s %s as want-block (not found)\n", lu.P(e.self), lu.P(p), lu.C(entry.Cid)) + // } else { + // log.Debugf(" put rq %s->%s %s as want-have (not found)\n", lu.P(e.self), lu.P(p), lu.C(entry.Cid)) + // } + + activeEntries = append(activeEntries, peertask.Task{ + Topic: c, + Priority: entry.Priority, + Work: bsmsg.BlockPresenceSize(c), + Data: &taskData{ + BlockSize: 0, + HaveBlock: false, + IsWantBlock: isWantBlock, + SendDontHave: entry.SendDontHave, + }, + }) + } + // log.Debugf(" not putting rq %s->%s %s (not found, SendDontHave false)\n", lu.P(e.self), lu.P(p), lu.C(entry.Cid)) + } else { + // The block was found, add it to the queue + newWorkExists = true + + isWantBlock := e.sendAsBlock(entry.WantType, blockSize) + + // if isWantBlock { + // log.Debugf(" put rq %s->%s %s as want-block (%d bytes)\n", lu.P(e.self), 
lu.P(p), lu.C(entry.Cid), blockSize) + // } else { + // log.Debugf(" put rq %s->%s %s as want-have (%d bytes)\n", lu.P(e.self), lu.P(p), lu.C(entry.Cid), blockSize) + // } + + // entrySize is the amount of space the entry takes up in the + // message we send to the recipient. If we're sending a block, the + // entrySize is the size of the block. Otherwise it's the size of + // a block presence entry. + entrySize := blockSize + if !isWantBlock { + entrySize = bsmsg.BlockPresenceSize(c) } + activeEntries = append(activeEntries, peertask.Task{ + Topic: c, + Priority: entry.Priority, + Work: entrySize, + Data: &taskData{ + BlockSize: blockSize, + HaveBlock: true, + IsWantBlock: isWantBlock, + SendDontHave: entry.SendDontHave, + }, + }) } } + + // Push entries onto the request queue if len(activeEntries) > 0 { - e.peerRequestQueue.PushBlock(p, activeEntries...) + e.peerRequestQueue.PushTasks(p, activeEntries...) } - for _, block := range m.Blocks() { - log.Debugf("got block %s %d bytes", block, len(block.RawData())) - l.ReceivedBytes(len(block.RawData())) +} + +// Split the want-have / want-block entries from the cancel entries +func (e *Engine) splitWantsCancels(es []bsmsg.Entry) ([]bsmsg.Entry, []bsmsg.Entry) { + wants := make([]bsmsg.Entry, 0, len(es)) + cancels := make([]bsmsg.Entry, 0, len(es)) + for _, et := range es { + if et.Cancel { + cancels = append(cancels, et) + } else { + wants = append(wants, et) + } } + return wants, cancels } -func (e *Engine) addBlocks(ks []cid.Cid) { - work := false +// ReceiveFrom is called when new blocks are received and added to the block +// store, meaning there may be peers who want those blocks, so we should send +// the blocks to them. 
+func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block, haves []cid.Cid) { + if len(blks) == 0 { + return + } + + // Get the size of each block + blockSizes := make(map[cid.Cid]int, len(blks)) + for _, blk := range blks { + blockSizes[blk.Cid()] = len(blk.RawData()) + } + // Check each peer to see if it wants one of the blocks we received + work := false + e.lock.RLock() for _, l := range e.ledgerMap { - l.lk.Lock() - for _, k := range ks { + l.lk.RLock() + + for _, b := range blks { + k := b.Cid() + if entry, ok := l.WantListContains(k); ok { - e.peerRequestQueue.PushBlock(l.Partner, peertask.Task{ - Identifier: entry.Cid, - Priority: entry.Priority, - }) work = true + + blockSize := blockSizes[k] + isWantBlock := e.sendAsBlock(entry.WantType, blockSize) + + // if isWantBlock { + // log.Debugf(" add-block put rq %s->%s %s as want-block (%d bytes)\n", lu.P(e.self), lu.P(l.Partner), lu.C(k), blockSize) + // } else { + // log.Debugf(" add-block put rq %s->%s %s as want-have (%d bytes)\n", lu.P(e.self), lu.P(l.Partner), lu.C(k), blockSize) + // } + + entrySize := blockSize + if !isWantBlock { + entrySize = bsmsg.BlockPresenceSize(k) + } + + e.peerRequestQueue.PushTasks(l.Partner, peertask.Task{ + Topic: entry.Cid, + Priority: entry.Priority, + Work: entrySize, + Data: &taskData{ + BlockSize: blockSize, + HaveBlock: true, + IsWantBlock: isWantBlock, + SendDontHave: false, + }, + }) } } - l.lk.Unlock() + l.lk.RUnlock() } + e.lock.RUnlock() if work { e.signalNewWork() } } -// AddBlocks is called when new blocks are received and added to a block store, -// meaning there may be peers who want those blocks, so we should send the blocks -// to them. -func (e *Engine) AddBlocks(ks []cid.Cid) { - e.lock.Lock() - defer e.lock.Unlock() - - e.addBlocks(ks) -} - // TODO add contents of m.WantList() to my local wantlist? NB: could introduce // race conditions where I send a message, but MessageSent gets handled after // MessageReceived. 
The information in the local wantlist could become @@ -532,9 +718,19 @@ func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) { l.lk.Lock() defer l.lk.Unlock() + // Remove sent blocks from the want list for the peer for _, block := range m.Blocks() { l.SentBytes(len(block.RawData())) - l.wantList.Remove(block.Cid()) + l.wantList.RemoveType(block.Cid(), pb.Message_Wantlist_Block) + } + + // Remove sent block presences from the want list for the peer + for _, bp := range m.BlockPresences() { + // TODO: record block presence bytes as well? + // l.SentBytes(?) + if bp.Type == pb.Message_Have { + l.wantList.RemoveType(bp.Cid, pb.Message_Wantlist_Have) + } } } @@ -548,6 +744,7 @@ func (e *Engine) PeerConnected(p peer.ID) { l = newLedger(p) e.ledgerMap[p] = l } + l.lk.Lock() defer l.lk.Unlock() l.ref++ @@ -561,6 +758,7 @@ func (e *Engine) PeerDisconnected(p peer.ID) { if !ok { return } + l.lk.Lock() defer l.lk.Unlock() l.ref-- @@ -569,6 +767,13 @@ func (e *Engine) PeerDisconnected(p peer.ID) { } } +// If the want is a want-have, and it's below a certain size, send the full +// block (instead of sending a HAVE) +func (e *Engine) sendAsBlock(wantType pb.Message_Wantlist_WantType, blockSize int) bool { + isWantBlock := wantType == pb.Message_Wantlist_Block + return isWantBlock || blockSize <= e.maxBlockSizeReplaceHasWithBlock +} + func (e *Engine) numBytesSentTo(p peer.ID) uint64 { // NB not threadsafe return e.findOrCreate(p).Accounting.BytesSent @@ -581,9 +786,20 @@ func (e *Engine) numBytesReceivedFrom(p peer.ID) uint64 { // ledger lazily instantiates a ledger func (e *Engine) findOrCreate(p peer.ID) *ledger { + // Take a read lock (as it's less expensive) to check if we have a ledger + // for the peer + e.lock.RLock() + l, ok := e.ledgerMap[p] + e.lock.RUnlock() + if ok { + return l + } + + // There's no ledger, so take a write lock, then check again and create the + // ledger if necessary e.lock.Lock() defer e.lock.Unlock() - l, ok := e.ledgerMap[p] + l, ok = 
e.ledgerMap[p] if !ok { l = newLedger(p) e.ledgerMap[p] = l diff --git a/bitswap/decision/engine_test.go b/bitswap/decision/engine_test.go index 09962e1e9..12e7eca21 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/decision/engine_test.go @@ -1,6 +1,7 @@ package decision import ( + "bytes" "context" "errors" "fmt" @@ -9,15 +10,19 @@ import ( "testing" "time" + lu "github.com/ipfs/go-bitswap/logutil" message "github.com/ipfs/go-bitswap/message" + pb "github.com/ipfs/go-bitswap/message/pb" + "github.com/ipfs/go-bitswap/testutil" blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" blockstore "github.com/ipfs/go-ipfs-blockstore" process "github.com/jbenet/goprocess" peer "github.com/libp2p/go-libp2p-core/peer" - testutil "github.com/libp2p/go-libp2p-core/test" + libp2ptest "github.com/libp2p/go-libp2p-core/test" ) type peerTag struct { @@ -86,10 +91,10 @@ type engineSet struct { Blockstore blockstore.Blockstore } -func newEngine(ctx context.Context, idStr string) engineSet { +func newTestEngine(ctx context.Context, idStr string) engineSet { fpt := &fakePeerTagger{} bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - e := NewEngine(ctx, bs, fpt) + e := newEngine(ctx, bs, fpt, "localhost", 0) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) return engineSet{ Peer: peer.ID(idStr), @@ -103,8 +108,8 @@ func newEngine(ctx context.Context, idStr string) engineSet { func TestConsistentAccounting(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - sender := newEngine(ctx, "Ernie") - receiver := newEngine(ctx, "Bert") + sender := newTestEngine(ctx, "Ernie") + receiver := newTestEngine(ctx, "Bert") // Send messages from Ernie to Bert for i := 0; i < 1000; i++ { @@ -138,8 +143,8 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { ctx, cancel := 
context.WithCancel(context.Background()) defer cancel() - sanfrancisco := newEngine(ctx, "sf") - seattle := newEngine(ctx, "sea") + sanfrancisco := newTestEngine(ctx, "sf") + seattle := newTestEngine(ctx, "sea") m := message.New(true) @@ -176,7 +181,7 @@ func peerIsPartner(p peer.ID, e *Engine) bool { func TestOutboxClosedWhenEngineClosed(t *testing.T) { ctx := context.Background() t.SkipNow() // TODO implement *Engine.Close - e := NewEngine(ctx, blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}) + e := newEngine(ctx, blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}, "localhost", 0) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) var wg sync.WaitGroup wg.Add(1) @@ -193,6 +198,616 @@ func TestOutboxClosedWhenEngineClosed(t *testing.T) { } } +func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { + alphabet := "abcdefghijklmnopqrstuvwxyz" + vowels := "aeiou" + + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + for _, letter := range strings.Split(alphabet, "") { + block := blocks.NewBlock([]byte(letter)) + if err := bs.Put(block); err != nil { + t.Fatal(err) + } + } + + partner := libp2ptest.RandPeerIDFatal(t) + // partnerWantBlocks(e, vowels, partner) + + type testCaseEntry struct { + wantBlks string + wantHaves string + sendDontHave bool + } + + type testCaseExp struct { + blks string + haves string + dontHaves string + } + + type testCase struct { + only bool + wls []testCaseEntry + exp []testCaseExp + } + + testCases := []testCase{ + // Just send want-blocks + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: vowels, + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: vowels, + }, + }, + }, + + // Send want-blocks and want-haves + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: vowels, + wantHaves: "fgh", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: vowels, + 
haves: "fgh", + }, + }, + }, + + // Send want-blocks and want-haves, with some want-haves that are not + // present, but without requesting DONT_HAVES + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: vowels, + wantHaves: "fgh123", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: vowels, + haves: "fgh", + }, + }, + }, + + // Send want-blocks and want-haves, with some want-haves that are not + // present, and request DONT_HAVES + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: vowels, + wantHaves: "fgh123", + sendDontHave: true, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: vowels, + haves: "fgh", + dontHaves: "123", + }, + }, + }, + + // Send want-blocks and want-haves, with some want-blocks and want-haves that are not + // present, but without requesting DONT_HAVES + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "aeiou123", + wantHaves: "fgh456", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: "aeiou", + haves: "fgh", + dontHaves: "", + }, + }, + }, + + // Send want-blocks and want-haves, with some want-blocks and want-haves that are not + // present, and request DONT_HAVES + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "aeiou123", + wantHaves: "fgh456", + sendDontHave: true, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: "aeiou", + haves: "fgh", + dontHaves: "123456", + }, + }, + }, + + // Send repeated want-blocks + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "ae", + sendDontHave: false, + }, + testCaseEntry{ + wantBlks: "io", + sendDontHave: false, + }, + testCaseEntry{ + wantBlks: "u", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: "aeiou", + }, + }, + }, + + // Send repeated want-blocks and want-haves + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "ae", + wantHaves: "jk", + sendDontHave: false, + }, + testCaseEntry{ + wantBlks: "io", + wantHaves: "lm", + 
sendDontHave: false, + }, + testCaseEntry{ + wantBlks: "u", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: "aeiou", + haves: "jklm", + }, + }, + }, + + // Send repeated want-blocks and want-haves, with some want-blocks and want-haves that are not + // present, and request DONT_HAVES + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "ae12", + wantHaves: "jk5", + sendDontHave: true, + }, + testCaseEntry{ + wantBlks: "io34", + wantHaves: "lm", + sendDontHave: true, + }, + testCaseEntry{ + wantBlks: "u", + wantHaves: "6", + sendDontHave: true, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: "aeiou", + haves: "jklm", + dontHaves: "123456", + }, + }, + }, + + // Send want-block then want-have for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "a", + sendDontHave: true, + }, + testCaseEntry{ + wantHaves: "a", + sendDontHave: true, + }, + }, + // want-have should be ignored because there was already a + // want-block for the same CID in the queue + exp: []testCaseExp{ + testCaseExp{ + blks: "a", + }, + }, + }, + + // Send want-have then want-block for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantHaves: "b", + sendDontHave: true, + }, + testCaseEntry{ + wantBlks: "b", + sendDontHave: true, + }, + }, + // want-block should overwrite existing want-have + exp: []testCaseExp{ + testCaseExp{ + blks: "b", + }, + }, + }, + + // Send want-block then want-block for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "a", + sendDontHave: true, + }, + testCaseEntry{ + wantBlks: "a", + sendDontHave: true, + }, + }, + // second want-block should be ignored + exp: []testCaseExp{ + testCaseExp{ + blks: "a", + }, + }, + }, + + // Send want-have then want-have for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantHaves: "a", + sendDontHave: true, + }, + testCaseEntry{ + wantHaves: "a", + sendDontHave: true, + }, + }, + // second want-have should be 
ignored + exp: []testCaseExp{ + testCaseExp{ + haves: "a", + }, + }, + }, + } + + var onlyTestCases []testCase + for _, testCase := range testCases { + if testCase.only { + onlyTestCases = append(onlyTestCases, testCase) + } + } + if len(onlyTestCases) > 0 { + testCases = onlyTestCases + } + + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0) + e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + for i, testCase := range testCases { + t.Logf("Test case %d:", i) + for _, wl := range testCase.wls { + t.Logf(" want-blocks '%s' / want-haves '%s' / sendDontHave %t", + wl.wantBlks, wl.wantHaves, wl.sendDontHave) + wantBlks := strings.Split(wl.wantBlks, "") + wantHaves := strings.Split(wl.wantHaves, "") + partnerWantBlocksHaves(e, wantBlks, wantHaves, wl.sendDontHave, partner) + } + + for _, exp := range testCase.exp { + expBlks := strings.Split(exp.blks, "") + expHaves := strings.Split(exp.haves, "") + expDontHaves := strings.Split(exp.dontHaves, "") + + next := <-e.Outbox() + env := <-next + err := checkOutput(t, e, env, expBlks, expHaves, expDontHaves) + if err != nil { + t.Fatal(err) + } + env.Sent() + } + } +} + +func TestPartnerWantHaveWantBlockActive(t *testing.T) { + alphabet := "abcdefghijklmnopqrstuvwxyz" + + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + for _, letter := range strings.Split(alphabet, "") { + block := blocks.NewBlock([]byte(letter)) + if err := bs.Put(block); err != nil { + t.Fatal(err) + } + } + + partner := libp2ptest.RandPeerIDFatal(t) + + type testCaseEntry struct { + wantBlks string + wantHaves string + sendDontHave bool + } + + type testCaseExp struct { + blks string + haves string + dontHaves string + } + + type testCase struct { + only bool + wls []testCaseEntry + exp []testCaseExp + } + + testCases := []testCase{ + // Send want-block then want-have for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "a", + sendDontHave: 
true, + }, + testCaseEntry{ + wantHaves: "a", + sendDontHave: true, + }, + }, + // want-have should be ignored because there was already a + // want-block for the same CID in the queue + exp: []testCaseExp{ + testCaseExp{ + blks: "a", + }, + }, + }, + + // Send want-have then want-block for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantHaves: "b", + sendDontHave: true, + }, + testCaseEntry{ + wantBlks: "b", + sendDontHave: true, + }, + }, + // want-have is active when want-block is added, so want-have + // should get sent, then want-block + exp: []testCaseExp{ + testCaseExp{ + haves: "b", + }, + testCaseExp{ + blks: "b", + }, + }, + }, + + // Send want-block then want-block for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "a", + sendDontHave: true, + }, + testCaseEntry{ + wantBlks: "a", + sendDontHave: true, + }, + }, + // second want-block should be ignored + exp: []testCaseExp{ + testCaseExp{ + blks: "a", + }, + }, + }, + + // Send want-have then want-have for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantHaves: "a", + sendDontHave: true, + }, + testCaseEntry{ + wantHaves: "a", + sendDontHave: true, + }, + }, + // second want-have should be ignored + exp: []testCaseExp{ + testCaseExp{ + haves: "a", + }, + }, + }, + } + + var onlyTestCases []testCase + for _, testCase := range testCases { + if testCase.only { + onlyTestCases = append(onlyTestCases, testCase) + } + } + if len(onlyTestCases) > 0 { + testCases = onlyTestCases + } + + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0) + e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + + var next envChan + for i, testCase := range testCases { + envs := make([]*Envelope, 0) + + t.Logf("Test case %d:", i) + for _, wl := range testCase.wls { + t.Logf(" want-blocks '%s' / want-haves '%s' / sendDontHave %t", + wl.wantBlks, wl.wantHaves, wl.sendDontHave) + wantBlks := 
strings.Split(wl.wantBlks, "") + wantHaves := strings.Split(wl.wantHaves, "") + partnerWantBlocksHaves(e, wantBlks, wantHaves, wl.sendDontHave, partner) + + var env *Envelope + next, env = getNextEnvelope(e, next, 5*time.Millisecond) + if env != nil { + envs = append(envs, env) + } + } + + if len(envs) != len(testCase.exp) { + t.Fatalf("Expected %d envelopes but received %d", len(testCase.exp), len(envs)) + } + + for i, exp := range testCase.exp { + expBlks := strings.Split(exp.blks, "") + expHaves := strings.Split(exp.haves, "") + expDontHaves := strings.Split(exp.dontHaves, "") + + err := checkOutput(t, e, envs[i], expBlks, expHaves, expDontHaves) + if err != nil { + t.Fatal(err) + } + envs[i].Sent() + } + } +} + +func checkOutput(t *testing.T, e *Engine, envelope *Envelope, expBlks []string, expHaves []string, expDontHaves []string) error { + blks := envelope.Message.Blocks() + presences := envelope.Message.BlockPresences() + + // Verify payload message length + if len(blks) != len(expBlks) { + blkDiff := formatBlocksDiff(blks, expBlks) + msg := fmt.Sprintf("Received %d blocks. Expected %d blocks:\n%s", len(blks), len(expBlks), blkDiff) + return errors.New(msg) + } + + // Verify block presences message length + expPresencesCount := len(expHaves) + len(expDontHaves) + if len(presences) != expPresencesCount { + presenceDiff := formatPresencesDiff(presences, expHaves, expDontHaves) + return fmt.Errorf("Received %d BlockPresences. 
Expected %d BlockPresences:\n%s", + len(presences), expPresencesCount, presenceDiff) + } + + // Verify payload message contents + for _, k := range expBlks { + found := false + expected := blocks.NewBlock([]byte(k)) + for _, block := range blks { + if block.Cid().Equals(expected.Cid()) { + found = true + break + } + } + if !found { + return errors.New(formatBlocksDiff(blks, expBlks)) + } + } + + // Verify HAVEs + if err := checkPresence(presences, expHaves, pb.Message_Have); err != nil { + return errors.New(formatPresencesDiff(presences, expHaves, expDontHaves)) + } + + // Verify DONT_HAVEs + if err := checkPresence(presences, expDontHaves, pb.Message_DontHave); err != nil { + return errors.New(formatPresencesDiff(presences, expHaves, expDontHaves)) + } + + return nil +} + +func checkPresence(presences []message.BlockPresence, expPresence []string, presenceType pb.Message_BlockPresenceType) error { + for _, k := range expPresence { + found := false + expected := blocks.NewBlock([]byte(k)) + for _, p := range presences { + if p.Cid.Equals(expected.Cid()) { + found = true + if p.Type != presenceType { + return errors.New("type mismatch") + } + break + } + } + if !found { + return errors.New("not found") + } + } + return nil +} + +func formatBlocksDiff(blks []blocks.Block, expBlks []string) string { + var out bytes.Buffer + out.WriteString(fmt.Sprintf("Blocks (%d):\n", len(blks))) + for _, b := range blks { + out.WriteString(fmt.Sprintf(" %s: %s\n", lu.C(b.Cid()), b.RawData())) + } + out.WriteString(fmt.Sprintf("Expected (%d):\n", len(expBlks))) + for _, k := range expBlks { + expected := blocks.NewBlock([]byte(k)) + out.WriteString(fmt.Sprintf(" %s: %s\n", lu.C(expected.Cid()), k)) + } + return out.String() +} + +func formatPresencesDiff(presences []message.BlockPresence, expHaves []string, expDontHaves []string) string { + var out bytes.Buffer + out.WriteString(fmt.Sprintf("BlockPresences (%d):\n", len(presences))) + for _, p := range presences { + t := "HAVE" + if 
p.Type == pb.Message_DontHave { + t = "DONT_HAVE" + } + out.WriteString(fmt.Sprintf(" %s - %s\n", lu.C(p.Cid), t)) + } + out.WriteString(fmt.Sprintf("Expected (%d):\n", len(expHaves)+len(expDontHaves))) + for _, k := range expHaves { + expected := blocks.NewBlock([]byte(k)) + out.WriteString(fmt.Sprintf(" %s: %s - HAVE\n", lu.C(expected.Cid()), k)) + } + for _, k := range expDontHaves { + expected := blocks.NewBlock([]byte(k)) + out.WriteString(fmt.Sprintf(" %s: %s - DONT_HAVE\n", lu.C(expected.Cid()), k)) + } + return out.String() +} + func TestPartnerWantsThenCancels(t *testing.T) { numRounds := 10 if testing.Short() { @@ -235,7 +850,7 @@ func TestPartnerWantsThenCancels(t *testing.T) { ctx := context.Background() for i := 0; i < numRounds; i++ { expected := make([][]string, 0, len(testcases)) - e := NewEngine(ctx, bs, &fakePeerTagger{}) + e := newEngine(ctx, bs, &fakePeerTagger{}, "localhost", 0) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) for _, testcase := range testcases { set := testcase[0] @@ -243,9 +858,9 @@ func TestPartnerWantsThenCancels(t *testing.T) { keeps := stringsComplement(set, cancels) expected = append(expected, keeps) - partner := testutil.RandPeerIDFatal(t) + partner := libp2ptest.RandPeerIDFatal(t) - partnerWants(e, set, partner) + partnerWantBlocks(e, set, partner) partnerCancels(e, cancels, partner) } if err := checkHandledInOrder(t, e, expected); err != nil { @@ -255,11 +870,119 @@ func TestPartnerWantsThenCancels(t *testing.T) { } } +func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + partner := libp2ptest.RandPeerIDFatal(t) + otherPeer := libp2ptest.RandPeerIDFatal(t) + + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0) + e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + + blks := testutil.GenerateBlocksOfSize(4, 8*1024) + msg := message.New(false) + 
msg.AddEntry(blks[0].Cid(), 4, pb.Message_Wantlist_Have, false) + msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, false) + msg.AddEntry(blks[2].Cid(), 2, pb.Message_Wantlist_Block, false) + msg.AddEntry(blks[3].Cid(), 1, pb.Message_Wantlist_Block, false) + e.MessageReceived(context.Background(), partner, msg) + + // Nothing in blockstore, so shouldn't get any envelope + var next envChan + next, env := getNextEnvelope(e, next, 5*time.Millisecond) + if env != nil { + t.Fatal("expected no envelope yet") + } + + if err := bs.PutMany([]blocks.Block{blks[0], blks[2]}); err != nil { + t.Fatal(err) + } + e.ReceiveFrom(otherPeer, []blocks.Block{blks[0], blks[2]}, []cid.Cid{}) + _, env = getNextEnvelope(e, next, 5*time.Millisecond) + if env == nil { + t.Fatal("expected envelope") + } + if env.Peer != partner { + t.Fatal("expected message to peer") + } + sentBlk := env.Message.Blocks() + if len(sentBlk) != 1 || !sentBlk[0].Cid().Equals(blks[2].Cid()) { + t.Fatal("expected 1 block") + } + sentHave := env.Message.BlockPresences() + if len(sentHave) != 1 || !sentHave[0].Cid.Equals(blks[0].Cid()) || sentHave[0].Type != pb.Message_Have { + t.Fatal("expected 1 HAVE") + } +} + +func TestSendDontHave(t *testing.T) { + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + partner := libp2ptest.RandPeerIDFatal(t) + otherPeer := libp2ptest.RandPeerIDFatal(t) + + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0) + e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + + blks := testutil.GenerateBlocksOfSize(4, 8*1024) + msg := message.New(false) + msg.AddEntry(blks[0].Cid(), 4, pb.Message_Wantlist_Have, false) + msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, true) + msg.AddEntry(blks[2].Cid(), 2, pb.Message_Wantlist_Block, false) + msg.AddEntry(blks[3].Cid(), 1, pb.Message_Wantlist_Block, true) + e.MessageReceived(context.Background(), partner, msg) + + // Nothing in blockstore, should get 
DONT_HAVE for entries that wanted it + var next envChan + next, env := getNextEnvelope(e, next, 5*time.Millisecond) + if env == nil { + t.Fatal("expected envelope") + } + if env.Peer != partner { + t.Fatal("expected message to peer") + } + if len(env.Message.Blocks()) > 0 { + t.Fatal("expected no blocks") + } + sentDontHaves := env.Message.BlockPresences() + if len(sentDontHaves) != 2 { + t.Fatal("expected 2 DONT_HAVEs") + } + if !sentDontHaves[0].Cid.Equals(blks[1].Cid()) && + !sentDontHaves[1].Cid.Equals(blks[1].Cid()) { + t.Fatal("expected DONT_HAVE for want-have") + } + if !sentDontHaves[0].Cid.Equals(blks[3].Cid()) && + !sentDontHaves[1].Cid.Equals(blks[3].Cid()) { + t.Fatal("expected DONT_HAVE for want-block") + } + + // Receive all the blocks + if err := bs.PutMany(blks); err != nil { + t.Fatal(err) + } + e.ReceiveFrom(otherPeer, blks, []cid.Cid{}) + + // Envelope should contain 2 HAVEs / 2 blocks + _, env = getNextEnvelope(e, next, 5*time.Millisecond) + if env == nil { + t.Fatal("expected envelope") + } + if env.Peer != partner { + t.Fatal("expected message to peer") + } + if len(env.Message.Blocks()) != 2 { + t.Fatal("expected 2 blocks") + } + sentHave := env.Message.BlockPresences() + if len(sentHave) != 2 || sentHave[0].Type != pb.Message_Have || sentHave[1].Type != pb.Message_Have { + t.Fatal("expected 2 HAVEs") + } +} + func TestTaggingPeers(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() - sanfrancisco := newEngine(ctx, "sf") - seattle := newEngine(ctx, "sea") + sanfrancisco := newTestEngine(ctx, "sf") + seattle := newTestEngine(ctx, "sea") keys := []string{"a", "b", "c", "d", "e"} for _, letter := range keys { @@ -268,7 +991,7 @@ func TestTaggingPeers(t *testing.T) { t.Fatal(err) } } - partnerWants(sanfrancisco.Engine, keys, seattle.Peer) + partnerWantBlocks(sanfrancisco.Engine, keys, seattle.Peer) next := <-sanfrancisco.Engine.Outbox() envelope := <-next @@ -285,12 +1008,12 @@ func 
TestTaggingPeers(t *testing.T) { func TestTaggingUseful(t *testing.T) { oldShortTerm := shortTerm - shortTerm = 1 * time.Millisecond + shortTerm = 2 * time.Millisecond defer func() { shortTerm = oldShortTerm }() ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() - me := newEngine(ctx, "engine") + me := newTestEngine(ctx, "engine") friend := peer.ID("friend") block := blocks.NewBlock([]byte("foobar")) @@ -322,11 +1045,27 @@ func TestTaggingUseful(t *testing.T) { } } -func partnerWants(e *Engine, keys []string, partner peer.ID) { +func partnerWantBlocks(e *Engine, keys []string, partner peer.ID) { add := message.New(false) for i, letter := range keys { block := blocks.NewBlock([]byte(letter)) - add.AddEntry(block.Cid(), len(keys)-i) + add.AddEntry(block.Cid(), len(keys)-i, pb.Message_Wantlist_Block, true) + } + e.MessageReceived(context.Background(), partner, add) +} + +func partnerWantBlocksHaves(e *Engine, keys []string, wantHaves []string, sendDontHave bool, partner peer.ID) { + add := message.New(false) + priority := len(wantHaves) + len(keys) + for _, letter := range wantHaves { + block := blocks.NewBlock([]byte(letter)) + add.AddEntry(block.Cid(), priority, pb.Message_Wantlist_Have, sendDontHave) + priority-- + } + for _, letter := range keys { + block := blocks.NewBlock([]byte(letter)) + add.AddEntry(block.Cid(), priority, pb.Message_Wantlist_Block, sendDontHave) + priority-- } e.MessageReceived(context.Background(), partner, add) } @@ -340,6 +1079,29 @@ func partnerCancels(e *Engine, keys []string, partner peer.ID) { e.MessageReceived(context.Background(), partner, cancels) } +type envChan <-chan *Envelope + +func getNextEnvelope(e *Engine, next envChan, t time.Duration) (envChan, *Envelope) { + ctx, cancel := context.WithTimeout(context.Background(), t) + defer cancel() + + if next == nil { + next = <-e.Outbox() // returns immediately + } + + select { + case env, ok := <-next: // blocks till next envelope ready + if !ok { 
+ log.Warningf("got closed channel") + return nil, nil + } + return nil, env + case <-ctx.Done(): + // log.Warningf("got timeout") + } + return next, nil +} + func checkHandledInOrder(t *testing.T, e *Engine, expected [][]string) error { for _, keys := range expected { next := <-e.Outbox() diff --git a/bitswap/decision/ledger.go b/bitswap/decision/ledger.go index 277daaa2c..a607834a8 100644 --- a/bitswap/decision/ledger.go +++ b/bitswap/decision/ledger.go @@ -4,6 +4,7 @@ import ( "sync" "time" + pb "github.com/ipfs/go-bitswap/message/pb" wl "github.com/ipfs/go-bitswap/wantlist" cid "github.com/ipfs/go-cid" @@ -46,7 +47,7 @@ type ledger struct { // don't drop the reference to this ledger in multi-connection scenarios ref int - lk sync.Mutex + lk sync.RWMutex } // Receipt is a summary of the ledger for a given peer @@ -90,13 +91,13 @@ func (l *ledger) ReceivedBytes(n int) { l.Accounting.BytesRecv += uint64(n) } -func (l *ledger) Wants(k cid.Cid, priority int) { +func (l *ledger) Wants(k cid.Cid, priority int, wantType pb.Message_Wantlist_WantType) { log.Debugf("peer %s wants %s", l.Partner, k) - l.wantList.Add(k, priority) + l.wantList.Add(k, priority, wantType) } -func (l *ledger) CancelWant(k cid.Cid) { - l.wantList.Remove(k) +func (l *ledger) CancelWant(k cid.Cid) bool { + return l.wantList.Remove(k) } func (l *ledger) WantListContains(k cid.Cid) (wl.Entry, bool) { diff --git a/bitswap/decision/taskmerger.go b/bitswap/decision/taskmerger.go new file mode 100644 index 000000000..190486419 --- /dev/null +++ b/bitswap/decision/taskmerger.go @@ -0,0 +1,87 @@ +package decision + +import ( + "github.com/ipfs/go-peertaskqueue/peertask" +) + +// taskData is extra data associated with each task in the request queue +type taskData struct { + // Tasks can be want-have or want-block + IsWantBlock bool + // Whether to immediately send a response if the block is not found + SendDontHave bool + // The size of the block corresponding to the task + BlockSize int + // Whether the 
block was found + HaveBlock bool +} + +type taskMerger struct{} + +func newTaskMerger() *taskMerger { + return &taskMerger{} +} + +// The request queue uses this Method to decide if a newly pushed task has any +// new information beyond the tasks with the same Topic (CID) in the queue. +func (*taskMerger) HasNewInfo(task peertask.Task, existing []peertask.Task) bool { + haveSize := false + isWantBlock := false + for _, et := range existing { + etd := et.Data.(*taskData) + if etd.HaveBlock { + haveSize = true + } + + if etd.IsWantBlock { + isWantBlock = true + } + } + + // If there is no active want-block and the new task is a want-block, + // the new task is better + newTaskData := task.Data.(*taskData) + if !isWantBlock && newTaskData.IsWantBlock { + return true + } + + // If there is no size information for the CID and the new task has + // size information, the new task is better + if !haveSize && newTaskData.HaveBlock { + return true + } + + return false +} + +// The request queue uses Merge to merge a newly pushed task with an existing +// task with the same Topic (CID) +func (*taskMerger) Merge(task peertask.Task, existing *peertask.Task) { + newTask := task.Data.(*taskData) + existingTask := existing.Data.(*taskData) + + // If we now have block size information, update the task with + // the new block size + if !existingTask.HaveBlock && newTask.HaveBlock { + existingTask.HaveBlock = newTask.HaveBlock + existingTask.BlockSize = newTask.BlockSize + } + + // If replacing a want-have with a want-block + if !existingTask.IsWantBlock && newTask.IsWantBlock { + // Change the type from want-have to want-block + existingTask.IsWantBlock = true + // If the want-have was a DONT_HAVE, or the want-block has a size + if !existingTask.HaveBlock || newTask.HaveBlock { + // Update the entry size + existingTask.HaveBlock = newTask.HaveBlock + existing.Work = task.Work + } + } + + // If the task is a want-block, make sure the entry size is equal + // to the block size 
(because we will send the whole block) + if existingTask.IsWantBlock && existingTask.HaveBlock { + existing.Work = existingTask.BlockSize + } +} diff --git a/bitswap/decision/taskmerger_test.go b/bitswap/decision/taskmerger_test.go new file mode 100644 index 000000000..7d4d61c8c --- /dev/null +++ b/bitswap/decision/taskmerger_test.go @@ -0,0 +1,357 @@ +package decision + +import ( + "testing" + + "github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-peertaskqueue" + "github.com/ipfs/go-peertaskqueue/peertask" +) + +func TestPushHaveVsBlock(t *testing.T) { + partner := testutil.GeneratePeers(1)[0] + + wantHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, + Data: &taskData{ + IsWantBlock: false, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + wantBlock := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 10, + Data: &taskData{ + IsWantBlock: true, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + + runTestCase := func(tasks []peertask.Task, expIsWantBlock bool) { + tasks = cloneTasks(tasks) + ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) + ptq.PushTasks(partner, tasks...) 
+ _, popped, _ := ptq.PopTasks(100) + if len(popped) != 1 { + t.Fatalf("Expected 1 task, received %d tasks", len(popped)) + } + isWantBlock := popped[0].Data.(*taskData).IsWantBlock + if isWantBlock != expIsWantBlock { + t.Fatalf("Expected task.IsWantBlock to be %t, received %t", expIsWantBlock, isWantBlock) + } + } + const wantBlockType = true + const wantHaveType = false + + // should ignore second want-have + runTestCase([]peertask.Task{wantHave, wantHave}, wantHaveType) + // should ignore second want-block + runTestCase([]peertask.Task{wantBlock, wantBlock}, wantBlockType) + // want-have does not overwrite want-block + runTestCase([]peertask.Task{wantBlock, wantHave}, wantBlockType) + // want-block overwrites want-have + runTestCase([]peertask.Task{wantHave, wantBlock}, wantBlockType) +} + +func TestPushSizeInfo(t *testing.T) { + partner := testutil.GeneratePeers(1)[0] + + wantBlockBlockSize := 10 + wantBlockDontHaveBlockSize := 0 + wantHaveBlockSize := 10 + wantHaveDontHaveBlockSize := 0 + wantBlock := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 10, + Data: &taskData{ + IsWantBlock: true, + BlockSize: wantBlockBlockSize, + HaveBlock: true, + SendDontHave: false, + }, + } + wantBlockDontHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 2, + Data: &taskData{ + IsWantBlock: true, + BlockSize: wantBlockDontHaveBlockSize, + HaveBlock: false, + SendDontHave: false, + }, + } + wantHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, Data: &taskData{ + IsWantBlock: false, + BlockSize: wantHaveBlockSize, + HaveBlock: true, + SendDontHave: false, + }, + } + wantHaveDontHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, + Data: &taskData{ + IsWantBlock: false, + BlockSize: wantHaveDontHaveBlockSize, + HaveBlock: false, + SendDontHave: false, + }, + } + + runTestCase := func(tasks []peertask.Task, expSize int, expBlockSize int, expIsWantBlock bool) { + tasks = cloneTasks(tasks) + ptq := 
peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) + ptq.PushTasks(partner, tasks...) + _, popped, _ := ptq.PopTasks(100) + if len(popped) != 1 { + t.Fatalf("Expected 1 task, received %d tasks", len(popped)) + } + if popped[0].Work != expSize { + t.Fatalf("Expected task.Work to be %d, received %d", expSize, popped[0].Work) + } + td := popped[0].Data.(*taskData) + if td.BlockSize != expBlockSize { + t.Fatalf("Expected task.Work to be %d, received %d", expBlockSize, td.BlockSize) + } + if td.IsWantBlock != expIsWantBlock { + t.Fatalf("Expected task.IsWantBlock to be %t, received %t", expIsWantBlock, td.IsWantBlock) + } + } + + isWantBlock := true + isWantHave := false + + // want-block (DONT_HAVE) should have no effect on existing want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantBlockDontHave}, wantBlockDontHave.Work, wantBlockDontHaveBlockSize, isWantBlock) + // want-have (DONT_HAVE) should have no effect on existing want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantHaveDontHave}, wantBlockDontHave.Work, wantBlockDontHaveBlockSize, isWantBlock) + // want-block with size should update existing want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantBlock}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-have with size should update existing want-block (DONT_HAVE) size, + // but leave it as a want-block (ie should not change it to want-have) + runTestCase([]peertask.Task{wantBlockDontHave, wantHave}, wantHaveBlockSize, wantHaveBlockSize, isWantBlock) + + // want-block (DONT_HAVE) size should not update existing want-block with size + runTestCase([]peertask.Task{wantBlock, wantBlockDontHave}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-have (DONT_HAVE) should have no effect on existing want-block with size + runTestCase([]peertask.Task{wantBlock, wantHaveDontHave}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-block with size should have no effect 
on existing want-block with size + runTestCase([]peertask.Task{wantBlock, wantBlock}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-have with size should have no effect on existing want-block with size + runTestCase([]peertask.Task{wantBlock, wantHave}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + + // want-block (DONT_HAVE) should update type and entry size of existing want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantBlockDontHave}, wantBlockDontHave.Work, wantBlockDontHaveBlockSize, isWantBlock) + // want-have (DONT_HAVE) should have no effect on existing want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantHaveDontHave}, wantHaveDontHave.Work, wantHaveDontHaveBlockSize, isWantHave) + // want-block with size should update existing want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantBlock}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-have with size should update existing want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantHave}, wantHave.Work, wantHaveBlockSize, isWantHave) + + // want-block (DONT_HAVE) should update type and entry size of existing want-have with size + runTestCase([]peertask.Task{wantHave, wantBlockDontHave}, wantHaveBlockSize, wantHaveBlockSize, isWantBlock) + // want-have (DONT_HAVE) should not update existing want-have with size + runTestCase([]peertask.Task{wantHave, wantHaveDontHave}, wantHave.Work, wantHaveBlockSize, isWantHave) + // want-block with size should update type and entry size of existing want-have with size + runTestCase([]peertask.Task{wantHave, wantBlock}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-have should have no effect on existing want-have + runTestCase([]peertask.Task{wantHave, wantHave}, wantHave.Work, wantHaveBlockSize, isWantHave) +} + +func TestPushHaveVsBlockActive(t *testing.T) { + partner := testutil.GeneratePeers(1)[0] + + wantBlock := peertask.Task{ + Topic: "1", + 
Priority: 10, + Work: 10, + Data: &taskData{ + IsWantBlock: true, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + wantHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, + Data: &taskData{ + IsWantBlock: false, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + + runTestCase := func(tasks []peertask.Task, expCount int) { + tasks = cloneTasks(tasks) + ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) + // ptq.PushTasks(partner, tasks...) + var popped []*peertask.Task + for _, task := range tasks { + // Push the task + // tracker.PushTasks([]peertask.Task{task}) + ptq.PushTasks(partner, task) + // Pop the task (which makes it active) + _, poppedTasks, _ := ptq.PopTasks(10) + popped = append(popped, poppedTasks...) + } + if len(popped) != expCount { + t.Fatalf("Expected %d tasks, received %d tasks", expCount, len(popped)) + } + } + + // should ignore second want-have + runTestCase([]peertask.Task{wantHave, wantHave}, 1) + // should ignore second want-block + runTestCase([]peertask.Task{wantBlock, wantBlock}, 1) + // want-have does not overwrite want-block + runTestCase([]peertask.Task{wantBlock, wantHave}, 1) + // can't replace want-have with want-block because want-have is active + runTestCase([]peertask.Task{wantHave, wantBlock}, 2) +} + +func TestPushSizeInfoActive(t *testing.T) { + partner := testutil.GeneratePeers(1)[0] + + wantBlock := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 10, + Data: &taskData{ + IsWantBlock: true, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + wantBlockDontHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 2, + Data: &taskData{ + IsWantBlock: true, + BlockSize: 0, + HaveBlock: false, + SendDontHave: false, + }, + } + wantHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, + Data: &taskData{ + IsWantBlock: false, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + wantHaveDontHave := peertask.Task{ 
+ Topic: "1", + Priority: 10, + Work: 1, + Data: &taskData{ + IsWantBlock: false, + BlockSize: 0, + HaveBlock: false, + SendDontHave: false, + }, + } + + runTestCase := func(tasks []peertask.Task, expTasks []peertask.Task) { + tasks = cloneTasks(tasks) + ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) + var popped []*peertask.Task + for _, task := range tasks { + // Push the task + ptq.PushTasks(partner, task) + // Pop the task (which makes it active) + _, poppedTasks, _ := ptq.PopTasks(10) + popped = append(popped, poppedTasks...) + } + if len(popped) != len(expTasks) { + t.Fatalf("Expected %d tasks, received %d tasks", len(expTasks), len(popped)) + } + for i, task := range popped { + td := task.Data.(*taskData) + expTd := expTasks[i].Data.(*taskData) + if td.IsWantBlock != expTd.IsWantBlock { + t.Fatalf("Expected IsWantBlock to be %t, received %t", expTd.IsWantBlock, td.IsWantBlock) + } + if task.Work != expTasks[i].Work { + t.Fatalf("Expected Size to be %d, received %d", expTasks[i].Work, task.Work) + } + } + } + + // second want-block (DONT_HAVE) should be ignored + runTestCase([]peertask.Task{wantBlockDontHave, wantBlockDontHave}, []peertask.Task{wantBlockDontHave}) + // want-have (DONT_HAVE) should be ignored if there is existing active want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantHaveDontHave}, []peertask.Task{wantBlockDontHave}) + // want-block with size should be added if there is existing active want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantBlock}, []peertask.Task{wantBlockDontHave, wantBlock}) + // want-have with size should be added if there is existing active want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantHave}, []peertask.Task{wantBlockDontHave, wantHave}) + + // want-block (DONT_HAVE) should be added if there is existing active want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantBlockDontHave}, 
[]peertask.Task{wantHaveDontHave, wantBlockDontHave}) + // want-have (DONT_HAVE) should be ignored if there is existing active want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantHaveDontHave}, []peertask.Task{wantHaveDontHave}) + // want-block with size should be added if there is existing active want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantBlock}, []peertask.Task{wantHaveDontHave, wantBlock}) + // want-have with size should be added if there is existing active want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantHave}, []peertask.Task{wantHaveDontHave, wantHave}) + + // want-block (DONT_HAVE) should be ignored if there is existing active want-block with size + runTestCase([]peertask.Task{wantBlock, wantBlockDontHave}, []peertask.Task{wantBlock}) + // want-have (DONT_HAVE) should be ignored if there is existing active want-block with size + runTestCase([]peertask.Task{wantBlock, wantHaveDontHave}, []peertask.Task{wantBlock}) + // second want-block with size should be ignored + runTestCase([]peertask.Task{wantBlock, wantBlock}, []peertask.Task{wantBlock}) + // want-have with size should be ignored if there is existing active want-block with size + runTestCase([]peertask.Task{wantBlock, wantHave}, []peertask.Task{wantBlock}) + + // want-block (DONT_HAVE) should be added if there is existing active want-have with size + runTestCase([]peertask.Task{wantHave, wantBlockDontHave}, []peertask.Task{wantHave, wantBlockDontHave}) + // want-have (DONT_HAVE) should be ignored if there is existing active want-have with size + runTestCase([]peertask.Task{wantHave, wantHaveDontHave}, []peertask.Task{wantHave}) + // second want-have with size should be ignored + runTestCase([]peertask.Task{wantHave, wantHave}, []peertask.Task{wantHave}) + // want-block with size should be added if there is existing active want-have with size + runTestCase([]peertask.Task{wantHave, wantBlock}, []peertask.Task{wantHave, 
wantBlock}) +} + +func cloneTasks(tasks []peertask.Task) []peertask.Task { + var cp []peertask.Task + for _, t := range tasks { + td := t.Data.(*taskData) + cp = append(cp, peertask.Task{ + Topic: t.Topic, + Priority: t.Priority, + Work: t.Work, + Data: &taskData{ + IsWantBlock: td.IsWantBlock, + BlockSize: td.BlockSize, + HaveBlock: td.HaveBlock, + SendDontHave: td.SendDontHave, + }, + }) + } + return cp +} diff --git a/bitswap/logutil/logutil.go b/bitswap/logutil/logutil.go new file mode 100644 index 000000000..8cba2a47c --- /dev/null +++ b/bitswap/logutil/logutil.go @@ -0,0 +1,26 @@ +package logutil + +import ( + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +func C(c cid.Cid) string { + if c.Defined() { + str := c.String() + return str[len(str)-6:] + } + return "" +} + +func P(p peer.ID) string { + if p != "" { + str := p.String() + limit := 6 + if len(str) < limit { + limit = len(str) + } + return str[len(str)-limit:] + } + return "" +} diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 08c85ea6f..c4ea0fd12 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -6,9 +6,9 @@ import ( "io" pb "github.com/ipfs/go-bitswap/message/pb" - wantlist "github.com/ipfs/go-bitswap/wantlist" - blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-bitswap/wantlist" + blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" pool "github.com/libp2p/go-buffer-pool" msgio "github.com/libp2p/go-msgio" @@ -25,18 +25,43 @@ type BitSwapMessage interface { // Blocks returns a slice of unique blocks. 
Blocks() []blocks.Block + // BlockPresences returns the list of HAVE / DONT_HAVE in the message + BlockPresences() []BlockPresence + // Haves returns the Cids for each HAVE + Haves() []cid.Cid + // DontHaves returns the Cids for each DONT_HAVE + DontHaves() []cid.Cid + // PendingBytes returns the number of outstanding bytes of data that the + // engine has yet to send to the client (because they didn't fit in this + // message) + PendingBytes() int32 // AddEntry adds an entry to the Wantlist. - AddEntry(key cid.Cid, priority int) + AddEntry(key cid.Cid, priority int, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int - Cancel(key cid.Cid) + // Cancel adds a CANCEL for the given CID to the message + // Returns the size of the CANCEL entry in the protobuf + Cancel(key cid.Cid) int + // Empty indicates whether the message has any information Empty() bool + // Size returns the size of the message in bytes + Size() int // A full wantlist is an authoritative copy, a 'non-full' wantlist is a patch-set Full() bool + // AddBlock adds a block to the message AddBlock(blocks.Block) + // AddBlockPresence adds a HAVE / DONT_HAVE for the given Cid to the message + AddBlockPresence(cid.Cid, pb.Message_BlockPresenceType) + // AddHave adds a HAVE for the given Cid to the message + AddHave(cid.Cid) + // AddDontHave adds a DONT_HAVE for the given Cid to the message + AddDontHave(cid.Cid) + // SetPendingBytes sets the number of bytes of data that are yet to be sent + // to the client (because they didn't fit in this message) + SetPendingBytes(int32) Exportable Loggable() map[string]interface{} @@ -45,16 +70,27 @@ type BitSwapMessage interface { // Exportable is an interface for structures than can be // encoded in a bitswap protobuf. type Exportable interface { + // Note that older Bitswap versions use a different wire format, so we need + // to convert the message to the appropriate format depending on which + // version of the protocol the remote peer supports. 
ToProtoV0() *pb.Message ToProtoV1() *pb.Message ToNetV0(w io.Writer) error ToNetV1(w io.Writer) error } +// BlockPresence represents a HAVE / DONT_HAVE for a given Cid +type BlockPresence struct { + Cid cid.Cid + Type pb.Message_BlockPresenceType +} + type impl struct { - full bool - wantlist map[cid.Cid]*Entry - blocks map[cid.Cid]blocks.Block + full bool + wantlist map[cid.Cid]*Entry + blocks map[cid.Cid]blocks.Block + blockPresences map[cid.Cid]pb.Message_BlockPresenceType + pendingBytes int32 } // New returns a new, empty bitswap message @@ -64,17 +100,21 @@ func New(full bool) BitSwapMessage { func newMsg(full bool) *impl { return &impl{ - blocks: make(map[cid.Cid]blocks.Block), - wantlist: make(map[cid.Cid]*Entry), - full: full, + blocks: make(map[cid.Cid]blocks.Block), + blockPresences: make(map[cid.Cid]pb.Message_BlockPresenceType), + wantlist: make(map[cid.Cid]*Entry), + full: full, } } -// Entry is an wantlist entry in a Bitswap message (along with whether it's an -// add or cancel). 
+// Entry is a wantlist entry in a Bitswap message, with flags indicating +// - whether message is a cancel +// - whether requester wants a DONT_HAVE message +// - whether requester wants a HAVE message (instead of the block) type Entry struct { wantlist.Entry - Cancel bool + Cancel bool + SendDontHave bool } func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) { @@ -84,7 +124,7 @@ func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) { if err != nil { return nil, fmt.Errorf("incorrectly formatted cid in wantlist: %s", err) } - m.addEntry(c, int(e.Priority), e.Cancel) + m.addEntry(c, int(e.Priority), e.Cancel, e.WantType, e.SendDontHave) } // deprecated @@ -114,6 +154,18 @@ func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) { m.AddBlock(blk) } + for _, bi := range pbm.GetBlockPresences() { + c, err := cid.Cast(bi.GetCid()) + if err != nil { + return nil, err + } + + t := bi.GetType() + m.AddBlockPresence(c, t) + } + + m.pendingBytes = pbm.PendingBytes + return m, nil } @@ -122,7 +174,7 @@ func (m *impl) Full() bool { } func (m *impl) Empty() bool { - return len(m.blocks) == 0 && len(m.wantlist) == 0 + return len(m.blocks) == 0 && len(m.wantlist) == 0 && len(m.blockPresences) == 0 } func (m *impl) Wantlist() []Entry { @@ -141,35 +193,129 @@ func (m *impl) Blocks() []blocks.Block { return bs } -func (m *impl) Cancel(k cid.Cid) { - delete(m.wantlist, k) - m.addEntry(k, 0, true) +func (m *impl) BlockPresences() []BlockPresence { + bps := make([]BlockPresence, 0, len(m.blockPresences)) + for c, t := range m.blockPresences { + bps = append(bps, BlockPresence{c, t}) + } + return bps +} + +func (m *impl) Haves() []cid.Cid { + return m.getBlockPresenceByType(pb.Message_Have) +} + +func (m *impl) DontHaves() []cid.Cid { + return m.getBlockPresenceByType(pb.Message_DontHave) +} + +func (m *impl) getBlockPresenceByType(t pb.Message_BlockPresenceType) []cid.Cid { + cids := make([]cid.Cid, 0, len(m.blockPresences)) + for c, bpt := range 
m.blockPresences { + if bpt == t { + cids = append(cids, c) + } + } + return cids +} + +func (m *impl) PendingBytes() int32 { + return m.pendingBytes } -func (m *impl) AddEntry(k cid.Cid, priority int) { - m.addEntry(k, priority, false) +func (m *impl) SetPendingBytes(pendingBytes int32) { + m.pendingBytes = pendingBytes } -func (m *impl) addEntry(c cid.Cid, priority int, cancel bool) { +func (m *impl) Cancel(k cid.Cid) int { + return m.addEntry(k, 0, true, pb.Message_Wantlist_Block, false) +} + +func (m *impl) AddEntry(k cid.Cid, priority int, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { + return m.addEntry(k, priority, false, wantType, sendDontHave) +} + +func (m *impl) addEntry(c cid.Cid, priority int, cancel bool, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { e, exists := m.wantlist[c] if exists { - e.Priority = priority - e.Cancel = cancel - } else { - m.wantlist[c] = &Entry{ - Entry: wantlist.Entry{ - Cid: c, - Priority: priority, - }, - Cancel: cancel, + // Only change priority if want is of the same type + if e.WantType == wantType { + e.Priority = priority + } + // Only change from "dont cancel" to "do cancel" + if cancel { + e.Cancel = cancel } + // Only change from "dont send" to "do send" DONT_HAVE + if sendDontHave { + e.SendDontHave = sendDontHave + } + // want-block overrides existing want-have + if wantType == pb.Message_Wantlist_Block && e.WantType == pb.Message_Wantlist_Have { + e.WantType = wantType + } + m.wantlist[c] = e + return 0 } + + e = &Entry{ + Entry: wantlist.Entry{ + Cid: c, + Priority: priority, + WantType: wantType, + }, + SendDontHave: sendDontHave, + Cancel: cancel, + } + m.wantlist[c] = e + + aspb := entryToPB(e) + return aspb.Size() } func (m *impl) AddBlock(b blocks.Block) { + delete(m.blockPresences, b.Cid()) m.blocks[b.Cid()] = b } +func (m *impl) AddBlockPresence(c cid.Cid, t pb.Message_BlockPresenceType) { + if _, ok := m.blocks[c]; ok { + return + } + m.blockPresences[c] = t +} + +func 
(m *impl) AddHave(c cid.Cid) { + m.AddBlockPresence(c, pb.Message_Have) +} + +func (m *impl) AddDontHave(c cid.Cid) { + m.AddBlockPresence(c, pb.Message_DontHave) +} + +func (m *impl) Size() int { + size := 0 + for _, block := range m.blocks { + size += len(block.RawData()) + } + for c := range m.blockPresences { + size += BlockPresenceSize(c) + } + for _, e := range m.wantlist { + epb := entryToPB(e) + size += epb.Size() + } + + return size +} + +func BlockPresenceSize(c cid.Cid) int { + return (&pb.Message_BlockPresence{ + Cid: c.Bytes(), + Type: pb.Message_Have, + }).Size() +} + // FromNet generates a new BitswapMessage from incoming data on an io.Reader. func FromNet(r io.Reader) (BitSwapMessage, error) { reader := msgio.NewVarintReaderSize(r, network.MessageSizeMax) @@ -193,15 +339,21 @@ func FromMsgReader(r msgio.Reader) (BitSwapMessage, error) { return newMessageFromProto(pb) } +func entryToPB(e *Entry) pb.Message_Wantlist_Entry { + return pb.Message_Wantlist_Entry{ + Block: e.Cid.Bytes(), + Priority: int32(e.Priority), + Cancel: e.Cancel, + WantType: e.WantType, + SendDontHave: e.SendDontHave, + } +} + func (m *impl) ToProtoV0() *pb.Message { pbm := new(pb.Message) pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist)) for _, e := range m.wantlist { - pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, pb.Message_Wantlist_Entry{ - Block: e.Cid.Bytes(), - Priority: int32(e.Priority), - Cancel: e.Cancel, - }) + pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, entryToPB(e)) } pbm.Wantlist.Full = m.full @@ -217,11 +369,7 @@ func (m *impl) ToProtoV1() *pb.Message { pbm := new(pb.Message) pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist)) for _, e := range m.wantlist { - pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, pb.Message_Wantlist_Entry{ - Block: e.Cid.Bytes(), - Priority: int32(e.Priority), - Cancel: e.Cancel, - }) + pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, entryToPB(e)) } pbm.Wantlist.Full 
= m.full @@ -233,6 +381,17 @@ func (m *impl) ToProtoV1() *pb.Message { Prefix: b.Cid().Prefix().Bytes(), }) } + + pbm.BlockPresences = make([]pb.Message_BlockPresence, 0, len(m.blockPresences)) + for c, t := range m.blockPresences { + pbm.BlockPresences = append(pbm.BlockPresences, pb.Message_BlockPresence{ + Cid: c.Bytes(), + Type: t, + }) + } + + pbm.PendingBytes = m.PendingBytes() + return pbm } diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 686ac4a4a..4b51a3cc2 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -18,7 +18,7 @@ func mkFakeCid(s string) cid.Cid { func TestAppendWanted(t *testing.T) { str := mkFakeCid("foo") m := New(true) - m.AddEntry(str, 1) + m.AddEntry(str, 1, pb.Message_Wantlist_Block, true) if !wantlistContains(&m.ToProtoV0().Wantlist, str) { t.Fail() @@ -69,7 +69,7 @@ func TestWantlist(t *testing.T) { keystrs := []cid.Cid{mkFakeCid("foo"), mkFakeCid("bar"), mkFakeCid("baz"), mkFakeCid("bat")} m := New(true) for _, s := range keystrs { - m.AddEntry(s, 1) + m.AddEntry(s, 1, pb.Message_Wantlist_Block, true) } exported := m.Wantlist() @@ -92,7 +92,7 @@ func TestCopyProtoByValue(t *testing.T) { str := mkFakeCid("foo") m := New(true) protoBeforeAppend := m.ToProtoV0() - m.AddEntry(str, 1) + m.AddEntry(str, 1, pb.Message_Wantlist_Block, true) if wantlistContains(&protoBeforeAppend.Wantlist, str) { t.Fail() } @@ -100,11 +100,11 @@ func TestCopyProtoByValue(t *testing.T) { func TestToNetFromNetPreservesWantList(t *testing.T) { original := New(true) - original.AddEntry(mkFakeCid("M"), 1) - original.AddEntry(mkFakeCid("B"), 1) - original.AddEntry(mkFakeCid("D"), 1) - original.AddEntry(mkFakeCid("T"), 1) - original.AddEntry(mkFakeCid("F"), 1) + original.AddEntry(mkFakeCid("M"), 1, pb.Message_Wantlist_Block, true) + original.AddEntry(mkFakeCid("B"), 1, pb.Message_Wantlist_Block, true) + original.AddEntry(mkFakeCid("D"), 1, pb.Message_Wantlist_Block, true) + 
original.AddEntry(mkFakeCid("T"), 1, pb.Message_Wantlist_Block, true) + original.AddEntry(mkFakeCid("F"), 1, pb.Message_Wantlist_Block, true) buf := new(bytes.Buffer) if err := original.ToNetV1(buf); err != nil { @@ -184,8 +184,8 @@ func TestDuplicates(t *testing.T) { b := blocks.NewBlock([]byte("foo")) msg := New(true) - msg.AddEntry(b.Cid(), 1) - msg.AddEntry(b.Cid(), 1) + msg.AddEntry(b.Cid(), 1, pb.Message_Wantlist_Block, true) + msg.AddEntry(b.Cid(), 1, pb.Message_Wantlist_Block, true) if len(msg.Wantlist()) != 1 { t.Fatal("Duplicate in BitSwapMessage") } @@ -195,4 +195,97 @@ func TestDuplicates(t *testing.T) { if len(msg.Blocks()) != 1 { t.Fatal("Duplicate in BitSwapMessage") } + + b2 := blocks.NewBlock([]byte("bar")) + msg.AddBlockPresence(b2.Cid(), pb.Message_Have) + msg.AddBlockPresence(b2.Cid(), pb.Message_Have) + if len(msg.Haves()) != 1 { + t.Fatal("Duplicate in BitSwapMessage") + } +} + +func TestBlockPresences(t *testing.T) { + b1 := blocks.NewBlock([]byte("foo")) + b2 := blocks.NewBlock([]byte("bar")) + msg := New(true) + + msg.AddBlockPresence(b1.Cid(), pb.Message_Have) + msg.AddBlockPresence(b2.Cid(), pb.Message_DontHave) + if len(msg.Haves()) != 1 || !msg.Haves()[0].Equals(b1.Cid()) { + t.Fatal("Expected HAVE") + } + if len(msg.DontHaves()) != 1 || !msg.DontHaves()[0].Equals(b2.Cid()) { + t.Fatal("Expected HAVE") + } + + msg.AddBlock(b1) + if len(msg.Haves()) != 0 { + t.Fatal("Expected block to overwrite HAVE") + } + + msg.AddBlock(b2) + if len(msg.DontHaves()) != 0 { + t.Fatal("Expected block to overwrite DONT_HAVE") + } + + msg.AddBlockPresence(b1.Cid(), pb.Message_Have) + if len(msg.Haves()) != 0 { + t.Fatal("Expected HAVE not to overwrite block") + } + + msg.AddBlockPresence(b2.Cid(), pb.Message_DontHave) + if len(msg.DontHaves()) != 0 { + t.Fatal("Expected DONT_HAVE not to overwrite block") + } +} + +func TestAddWantlistEntry(t *testing.T) { + b := blocks.NewBlock([]byte("foo")) + msg := New(true) + + msg.AddEntry(b.Cid(), 1, 
pb.Message_Wantlist_Have, false) + msg.AddEntry(b.Cid(), 2, pb.Message_Wantlist_Block, true) + entries := msg.Wantlist() + if len(entries) != 1 { + t.Fatal("Duplicate in BitSwapMessage") + } + e := entries[0] + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("want-block should override want-have") + } + if e.SendDontHave != true { + t.Fatal("true SendDontHave should override false SendDontHave") + } + if e.Priority != 1 { + t.Fatal("priority should only be overridden if wants are of same type") + } + + msg.AddEntry(b.Cid(), 2, pb.Message_Wantlist_Block, true) + e = msg.Wantlist()[0] + if e.Priority != 2 { + t.Fatal("priority should be overridden if wants are of same type") + } + + msg.AddEntry(b.Cid(), 3, pb.Message_Wantlist_Have, false) + e = msg.Wantlist()[0] + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("want-have should not override want-block") + } + if e.SendDontHave != true { + t.Fatal("false SendDontHave should not override true SendDontHave") + } + if e.Priority != 2 { + t.Fatal("priority should only be overridden if wants are of same type") + } + + msg.Cancel(b.Cid()) + e = msg.Wantlist()[0] + if !e.Cancel { + t.Fatal("cancel should override want") + } + + msg.AddEntry(b.Cid(), 10, pb.Message_Wantlist_Block, true) + if !e.Cancel { + t.Fatal("want should not override cancel") + } } diff --git a/bitswap/message/pb/message.pb.go b/bitswap/message/pb/message.pb.go index adf14da87..b64e30825 100644 --- a/bitswap/message/pb/message.pb.go +++ b/bitswap/message/pb/message.pb.go @@ -21,12 +21,64 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Message_BlockPresenceType int32 + +const ( + Message_Have Message_BlockPresenceType = 0 + Message_DontHave Message_BlockPresenceType = 1 +) + +var Message_BlockPresenceType_name = map[int32]string{ + 0: "Have", + 1: "DontHave", +} + +var Message_BlockPresenceType_value = map[string]int32{ + "Have": 0, + "DontHave": 1, +} + +func (x Message_BlockPresenceType) String() string { + return proto.EnumName(Message_BlockPresenceType_name, int32(x)) +} + +func (Message_BlockPresenceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 0} +} + +type Message_Wantlist_WantType int32 + +const ( + Message_Wantlist_Block Message_Wantlist_WantType = 0 + Message_Wantlist_Have Message_Wantlist_WantType = 1 +) + +var Message_Wantlist_WantType_name = map[int32]string{ + 0: "Block", + 1: "Have", +} + +var Message_Wantlist_WantType_value = map[string]int32{ + "Block": 0, + "Have": 1, +} + +func (x Message_Wantlist_WantType) String() string { + return proto.EnumName(Message_Wantlist_WantType_name, int32(x)) +} + +func (Message_Wantlist_WantType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 0, 0} +} type Message struct { - Wantlist Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist,proto3" json:"wantlist"` - Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks,proto3" json:"blocks,omitempty"` - Payload []Message_Block `protobuf:"bytes,3,rep,name=payload,proto3" json:"payload"` + Wantlist Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist,proto3" json:"wantlist"` + Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks,proto3" json:"blocks,omitempty"` + Payload []Message_Block `protobuf:"bytes,3,rep,name=payload,proto3" json:"payload"` + BlockPresences []Message_BlockPresence `protobuf:"bytes,4,rep,name=blockPresences,proto3" 
json:"blockPresences"` + PendingBytes int32 `protobuf:"varint,5,opt,name=pendingBytes,proto3" json:"pendingBytes,omitempty"` } func (m *Message) Reset() { *m = Message{} } @@ -83,6 +135,20 @@ func (m *Message) GetPayload() []Message_Block { return nil } +func (m *Message) GetBlockPresences() []Message_BlockPresence { + if m != nil { + return m.BlockPresences + } + return nil +} + +func (m *Message) GetPendingBytes() int32 { + if m != nil { + return m.PendingBytes + } + return 0 +} + type Message_Wantlist struct { Entries []Message_Wantlist_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries"` Full bool `protobuf:"varint,2,opt,name=full,proto3" json:"full,omitempty"` @@ -136,9 +202,11 @@ func (m *Message_Wantlist) GetFull() bool { } type Message_Wantlist_Entry struct { - Block []byte `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` - Priority int32 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` - Cancel bool `protobuf:"varint,3,opt,name=cancel,proto3" json:"cancel,omitempty"` + Block []byte `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + Priority int32 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` + Cancel bool `protobuf:"varint,3,opt,name=cancel,proto3" json:"cancel,omitempty"` + WantType Message_Wantlist_WantType `protobuf:"varint,4,opt,name=wantType,proto3,enum=bitswap.message.pb.Message_Wantlist_WantType" json:"wantType,omitempty"` + SendDontHave bool `protobuf:"varint,5,opt,name=sendDontHave,proto3" json:"sendDontHave,omitempty"` } func (m *Message_Wantlist_Entry) Reset() { *m = Message_Wantlist_Entry{} } @@ -195,6 +263,20 @@ func (m *Message_Wantlist_Entry) GetCancel() bool { return false } +func (m *Message_Wantlist_Entry) GetWantType() Message_Wantlist_WantType { + if m != nil { + return m.WantType + } + return Message_Wantlist_Block +} + +func (m *Message_Wantlist_Entry) GetSendDontHave() bool { + if m != nil { + return m.SendDontHave + } + return false +} 
+ type Message_Block struct { Prefix []byte `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` @@ -247,38 +329,103 @@ func (m *Message_Block) GetData() []byte { return nil } +type Message_BlockPresence struct { + Cid []byte `protobuf:"bytes,1,opt,name=cid,proto3" json:"cid,omitempty"` + Type Message_BlockPresenceType `protobuf:"varint,2,opt,name=type,proto3,enum=bitswap.message.pb.Message_BlockPresenceType" json:"type,omitempty"` +} + +func (m *Message_BlockPresence) Reset() { *m = Message_BlockPresence{} } +func (m *Message_BlockPresence) String() string { return proto.CompactTextString(m) } +func (*Message_BlockPresence) ProtoMessage() {} +func (*Message_BlockPresence) Descriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 2} +} +func (m *Message_BlockPresence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message_BlockPresence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message_BlockPresence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message_BlockPresence) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_BlockPresence.Merge(m, src) +} +func (m *Message_BlockPresence) XXX_Size() int { + return m.Size() +} +func (m *Message_BlockPresence) XXX_DiscardUnknown() { + xxx_messageInfo_Message_BlockPresence.DiscardUnknown(m) +} + +var xxx_messageInfo_Message_BlockPresence proto.InternalMessageInfo + +func (m *Message_BlockPresence) GetCid() []byte { + if m != nil { + return m.Cid + } + return nil +} + +func (m *Message_BlockPresence) GetType() Message_BlockPresenceType { + if m != nil { + return m.Type + } + return Message_Have +} + func init() { + proto.RegisterEnum("bitswap.message.pb.Message_BlockPresenceType", 
Message_BlockPresenceType_name, Message_BlockPresenceType_value) + proto.RegisterEnum("bitswap.message.pb.Message_Wantlist_WantType", Message_Wantlist_WantType_name, Message_Wantlist_WantType_value) proto.RegisterType((*Message)(nil), "bitswap.message.pb.Message") proto.RegisterType((*Message_Wantlist)(nil), "bitswap.message.pb.Message.Wantlist") proto.RegisterType((*Message_Wantlist_Entry)(nil), "bitswap.message.pb.Message.Wantlist.Entry") proto.RegisterType((*Message_Block)(nil), "bitswap.message.pb.Message.Block") + proto.RegisterType((*Message_BlockPresence)(nil), "bitswap.message.pb.Message.BlockPresence") } func init() { proto.RegisterFile("message.proto", fileDescriptor_33c57e4bae7b9afd) } var fileDescriptor_33c57e4bae7b9afd = []byte{ - // 335 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xcf, 0x4a, 0xf3, 0x40, - 0x14, 0xc5, 0x33, 0x4d, 0xd3, 0x86, 0xdb, 0x7e, 0x9b, 0xe1, 0x43, 0x86, 0x2c, 0x62, 0x14, 0x17, - 0x41, 0x70, 0x0a, 0xed, 0x13, 0x58, 0xd0, 0x85, 0xe0, 0xc2, 0x6c, 0x5c, 0x4f, 0xd2, 0x34, 0x0e, - 0xa6, 0x99, 0x90, 0x4c, 0xa9, 0x7d, 0x0b, 0x5f, 0xc1, 0x07, 0x71, 0xdf, 0x65, 0x97, 0xae, 0x44, - 0xda, 0x17, 0x91, 0xdc, 0x4e, 0xb3, 0x11, 0xc4, 0xdd, 0x3d, 0xc3, 0x39, 0xbf, 0xfb, 0x67, 0xe0, - 0xdf, 0x22, 0xad, 0x6b, 0x91, 0xa5, 0xbc, 0xac, 0x94, 0x56, 0x94, 0xc6, 0x52, 0xd7, 0x2b, 0x51, - 0xf2, 0xf6, 0x39, 0xf6, 0xae, 0x32, 0xa9, 0x9f, 0x96, 0x31, 0x4f, 0xd4, 0x62, 0x94, 0xa9, 0x4c, - 0x8d, 0xd0, 0x1a, 0x2f, 0xe7, 0xa8, 0x50, 0x60, 0x75, 0x40, 0x9c, 0xbf, 0xd9, 0xd0, 0xbf, 0x3f, - 0xa4, 0xe9, 0x2d, 0xb8, 0x2b, 0x51, 0xe8, 0x5c, 0xd6, 0x9a, 0x91, 0x80, 0x84, 0x83, 0xf1, 0x05, - 0xff, 0xd9, 0x81, 0x1b, 0x3b, 0x7f, 0x34, 0xde, 0x69, 0x77, 0xf3, 0x79, 0x6a, 0x45, 0x6d, 0x96, - 0x9e, 0x40, 0x2f, 0xce, 0x55, 0xf2, 0x5c, 0xb3, 0x4e, 0x60, 0x87, 0xc3, 0xc8, 0x28, 0x7a, 0x0d, - 0xfd, 0x52, 0xac, 0x73, 0x25, 0x66, 0xcc, 0x0e, 0xec, 0x70, 0x30, 0x3e, 0xfb, 0x0d, 0x3f, 0x6d, - 0x42, 0x86, 0x7d, 
0xcc, 0x79, 0xef, 0x04, 0xdc, 0x63, 0x5f, 0x7a, 0x07, 0xfd, 0xb4, 0xd0, 0x95, - 0x4c, 0x6b, 0x46, 0x90, 0x77, 0xf9, 0x97, 0x71, 0xf9, 0x4d, 0xa1, 0xab, 0xf5, 0x11, 0x6c, 0x00, - 0x94, 0x42, 0x77, 0xbe, 0xcc, 0x73, 0xd6, 0x09, 0x48, 0xe8, 0x46, 0x58, 0x7b, 0x0f, 0xe0, 0xa0, - 0x97, 0xfe, 0x07, 0x07, 0x57, 0xc0, 0xab, 0x0c, 0xa3, 0x83, 0xa0, 0x1e, 0xb8, 0x65, 0x25, 0x55, - 0x25, 0xf5, 0x1a, 0x63, 0x4e, 0xd4, 0xea, 0xe6, 0x04, 0x89, 0x28, 0x92, 0x34, 0x67, 0x36, 0x02, - 0x8d, 0xf2, 0x26, 0xe0, 0xe0, 0x5e, 0x8d, 0xa1, 0xac, 0xd2, 0xb9, 0x7c, 0x31, 0x4c, 0xa3, 0x9a, - 0x39, 0x66, 0x42, 0x0b, 0x04, 0x0e, 0x23, 0xac, 0xa7, 0x6c, 0xb3, 0xf3, 0xc9, 0x76, 0xe7, 0x93, - 0xaf, 0x9d, 0x4f, 0x5e, 0xf7, 0xbe, 0xb5, 0xdd, 0xfb, 0xd6, 0xc7, 0xde, 0xb7, 0xe2, 0x1e, 0x7e, - 0xe2, 0xe4, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x1d, 0x6e, 0x21, 0x18, 0x02, 0x00, 0x00, + // 483 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x6b, 0xd4, 0x50, + 0x14, 0xcd, 0x9b, 0x24, 0x9d, 0x78, 0x9b, 0x96, 0xf1, 0x21, 0xf2, 0xc8, 0x22, 0x8d, 0x83, 0x8b, + 0xa8, 0x34, 0x85, 0xe9, 0x2f, 0xe8, 0xa0, 0xa2, 0x82, 0x20, 0x41, 0x98, 0x75, 0x3e, 0xde, 0xc4, + 0x60, 0x9a, 0x84, 0xbc, 0x37, 0xd6, 0xfc, 0x0b, 0x7f, 0x92, 0xb8, 0xea, 0x4a, 0xba, 0x74, 0x25, + 0x32, 0xf3, 0x47, 0x24, 0x37, 0x2f, 0x81, 0xb1, 0x60, 0xbb, 0xbb, 0xe7, 0xbe, 0x7b, 0x4e, 0xee, + 0xb9, 0x87, 0xc0, 0xd1, 0x25, 0x17, 0x22, 0xca, 0x78, 0x50, 0x37, 0x95, 0xac, 0x28, 0x8d, 0x73, + 0x29, 0xae, 0xa2, 0x3a, 0x18, 0xdb, 0xb1, 0x73, 0x9a, 0xe5, 0xf2, 0xd3, 0x26, 0x0e, 0x92, 0xea, + 0xf2, 0x2c, 0xab, 0xb2, 0xea, 0x0c, 0x47, 0xe3, 0xcd, 0x1a, 0x11, 0x02, 0xac, 0x7a, 0x89, 0xf9, + 0x8f, 0x03, 0x98, 0xbe, 0xef, 0xd9, 0xf4, 0x35, 0x58, 0x57, 0x51, 0x29, 0x8b, 0x5c, 0x48, 0x46, + 0x3c, 0xe2, 0x1f, 0x2e, 0x9e, 0x06, 0xb7, 0xbf, 0x10, 0xa8, 0xf1, 0x60, 0xa5, 0x66, 0x97, 0xc6, + 0xf5, 0xef, 0x13, 0x2d, 0x1c, 0xb9, 0xf4, 0x31, 0x1c, 0xc4, 0x45, 0x95, 0x7c, 0x16, 0x6c, 0xe2, + 0xe9, 0xbe, 
0x1d, 0x2a, 0x44, 0x2f, 0x60, 0x5a, 0x47, 0x6d, 0x51, 0x45, 0x29, 0xd3, 0x3d, 0xdd, + 0x3f, 0x5c, 0x3c, 0xf9, 0x9f, 0xfc, 0xb2, 0x23, 0x29, 0xed, 0x81, 0x47, 0x57, 0x70, 0x8c, 0x62, + 0x1f, 0x1a, 0x2e, 0x78, 0x99, 0x70, 0xc1, 0x0c, 0x54, 0x7a, 0x76, 0xa7, 0xd2, 0xc0, 0x50, 0x8a, + 0xff, 0xc8, 0xd0, 0x39, 0xd8, 0x35, 0x2f, 0xd3, 0xbc, 0xcc, 0x96, 0xad, 0xe4, 0x82, 0x99, 0x1e, + 0xf1, 0xcd, 0x70, 0xaf, 0xe7, 0xfc, 0x9c, 0x80, 0x35, 0x98, 0xa6, 0xef, 0x60, 0xca, 0x4b, 0xd9, + 0xe4, 0x5c, 0x30, 0x82, 0x2b, 0x3c, 0xbf, 0xcf, 0xad, 0x82, 0x57, 0xa5, 0x6c, 0xda, 0xc1, 0x95, + 0x12, 0xa0, 0x14, 0x8c, 0xf5, 0xa6, 0x28, 0xd8, 0xc4, 0x23, 0xbe, 0x15, 0x62, 0xed, 0x7c, 0x27, + 0x60, 0xe2, 0x30, 0x7d, 0x04, 0x26, 0x2e, 0x8b, 0x99, 0xd8, 0x61, 0x0f, 0xa8, 0x03, 0x56, 0xdd, + 0xe4, 0x55, 0x93, 0xcb, 0x16, 0x79, 0x66, 0x38, 0xe2, 0x2e, 0x80, 0x24, 0x2a, 0x13, 0x5e, 0x30, + 0x1d, 0x15, 0x15, 0xa2, 0x6f, 0xfb, 0x80, 0x3f, 0xb6, 0x35, 0x67, 0x86, 0x47, 0xfc, 0xe3, 0xc5, + 0xe9, 0xbd, 0x96, 0x5e, 0x29, 0x52, 0x38, 0xd2, 0xbb, 0x7b, 0x09, 0x5e, 0xa6, 0x2f, 0xab, 0x52, + 0xbe, 0x89, 0xbe, 0x70, 0xbc, 0x97, 0x15, 0xee, 0xf5, 0xe6, 0x27, 0xfd, 0xb9, 0x70, 0xfe, 0x01, + 0x98, 0x18, 0xc3, 0x4c, 0xa3, 0x16, 0x18, 0xdd, 0xf3, 0x8c, 0x38, 0xe7, 0xaa, 0xd9, 0x2d, 0x5c, + 0x37, 0x7c, 0x9d, 0x7f, 0x55, 0x1e, 0x15, 0xea, 0x0e, 0x93, 0x46, 0x32, 0x42, 0x83, 0x76, 0x88, + 0xb5, 0x93, 0xc2, 0xd1, 0x5e, 0xa0, 0x74, 0x06, 0x7a, 0x92, 0xa7, 0x8a, 0xd9, 0x95, 0xf4, 0x02, + 0x0c, 0xd9, 0x79, 0x9c, 0xdc, 0xed, 0x71, 0x4f, 0x0a, 0x3d, 0x22, 0x75, 0xfe, 0x02, 0x1e, 0xde, + 0x7a, 0x1a, 0x37, 0xd7, 0xa8, 0x0d, 0xd6, 0x60, 0x73, 0x46, 0x96, 0xec, 0x7a, 0xeb, 0x92, 0x9b, + 0xad, 0x4b, 0xfe, 0x6c, 0x5d, 0xf2, 0x6d, 0xe7, 0x6a, 0x37, 0x3b, 0x57, 0xfb, 0xb5, 0x73, 0xb5, + 0xf8, 0x00, 0xff, 0xb2, 0xf3, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xac, 0xa9, 0xf7, 0xab, 0xb9, + 0x03, 0x00, 0x00, } func (m *Message) Marshal() (dAtA []byte, err error) { @@ -301,6 +448,25 @@ func (m *Message) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { _ = i var l int _ = l + if m.PendingBytes != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.PendingBytes)) + i-- + dAtA[i] = 0x28 + } + if len(m.BlockPresences) > 0 { + for iNdEx := len(m.BlockPresences) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.BlockPresences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } if len(m.Payload) > 0 { for iNdEx := len(m.Payload) - 1; iNdEx >= 0; iNdEx-- { { @@ -404,6 +570,21 @@ func (m *Message_Wantlist_Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if m.SendDontHave { + i-- + if m.SendDontHave { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.WantType != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.WantType)) + i-- + dAtA[i] = 0x20 + } if m.Cancel { i-- if m.Cancel { @@ -466,6 +647,41 @@ func (m *Message_Block) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Message_BlockPresence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message_BlockPresence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_BlockPresence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Type != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x10 + } + if len(m.Cid) > 0 { + i -= len(m.Cid) + copy(dAtA[i:], m.Cid) + i = encodeVarintMessage(dAtA, i, uint64(len(m.Cid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintMessage(dAtA []byte, offset int, v uint64) int { offset -= sovMessage(v) base := offset @@ -497,6 +713,15 @@ func (m *Message) Size() (n int) { n += 1 + l + 
sovMessage(uint64(l)) } } + if len(m.BlockPresences) > 0 { + for _, e := range m.BlockPresences { + l = e.Size() + n += 1 + l + sovMessage(uint64(l)) + } + } + if m.PendingBytes != 0 { + n += 1 + sovMessage(uint64(m.PendingBytes)) + } return n } @@ -534,6 +759,12 @@ func (m *Message_Wantlist_Entry) Size() (n int) { if m.Cancel { n += 2 } + if m.WantType != 0 { + n += 1 + sovMessage(uint64(m.WantType)) + } + if m.SendDontHave { + n += 2 + } return n } @@ -554,6 +785,22 @@ func (m *Message_Block) Size() (n int) { return n } +func (m *Message_BlockPresence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cid) + if l > 0 { + n += 1 + l + sovMessage(uint64(l)) + } + if m.Type != 0 { + n += 1 + sovMessage(uint64(m.Type)) + } + return n +} + func sovMessage(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -688,6 +935,59 @@ func (m *Message) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockPresences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlockPresences = append(m.BlockPresences, Message_BlockPresence{}) + if err := m.BlockPresences[len(m.BlockPresences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PendingBytes", wireType) + } + m.PendingBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PendingBytes |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipMessage(dAtA[iNdEx:]) @@ -921,6 +1221,45 @@ func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { } } m.Cancel = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WantType", wireType) + } + m.WantType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WantType |= Message_Wantlist_WantType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SendDontHave", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SendDontHave = bool(v != 0) default: iNdEx = preIndex skippy, err := skipMessage(dAtA[iNdEx:]) @@ -1066,10 +1405,115 @@ func (m *Message_Block) Unmarshal(dAtA []byte) error { } return nil } +func (m *Message_BlockPresence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockPresence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockPresence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Cid", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cid = append(m.Cid[:0], dAtA[iNdEx:postIndex]...) + if m.Cid == nil { + m.Cid = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Message_BlockPresenceType(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipMessage(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1101,8 +1545,10 @@ func skipMessage(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1123,30 +1569,55 @@ func skipMessage(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthMessage } iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthMessage + } + return iNdEx, nil case 3: - depth++ - case 
4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMessage + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipMessage(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthMessage + } } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthMessage - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthMessage = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMessage = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMessage = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthMessage = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMessage = fmt.Errorf("proto: integer overflow") ) diff --git a/bitswap/message/pb/message.proto b/bitswap/message/pb/message.proto index 102b3431d..f7afdb1fe 100644 --- a/bitswap/message/pb/message.proto +++ b/bitswap/message/pb/message.proto @@ -7,11 +7,17 @@ import "github.com/gogo/protobuf/gogoproto/gogo.proto"; message Message { message Wantlist { + enum WantType { + Block = 0; + Have = 1; + } message Entry { bytes block = 1; // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0) int32 priority = 2; // the priority (normalized). 
default to 1 bool cancel = 3; // whether this revokes an entry + WantType wantType = 4; // Note: defaults to enum 0, ie Block + bool sendDontHave = 5; // Note: defaults to false } repeated Entry entries = 1 [(gogoproto.nullable) = false]; // a list of wantlist entries @@ -23,7 +29,18 @@ message Message { bytes data = 2; } + enum BlockPresenceType { + Have = 0; + DontHave = 1; + } + message BlockPresence { + bytes cid = 1; + BlockPresenceType type = 2; + } + Wantlist wantlist = 1 [(gogoproto.nullable) = false]; repeated bytes blocks = 2; // used to send Blocks in bitswap 1.0.0 repeated Block payload = 3 [(gogoproto.nullable) = false]; // used to send Blocks in bitswap 1.1.0 + repeated BlockPresence blockPresences = 4 [(gogoproto.nullable) = false]; + int32 pendingBytes = 5; } diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/messagequeue/messagequeue.go index 601a70748..b8caad57b 100644 --- a/bitswap/messagequeue/messagequeue.go +++ b/bitswap/messagequeue/messagequeue.go @@ -2,12 +2,17 @@ package messagequeue import ( "context" + "math" "sync" "time" + debounce "github.com/bep/debounce" + bsmsg "github.com/ipfs/go-bitswap/message" + pb "github.com/ipfs/go-bitswap/message/pb" bsnet "github.com/ipfs/go-bitswap/network" - wantlist "github.com/ipfs/go-bitswap/wantlist" + bswl "github.com/ipfs/go-bitswap/wantlist" + cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" peer "github.com/libp2p/go-libp2p-core/peer" ) @@ -16,7 +21,18 @@ var log = logging.Logger("bitswap") const ( defaultRebroadcastInterval = 30 * time.Second - maxRetries = 10 + // maxRetries is the number of times to attempt to send a message before + // giving up + maxRetries = 10 + // maxMessageSize is the maximum message size in bytes + maxMessageSize = 1024 * 1024 * 2 + // sendErrorBackoff is the time to wait before retrying to connect after + // an error when trying to send a message + sendErrorBackoff = 100 * time.Millisecond + // maxPriority is the max priority as defined by the 
bitswap protocol + maxPriority = math.MaxInt32 + // sendMessageDebounce is the debounce duration when calling sendMessage() + sendMessageDebounce = time.Millisecond ) // MessageNetwork is any network that can connect peers and generate a message @@ -24,55 +40,168 @@ const ( type MessageNetwork interface { ConnectTo(context.Context, peer.ID) error NewMessageSender(context.Context, peer.ID) (bsnet.MessageSender, error) + Self() peer.ID } // MessageQueue implements queue of want messages to send to peers. type MessageQueue struct { - ctx context.Context - p peer.ID - network MessageNetwork - - outgoingWork chan struct{} - done chan struct{} - - // do not touch out of run loop - wl *wantlist.SessionTrackedWantlist - nextMessage bsmsg.BitSwapMessage - nextMessageLk sync.RWMutex + ctx context.Context + p peer.ID + network MessageNetwork + maxMessageSize int + sendErrorBackoff time.Duration + + signalWorkReady func() + outgoingWork chan struct{} + done chan struct{} + + // Take lock whenever any of these variables are modified + wllock sync.Mutex + bcstWants recallWantlist + peerWants recallWantlist + cancels *cid.Set + priority int + + // Dont touch any of these variables outside of run loop sender bsnet.MessageSender rebroadcastIntervalLk sync.RWMutex rebroadcastInterval time.Duration rebroadcastTimer *time.Timer } +// recallWantlist keeps a list of pending wants, and a list of all wants that +// have ever been requested +type recallWantlist struct { + // The list of all wants that have been requested, including wants that + // have been sent and wants that have not yet been sent + allWants *bswl.Wantlist + // The list of wants that have not yet been sent + pending *bswl.Wantlist +} + +func newRecallWantList() recallWantlist { + return recallWantlist{ + allWants: bswl.New(), + pending: bswl.New(), + } +} + +// Add want to both the pending list and the list of all wants +func (r *recallWantlist) Add(c cid.Cid, priority int, wtype pb.Message_Wantlist_WantType) { + 
r.allWants.Add(c, priority, wtype) + r.pending.Add(c, priority, wtype) +} + +// Remove wants from both the pending list and the list of all wants +func (r *recallWantlist) Remove(c cid.Cid) { + r.allWants.Remove(c) + r.pending.Remove(c) +} + +// Remove wants by type from both the pending list and the list of all wants +func (r *recallWantlist) RemoveType(c cid.Cid, wtype pb.Message_Wantlist_WantType) { + r.allWants.RemoveType(c, wtype) + r.pending.RemoveType(c, wtype) +} + // New creats a new MessageQueue. func New(ctx context.Context, p peer.ID, network MessageNetwork) *MessageQueue { - return &MessageQueue{ + return newMessageQueue(ctx, p, network, maxMessageSize, sendErrorBackoff) +} + +// This constructor is used by the tests +func newMessageQueue(ctx context.Context, p peer.ID, network MessageNetwork, maxMsgSize int, sendErrorBackoff time.Duration) *MessageQueue { + mq := &MessageQueue{ ctx: ctx, - wl: wantlist.NewSessionTrackedWantlist(), - network: network, p: p, + network: network, + maxMessageSize: maxMsgSize, + bcstWants: newRecallWantList(), + peerWants: newRecallWantList(), + cancels: cid.NewSet(), outgoingWork: make(chan struct{}, 1), done: make(chan struct{}), rebroadcastInterval: defaultRebroadcastInterval, + sendErrorBackoff: sendErrorBackoff, + priority: maxPriority, } + + // Apply debounce to the work ready signal (which triggers sending a message) + debounced := debounce.New(sendMessageDebounce) + mq.signalWorkReady = func() { debounced(mq.onWorkReady) } + + return mq } -// AddMessage adds new entries to an outgoing message for a given session. 
-func (mq *MessageQueue) AddMessage(entries []bsmsg.Entry, ses uint64) { - if !mq.addEntries(entries, ses) { +// Add want-haves that are part of a broadcast to all connected peers +func (mq *MessageQueue) AddBroadcastWantHaves(wantHaves []cid.Cid) { + if len(wantHaves) == 0 { return } - select { - case mq.outgoingWork <- struct{}{}: - default: + + mq.wllock.Lock() + defer mq.wllock.Unlock() + + for _, c := range wantHaves { + mq.bcstWants.Add(c, mq.priority, pb.Message_Wantlist_Have) + mq.priority-- + + // We're adding a want-have for the cid, so clear any pending cancel + // for the cid + mq.cancels.Remove(c) } + + // Schedule a message send + mq.signalWorkReady() } -// AddWantlist adds a complete session tracked want list to a message queue -func (mq *MessageQueue) AddWantlist(initialWants *wantlist.SessionTrackedWantlist) { - initialWants.CopyWants(mq.wl) - mq.addWantlist() +// Add want-haves and want-blocks for the peer for this message queue. +func (mq *MessageQueue) AddWants(wantBlocks []cid.Cid, wantHaves []cid.Cid) { + if len(wantBlocks) == 0 && len(wantHaves) == 0 { + return + } + + mq.wllock.Lock() + defer mq.wllock.Unlock() + + for _, c := range wantHaves { + mq.peerWants.Add(c, mq.priority, pb.Message_Wantlist_Have) + mq.priority-- + + // We're adding a want-have for the cid, so clear any pending cancel + // for the cid + mq.cancels.Remove(c) + } + for _, c := range wantBlocks { + mq.peerWants.Add(c, mq.priority, pb.Message_Wantlist_Block) + mq.priority-- + + // We're adding a want-block for the cid, so clear any pending cancel + // for the cid + mq.cancels.Remove(c) + } + + // Schedule a message send + mq.signalWorkReady() +} + +// Add cancel messages for the given keys. 
+func (mq *MessageQueue) AddCancels(cancelKs []cid.Cid) { + if len(cancelKs) == 0 { + return + } + + mq.wllock.Lock() + defer mq.wllock.Unlock() + + for _, c := range cancelKs { + mq.bcstWants.Remove(c) + mq.peerWants.Remove(c) + mq.cancels.Add(c) + } + + // Schedule a message send + mq.signalWorkReady() } // SetRebroadcastInterval sets a new interval on which to rebroadcast the full wantlist @@ -85,8 +214,7 @@ func (mq *MessageQueue) SetRebroadcastInterval(delay time.Duration) { mq.rebroadcastIntervalLk.Unlock() } -// Startup starts the processing of messages, and creates an initial message -// based on the given initial wantlist. +// Startup starts the processing of messages and rebroadcasting. func (mq *MessageQueue) Startup() { mq.rebroadcastIntervalLk.RLock() mq.rebroadcastTimer = time.NewTimer(mq.rebroadcastInterval) @@ -105,7 +233,7 @@ func (mq *MessageQueue) runQueue() { case <-mq.rebroadcastTimer.C: mq.rebroadcastWantlist() case <-mq.outgoingWork: - mq.sendMessage() + mq.sendIfReady() case <-mq.done: if mq.sender != nil { mq.sender.Close() @@ -120,87 +248,178 @@ func (mq *MessageQueue) runQueue() { } } -func (mq *MessageQueue) addWantlist() { - - mq.nextMessageLk.Lock() - defer mq.nextMessageLk.Unlock() - - if mq.wl.Len() > 0 { - if mq.nextMessage == nil { - mq.nextMessage = bsmsg.New(false) - } - for _, e := range mq.wl.Entries() { - mq.nextMessage.AddEntry(e.Cid, e.Priority) - } - select { - case mq.outgoingWork <- struct{}{}: - default: - } - } -} - +// Periodically resend the list of wants to the peer func (mq *MessageQueue) rebroadcastWantlist() { mq.rebroadcastIntervalLk.RLock() mq.rebroadcastTimer.Reset(mq.rebroadcastInterval) mq.rebroadcastIntervalLk.RUnlock() - mq.addWantlist() + // If some wants were transferred from the rebroadcast list + if mq.transferRebroadcastWants() { + // Send them out + mq.sendMessage() + } } -func (mq *MessageQueue) addEntries(entries []bsmsg.Entry, ses uint64) bool { - var work bool - mq.nextMessageLk.Lock() - defer 
mq.nextMessageLk.Unlock() - // if we have no message held allocate a new one - if mq.nextMessage == nil { - mq.nextMessage = bsmsg.New(false) - } +// Transfer wants from the rebroadcast lists into the pending lists. +func (mq *MessageQueue) transferRebroadcastWants() bool { + mq.wllock.Lock() + defer mq.wllock.Unlock() - for _, e := range entries { - if e.Cancel { - if mq.wl.Remove(e.Cid, ses) { - work = true - mq.nextMessage.Cancel(e.Cid) - } - } else { - if mq.wl.Add(e.Cid, e.Priority, ses) { - work = true - mq.nextMessage.AddEntry(e.Cid, e.Priority) - } - } + // Check if there are any wants to rebroadcast + if mq.bcstWants.allWants.Len() == 0 && mq.peerWants.allWants.Len() == 0 { + return false } - return work + + // Copy all wants into pending wants lists + mq.bcstWants.pending.Absorb(mq.bcstWants.allWants) + mq.peerWants.pending.Absorb(mq.peerWants.allWants) + + return true } -func (mq *MessageQueue) extractOutgoingMessage() bsmsg.BitSwapMessage { - // grab outgoing message - mq.nextMessageLk.Lock() - message := mq.nextMessage - mq.nextMessage = nil - mq.nextMessageLk.Unlock() - return message +func (mq *MessageQueue) onWorkReady() { + select { + case mq.outgoingWork <- struct{}{}: + default: + } } -func (mq *MessageQueue) sendMessage() { - message := mq.extractOutgoingMessage() - if message == nil || message.Empty() { - return +func (mq *MessageQueue) sendIfReady() { + if mq.hasPendingWork() { + mq.sendMessage() } +} +func (mq *MessageQueue) sendMessage() { err := mq.initializeSender() if err != nil { log.Infof("cant open message sender to peer %s: %s", mq.p, err) // TODO: cant connect, what now? + // TODO: should we stop using this connection and clear the want list + // to avoid using up memory? return } - for i := 0; i < maxRetries; i++ { // try to send this message until we fail. 
+ // Convert want lists to a Bitswap Message + message, onSent := mq.extractOutgoingMessage(mq.sender.SupportsHave()) + if message == nil || message.Empty() { + return + } + + // mq.logOutgoingMessage(message) + + // Try to send this message repeatedly + for i := 0; i < maxRetries; i++ { if mq.attemptSendAndRecovery(message) { + // We were able to send successfully. + onSent() + + // If the message was too big and only a subset of wants could be + // sent, schedule sending the rest of the wants in the next + // iteration of the event loop. + if mq.hasPendingWork() { + mq.signalWorkReady() + } + return } } } +// func (mq *MessageQueue) logOutgoingMessage(msg bsmsg.BitSwapMessage) { +// entries := msg.Wantlist() +// for _, e := range entries { +// if e.Cancel { +// if e.WantType == pb.Message_Wantlist_Have { +// log.Debugf("send %s->%s: cancel-have %s\n", lu.P(mq.network.Self()), lu.P(mq.p), lu.C(e.Cid)) +// } else { +// log.Debugf("send %s->%s: cancel-block %s\n", lu.P(mq.network.Self()), lu.P(mq.p), lu.C(e.Cid)) +// } +// } else { +// if e.WantType == pb.Message_Wantlist_Have { +// log.Debugf("send %s->%s: want-have %s\n", lu.P(mq.network.Self()), lu.P(mq.p), lu.C(e.Cid)) +// } else { +// log.Debugf("send %s->%s: want-block %s\n", lu.P(mq.network.Self()), lu.P(mq.p), lu.C(e.Cid)) +// } +// } +// } +// } + +func (mq *MessageQueue) hasPendingWork() bool { + mq.wllock.Lock() + defer mq.wllock.Unlock() + + return mq.bcstWants.pending.Len() > 0 || mq.peerWants.pending.Len() > 0 || mq.cancels.Len() > 0 +} + +func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwapMessage, func()) { + // Create a new message + msg := bsmsg.New(false) + + mq.wllock.Lock() + defer mq.wllock.Unlock() + + // Get broadcast and regular wantlist entries + bcstEntries := mq.bcstWants.pending.SortedEntries() + peerEntries := mq.peerWants.pending.SortedEntries() + + // Size of the message so far + msgSize := 0 + + // Add each broadcast want-have to the message + for i := 0; 
i < len(bcstEntries) && msgSize < mq.maxMessageSize; i++ { + // Broadcast wants are sent as want-have + wantType := pb.Message_Wantlist_Have + + // If the remote peer doesn't support HAVE / DONT_HAVE messages, + // send a want-block instead + if !supportsHave { + wantType = pb.Message_Wantlist_Block + } + + e := bcstEntries[i] + msgSize += msg.AddEntry(e.Cid, e.Priority, wantType, false) + } + + // Add each regular want-have / want-block to the message + for i := 0; i < len(peerEntries) && msgSize < mq.maxMessageSize; i++ { + e := peerEntries[i] + // If the remote peer doesn't support HAVE / DONT_HAVE messages, + // don't send want-haves (only send want-blocks) + if !supportsHave && e.WantType == pb.Message_Wantlist_Have { + mq.peerWants.RemoveType(e.Cid, pb.Message_Wantlist_Have) + } else { + msgSize += msg.AddEntry(e.Cid, e.Priority, e.WantType, true) + } + } + + // Add each cancel to the message + cancels := mq.cancels.Keys() + for i := 0; i < len(cancels) && msgSize < mq.maxMessageSize; i++ { + c := cancels[i] + + msgSize += msg.Cancel(c) + + // Clear the cancel - we make a best effort to let peers know about + // cancels but won't save them to resend if there's a failure. + mq.cancels.Remove(c) + } + + // Called when the message has been successfully sent. + // Remove the sent keys from the broadcast and regular wantlists. 
+ onSent := func() { + mq.wllock.Lock() + defer mq.wllock.Unlock() + + for _, e := range msg.Wantlist() { + mq.bcstWants.pending.Remove(e.Cid) + mq.peerWants.pending.RemoveType(e.Cid, e.WantType) + } + } + + return msg, onSent +} func (mq *MessageQueue) initializeSender() error { if mq.sender != nil { return nil @@ -228,18 +447,14 @@ func (mq *MessageQueue) attemptSendAndRecovery(message bsmsg.BitSwapMessage) boo return true case <-mq.ctx.Done(): return true - case <-time.After(time.Millisecond * 100): - // wait 100ms in case disconnect notifications are still propogating + case <-time.After(mq.sendErrorBackoff): + // wait 100ms in case disconnect notifications are still propagating log.Warning("SendMsg errored but neither 'done' nor context.Done() were set") } err = mq.initializeSender() if err != nil { log.Infof("couldnt open sender again after SendMsg(%s) failed: %s", mq.p, err) - // TODO(why): what do we do now? - // I think the *right* answer is to probably put the message we're - // trying to send back, and then return to waiting for new work or - // a disconnect. 
return true } diff --git a/bitswap/messagequeue/messagequeue_test.go b/bitswap/messagequeue/messagequeue_test.go index e9d09b931..6ce146f94 100644 --- a/bitswap/messagequeue/messagequeue_test.go +++ b/bitswap/messagequeue/messagequeue_test.go @@ -2,12 +2,16 @@ package messagequeue import ( "context" + "errors" "testing" "time" + "github.com/ipfs/go-bitswap/message" "github.com/ipfs/go-bitswap/testutil" + cid "github.com/ipfs/go-cid" bsmsg "github.com/ipfs/go-bitswap/message" + pb "github.com/ipfs/go-bitswap/message/pb" bsnet "github.com/ipfs/go-bitswap/network" peer "github.com/libp2p/go-libp2p-core/peer" ) @@ -29,19 +33,28 @@ func (fmn *fakeMessageNetwork) NewMessageSender(context.Context, peer.ID) (bsnet return nil, fmn.messageSenderError } +func (fms *fakeMessageNetwork) Self() peer.ID { return "" } + type fakeMessageSender struct { sendError error fullClosed chan<- struct{} reset chan<- struct{} messagesSent chan<- bsmsg.BitSwapMessage + sendErrors chan<- error + supportsHave bool } func (fms *fakeMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { + if fms.sendError != nil { + fms.sendErrors <- fms.sendError + return fms.sendError + } fms.messagesSent <- msg - return fms.sendError + return nil } -func (fms *fakeMessageSender) Close() error { fms.fullClosed <- struct{}{}; return nil } -func (fms *fakeMessageSender) Reset() error { fms.reset <- struct{}{}; return nil } +func (fms *fakeMessageSender) Close() error { fms.fullClosed <- struct{}{}; return nil } +func (fms *fakeMessageSender) Reset() error { fms.reset <- struct{}{}; return nil } +func (fms *fakeMessageSender) SupportsHave() bool { return fms.supportsHave } func collectMessages(ctx context.Context, t *testing.T, @@ -71,24 +84,24 @@ func totalEntriesLength(messages []bsmsg.BitSwapMessage) int { func TestStartupAndShutdown(t *testing.T) { ctx := context.Background() messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) resetChan := make(chan struct{}, 
1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent} + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet) - ses := testutil.GenerateSessionID() - wl := testutil.GenerateWantlist(10, ses) + bcstwh := testutil.GenerateCids(10) messageQueue.Startup() - messageQueue.AddWantlist(wl) + messageQueue.AddBroadcastWantHaves(bcstwh) messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) if len(messages) != 1 { - t.Fatal("wrong number of messages were sent for initial wants") + t.Fatal("wrong number of messages were sent for broadcast want-haves") } firstMessage := messages[0] - if len(firstMessage.Wantlist()) != wl.Len() { + if len(firstMessage.Wantlist()) != len(bcstwh) { t.Fatal("did not add all wants to want list") } for _, entry := range firstMessage.Wantlist() { @@ -113,22 +126,22 @@ func TestStartupAndShutdown(t *testing.T) { func TestSendingMessagesDeduped(t *testing.T) { ctx := context.Background() messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent} + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet) - ses1 := testutil.GenerateSessionID() - ses2 := testutil.GenerateSessionID() - entries := testutil.GenerateMessageEntries(10, false) - messageQueue.Startup() + wantHaves := testutil.GenerateCids(10) + wantBlocks := testutil.GenerateCids(10) - messageQueue.AddMessage(entries, ses1) - messageQueue.AddMessage(entries, ses2) + messageQueue.Startup() + 
messageQueue.AddWants(wantBlocks, wantHaves) + messageQueue.AddWants(wantBlocks, wantHaves) messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) - if totalEntriesLength(messages) != len(entries) { + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { t.Fatal("Messages were not deduped") } } @@ -136,62 +149,448 @@ func TestSendingMessagesDeduped(t *testing.T) { func TestSendingMessagesPartialDupe(t *testing.T) { ctx := context.Background() messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent} + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet) - ses1 := testutil.GenerateSessionID() - ses2 := testutil.GenerateSessionID() - entries := testutil.GenerateMessageEntries(10, false) - moreEntries := testutil.GenerateMessageEntries(5, false) - secondEntries := append(entries[5:], moreEntries...) 
- messageQueue.Startup() + wantHaves := testutil.GenerateCids(10) + wantBlocks := testutil.GenerateCids(10) - messageQueue.AddMessage(entries, ses1) - messageQueue.AddMessage(secondEntries, ses2) + messageQueue.Startup() + messageQueue.AddWants(wantBlocks[:8], wantHaves[:8]) + messageQueue.AddWants(wantBlocks[3:], wantHaves[3:]) messages := collectMessages(ctx, t, messagesSent, 20*time.Millisecond) - if totalEntriesLength(messages) != len(entries)+len(moreEntries) { + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { t.Fatal("messages were not correctly deduped") } +} + +func TestSendingMessagesPriority(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet) + wantHaves1 := testutil.GenerateCids(5) + wantHaves2 := testutil.GenerateCids(5) + wantHaves := append(wantHaves1, wantHaves2...) + wantBlocks1 := testutil.GenerateCids(5) + wantBlocks2 := testutil.GenerateCids(5) + wantBlocks := append(wantBlocks1, wantBlocks2...) 
+ + messageQueue.Startup() + messageQueue.AddWants(wantBlocks1, wantHaves1) + messageQueue.AddWants(wantBlocks2, wantHaves2) + messages := collectMessages(ctx, t, messagesSent, 20*time.Millisecond) + + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { + t.Fatal("wrong number of wants") + } + byCid := make(map[cid.Cid]message.Entry) + for _, entry := range messages[0].Wantlist() { + byCid[entry.Cid] = entry + } + + // Check that earliest want-haves have highest priority + for i := range wantHaves { + if i > 0 { + if byCid[wantHaves[i]].Priority > byCid[wantHaves[i-1]].Priority { + t.Fatal("earliest want-haves should have higher priority") + } + } + } + + // Check that earliest want-blocks have highest priority + for i := range wantBlocks { + if i > 0 { + if byCid[wantBlocks[i]].Priority > byCid[wantBlocks[i-1]].Priority { + t.Fatal("earliest want-blocks should have higher priority") + } + } + } + + // Check that want-haves have higher priority than want-blocks within + // same group + for i := range wantHaves1 { + if i > 0 { + if byCid[wantHaves[i]].Priority <= byCid[wantBlocks[0]].Priority { + t.Fatal("want-haves should have higher priority than want-blocks") + } + } + } + // Check that all items in first group have higher priority than first item + // in second group + for i := range wantHaves1 { + if i > 0 { + if byCid[wantHaves[i]].Priority <= byCid[wantHaves2[0]].Priority { + t.Fatal("items in first group should have higher priority than items in second group") + } + } + } } -func TestWantlistRebroadcast(t *testing.T) { +func TestCancelOverridesPendingWants(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := 
testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet) + wantHaves := testutil.GenerateCids(2) + wantBlocks := testutil.GenerateCids(2) + + messageQueue.Startup() + messageQueue.AddWants(wantBlocks, wantHaves) + messageQueue.AddCancels([]cid.Cid{wantBlocks[0], wantHaves[0]}) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { + t.Fatal("Wrong message count") + } + wb, wh, cl := filterWantTypes(messages[0].Wantlist()) + if len(wb) != 1 || !wb[0].Equals(wantBlocks[1]) { + t.Fatal("Expected 1 want-block") + } + if len(wh) != 1 || !wh[0].Equals(wantHaves[1]) { + t.Fatal("Expected 1 want-have") + } + if len(cl) != 2 { + t.Fatal("Expected 2 cancels") + } +} + +func TestWantOverridesPendingCancels(t *testing.T) { ctx := context.Background() messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent} + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet) - ses := testutil.GenerateSessionID() - wl := testutil.GenerateWantlist(10, ses) + cancels := testutil.GenerateCids(3) messageQueue.Startup() - messageQueue.AddWantlist(wl) + messageQueue.AddCancels(cancels) + messageQueue.AddWants([]cid.Cid{cancels[0]}, []cid.Cid{cancels[1]}) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + if totalEntriesLength(messages) != len(cancels) { + t.Fatal("Wrong message count") + } + + wb, wh, cl := filterWantTypes(messages[0].Wantlist()) + if len(wb) != 1 || !wb[0].Equals(cancels[0]) { + t.Fatal("Expected 1 want-block") + } + if len(wh) != 1 || !wh[0].Equals(cancels[1]) { + t.Fatal("Expected 1 want-have") + 
} + if len(cl) != 1 || !cl[0].Equals(cancels[2]) { + t.Fatal("Expected 1 cancel") + } +} + +func TestWantlistRebroadcast(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet) + bcstwh := testutil.GenerateCids(10) + wantHaves := testutil.GenerateCids(10) + wantBlocks := testutil.GenerateCids(10) + + // Add some broadcast want-haves + messageQueue.Startup() + messageQueue.AddBroadcastWantHaves(bcstwh) messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) if len(messages) != 1 { t.Fatal("wrong number of messages were sent for initial wants") } + // All broadcast want-haves should have been sent + firstMessage := messages[0] + if len(firstMessage.Wantlist()) != len(bcstwh) { + t.Fatal("wrong number of wants") + } + + // Tell message queue to rebroadcast after 5ms, then wait 8ms messageQueue.SetRebroadcastInterval(5 * time.Millisecond) messages = collectMessages(ctx, t, messagesSent, 8*time.Millisecond) if len(messages) != 1 { t.Fatal("wrong number of messages were rebroadcast") } - firstMessage := messages[0] - if len(firstMessage.Wantlist()) != wl.Len() { - t.Fatal("did not add all wants to want list") + // All the want-haves should have been rebroadcast + firstMessage = messages[0] + if len(firstMessage.Wantlist()) != len(bcstwh) { + t.Fatal("did not rebroadcast all wants") + } + + // Tell message queue to rebroadcast after a long time (so it doesn't + // interfere with the next message collection), then send out some + // regular wants and collect them + messageQueue.SetRebroadcastInterval(1 * time.Second) + messageQueue.AddWants(wantBlocks, wantHaves) + 
messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + if len(messages) != 1 { + t.Fatal("wrong number of messages were rebroadcast") + } + + // All new wants should have been sent + firstMessage = messages[0] + if len(firstMessage.Wantlist()) != len(wantHaves)+len(wantBlocks) { + t.Fatal("wrong number of wants") + } + + // Tell message queue to rebroadcast after 5ms, then wait 8ms + messageQueue.SetRebroadcastInterval(5 * time.Millisecond) + messages = collectMessages(ctx, t, messagesSent, 8*time.Millisecond) + firstMessage = messages[0] + + // Both original and new wants should have been rebroadcast + totalWants := len(bcstwh) + len(wantHaves) + len(wantBlocks) + if len(firstMessage.Wantlist()) != totalWants { + t.Fatal("did not rebroadcast all wants") + } + + // Cancel some of the wants + messageQueue.SetRebroadcastInterval(1 * time.Second) + cancels := append([]cid.Cid{bcstwh[0]}, wantHaves[0], wantBlocks[0]) + messageQueue.AddCancels(cancels) + messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + if len(messages) != 1 { + t.Fatal("wrong number of messages were rebroadcast") + } + + // Cancels for each want should have been sent + firstMessage = messages[0] + if len(firstMessage.Wantlist()) != len(cancels) { + t.Fatal("wrong number of cancels") } for _, entry := range firstMessage.Wantlist() { - if entry.Cancel { - t.Fatal("initial add sent cancel entry when it should not have") + if !entry.Cancel { + t.Fatal("expected cancels") + } + } + + // Tell message queue to rebroadcast after 5ms, then wait 8ms + messageQueue.SetRebroadcastInterval(5 * time.Millisecond) + messages = collectMessages(ctx, t, messagesSent, 8*time.Millisecond) + firstMessage = messages[0] + if len(firstMessage.Wantlist()) != totalWants-len(cancels) { + t.Fatal("did not rebroadcast all wants") + } +} + +func TestSendingLargeMessages(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan 
error) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + wantBlocks := testutil.GenerateCids(10) + entrySize := 44 + maxMsgSize := entrySize * 3 // 3 wants + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMsgSize, sendErrorBackoff) + + messageQueue.Startup() + messageQueue.AddWants(wantBlocks, []cid.Cid{}) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + // want-block has size 44, so with maxMsgSize 44 * 3 (3 want-blocks), then if + // we send 10 want-blocks we should expect 4 messages: + // [***] [***] [***] [*] + if len(messages) != 4 { + t.Fatal("expected 4 messages to be sent, got", len(messages)) + } + if totalEntriesLength(messages) != len(wantBlocks) { + t.Fatal("wrong number of wants") + } +} + +func TestSendToPeerThatDoesntSupportHave(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, false} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + messageQueue := New(ctx, peerID, fakenet) + messageQueue.Startup() + + // If the remote peer doesn't support HAVE / DONT_HAVE messages + // - want-blocks should be sent normally + // - want-haves should not be sent + // - broadcast want-haves should be sent as want-blocks + + // Check broadcast want-haves + bcwh := testutil.GenerateCids(10) + messageQueue.AddBroadcastWantHaves(bcwh) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + if len(messages) != 1 { + t.Fatal("wrong number of messages were sent", len(messages)) + } + wl := 
messages[0].Wantlist() + if len(wl) != len(bcwh) { + t.Fatal("wrong number of entries in wantlist", len(wl)) + } + for _, entry := range wl { + if entry.WantType != pb.Message_Wantlist_Block { + t.Fatal("broadcast want-haves should be sent as want-blocks") + } + } + + // Check regular want-haves and want-blocks + wbs := testutil.GenerateCids(10) + whs := testutil.GenerateCids(10) + messageQueue.AddWants(wbs, whs) + messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + if len(messages) != 1 { + t.Fatal("wrong number of messages were sent", len(messages)) + } + wl = messages[0].Wantlist() + if len(wl) != len(wbs) { + t.Fatal("should only send want-blocks (no want-haves)", len(wl)) + } + for _, entry := range wl { + if entry.WantType != pb.Message_Wantlist_Block { + t.Fatal("should only send want-blocks") + } + } +} + +func TestResendAfterError(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + sendErrBackoff := 5 * time.Millisecond + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrBackoff) + wantBlocks := testutil.GenerateCids(10) + wantHaves := testutil.GenerateCids(10) + + messageQueue.Startup() + + var errs []error + go func() { + // After the first error is received, clear sendError so that + // subsequent sends will not error + errs = append(errs, <-sendErrors) + fakeSender.sendError = nil + }() + + // Make the first send error out + fakeSender.sendError = errors.New("send err") + messageQueue.AddWants(wantBlocks, wantHaves) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + if len(errs) != 1 { + t.Fatal("Expected first send to error") + } 
+ + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { + t.Fatal("Expected subsequent send to succeed") + } +} + +func TestResendAfterMaxRetries(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) + resetChan := make(chan struct{}, maxRetries*2) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + sendErrBackoff := 2 * time.Millisecond + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrBackoff) + wantBlocks := testutil.GenerateCids(10) + wantHaves := testutil.GenerateCids(10) + wantBlocks2 := testutil.GenerateCids(10) + wantHaves2 := testutil.GenerateCids(10) + + messageQueue.Startup() + + var errs []error + go func() { + for len(errs) < maxRetries { + err := <-sendErrors + errs = append(errs, err) + } + }() + + // Make the first group of send attempts error out + fakeSender.sendError = errors.New("send err") + messageQueue.AddWants(wantBlocks, wantHaves) + messages := collectMessages(ctx, t, messagesSent, 50*time.Millisecond) + + if len(errs) != maxRetries { + t.Fatal("Expected maxRetries errors, got", len(errs)) + } + + // No successful send after max retries, so expect no messages sent + if totalEntriesLength(messages) != 0 { + t.Fatal("Expected no messages") + } + + // Clear sendError so that subsequent sends will not error + fakeSender.sendError = nil + + // Add a new batch of wants + messageQueue.AddWants(wantBlocks2, wantHaves2) + messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + // All wants from previous and new send should be sent + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks)+len(wantHaves2)+len(wantBlocks2) { + t.Fatal("Expected subsequent send to send first and second batches of wants") + } +} + +func 
filterWantTypes(wantlist []bsmsg.Entry) ([]cid.Cid, []cid.Cid, []cid.Cid) { + var wbs []cid.Cid + var whs []cid.Cid + var cls []cid.Cid + for _, e := range wantlist { + if e.Cancel { + cls = append(cls, e.Cid) + } else if e.WantType == pb.Message_Wantlist_Block { + wbs = append(wbs, e.Cid) + } else { + whs = append(whs, e.Cid) } } + return wbs, whs, cls } diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 783e29e9e..704d851fb 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -13,18 +13,19 @@ import ( ) var ( - // ProtocolBitswapOne is the prefix for the legacy bitswap protocol - ProtocolBitswapOne protocol.ID = "/ipfs/bitswap/1.0.0" // ProtocolBitswapNoVers is equivalent to the legacy bitswap protocol ProtocolBitswapNoVers protocol.ID = "/ipfs/bitswap" - - // ProtocolBitswap is the current version of bitswap protocol, 1.1.0 - ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.1.0" + // ProtocolBitswapOneZero is the prefix for the legacy bitswap protocol + ProtocolBitswapOneZero protocol.ID = "/ipfs/bitswap/1.0.0" + // ProtocolBitswapOneOne is the the prefix for version 1.1.0 + ProtocolBitswapOneOne protocol.ID = "/ipfs/bitswap/1.1.0" + // ProtocolBitswap is the current version of the bitswap protocol: 1.2.0 + ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.2.0" ) // BitSwapNetwork provides network connectivity for BitSwap sessions. type BitSwapNetwork interface { - + Self() peer.ID // SendMessage sends a BitSwap message to a peer. 
SendMessage( context.Context, @@ -36,6 +37,7 @@ type BitSwapNetwork interface { SetDelegate(Receiver) ConnectTo(context.Context, peer.ID) error + DisconnectFrom(context.Context, peer.ID) error NewMessageSender(context.Context, peer.ID) (MessageSender, error) @@ -52,6 +54,8 @@ type MessageSender interface { SendMsg(context.Context, bsmsg.BitSwapMessage) error Close() error Reset() error + // Indicates whether the remote peer supports HAVE / DONT_HAVE messages + SupportsHave() bool } // Receiver is an interface that can receive messages from the BitSwapNetwork. diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 036d15328..2a25b7a00 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -29,31 +29,52 @@ var sendMessageTimeout = time.Minute * 10 // NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host. func NewFromIpfsHost(host host.Host, r routing.ContentRouting, opts ...NetOpt) BitSwapNetwork { - s := Settings{} - for _, opt := range opts { - opt(&s) - } + s := processSettings(opts...) 
bitswapNetwork := impl{ host: host, routing: r, - protocolBitswap: s.ProtocolPrefix + ProtocolBitswap, - protocolBitswapOne: s.ProtocolPrefix + ProtocolBitswapOne, - protocolBitswapNoVers: s.ProtocolPrefix + ProtocolBitswapNoVers, + protocolBitswapNoVers: s.ProtocolPrefix + ProtocolBitswapNoVers, + protocolBitswapOneZero: s.ProtocolPrefix + ProtocolBitswapOneZero, + protocolBitswapOneOne: s.ProtocolPrefix + ProtocolBitswapOneOne, + protocolBitswap: s.ProtocolPrefix + ProtocolBitswap, + + supportedProtocols: s.SupportedProtocols, } return &bitswapNetwork } +func processSettings(opts ...NetOpt) Settings { + s := Settings{ + SupportedProtocols: []protocol.ID{ + ProtocolBitswap, + ProtocolBitswapOneOne, + ProtocolBitswapOneZero, + ProtocolBitswapNoVers, + }, + } + for _, opt := range opts { + opt(&s) + } + for i, proto := range s.SupportedProtocols { + s.SupportedProtocols[i] = s.ProtocolPrefix + proto + } + return s +} + // impl transforms the ipfs network interface, which sends and receives // NetMessage objects, into the bitswap network interface. 
type impl struct { host host.Host routing routing.ContentRouting - protocolBitswap protocol.ID - protocolBitswapOne protocol.ID - protocolBitswapNoVers protocol.ID + protocolBitswapNoVers protocol.ID + protocolBitswapOneZero protocol.ID + protocolBitswapOneOne protocol.ID + protocolBitswap protocol.ID + + supportedProtocols []protocol.ID // inbound messages from the network are forwarded to the receiver receiver Receiver @@ -78,6 +99,23 @@ func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMess return s.bsnet.msgToStream(ctx, s.s, msg) } +func (s *streamMessageSender) SupportsHave() bool { + return s.bsnet.SupportsHave(s.s.Protocol()) +} + +func (bsnet *impl) Self() peer.ID { + return bsnet.host.ID() +} + +// Indicates whether the given protocol supports HAVE / DONT_HAVE messages +func (bsnet *impl) SupportsHave(proto protocol.ID) bool { + switch proto { + case bsnet.protocolBitswapOneOne, bsnet.protocolBitswapOneZero, bsnet.protocolBitswapNoVers: + return false + } + return true +} + func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg.BitSwapMessage) error { deadline := time.Now().Add(sendMessageTimeout) if dl, ok := ctx.Deadline(); ok { @@ -88,13 +126,16 @@ func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg. log.Warningf("error setting deadline: %s", err) } + // Older Bitswap versions use a slightly different wire format so we need + // to convert the message to the appropriate format depending on the remote + // peer's Bitswap version. 
switch s.Protocol() { - case bsnet.protocolBitswap: + case bsnet.protocolBitswapOneOne, bsnet.protocolBitswap: if err := msg.ToNetV1(s); err != nil { log.Debugf("error: %s", err) return err } - case bsnet.protocolBitswapOne, bsnet.protocolBitswapNoVers: + case bsnet.protocolBitswapOneZero, bsnet.protocolBitswapNoVers: if err := msg.ToNetV0(s); err != nil { log.Debugf("error: %s", err) return err @@ -119,7 +160,7 @@ func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID) (MessageSend } func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (network.Stream, error) { - return bsnet.host.NewStream(ctx, p, bsnet.protocolBitswap, bsnet.protocolBitswapOne, bsnet.protocolBitswapNoVers) + return bsnet.host.NewStream(ctx, p, bsnet.supportedProtocols...) } func (bsnet *impl) SendMessage( @@ -147,9 +188,9 @@ func (bsnet *impl) SendMessage( func (bsnet *impl) SetDelegate(r Receiver) { bsnet.receiver = r - bsnet.host.SetStreamHandler(bsnet.protocolBitswap, bsnet.handleNewStream) - bsnet.host.SetStreamHandler(bsnet.protocolBitswapOne, bsnet.handleNewStream) - bsnet.host.SetStreamHandler(bsnet.protocolBitswapNoVers, bsnet.handleNewStream) + for _, proto := range bsnet.supportedProtocols { + bsnet.host.SetStreamHandler(proto, bsnet.handleNewStream) + } bsnet.host.Network().Notify((*netNotifiee)(bsnet)) // TODO: StopNotify. @@ -159,6 +200,10 @@ func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { return bsnet.host.Connect(ctx, peer.AddrInfo{ID: p}) } +func (bsnet *impl) DisconnectFrom(ctx context.Context, p peer.ID) error { + panic("Not implemented: DisconnectFrom() is only used by tests") +} + // FindProvidersAsync returns a channel of providers for the given key. 
func (bsnet *impl) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { out := make(chan peer.ID, max) @@ -234,12 +279,10 @@ func (nn *netNotifiee) impl() *impl { func (nn *netNotifiee) Connected(n network.Network, v network.Conn) { nn.impl().receiver.PeerConnected(v.RemotePeer()) } - func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) { nn.impl().receiver.PeerDisconnected(v.RemotePeer()) } - -func (nn *netNotifiee) OpenedStream(n network.Network, v network.Stream) {} +func (nn *netNotifiee) OpenedStream(n network.Network, s network.Stream) {} func (nn *netNotifiee) ClosedStream(n network.Network, v network.Stream) {} func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr) {} func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr) {} diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index cbcc4fecb..beecf09c7 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -6,12 +6,15 @@ import ( "time" bsmsg "github.com/ipfs/go-bitswap/message" + pb "github.com/ipfs/go-bitswap/message/pb" + bsnet "github.com/ipfs/go-bitswap/network" tn "github.com/ipfs/go-bitswap/testnet" blocksutil "github.com/ipfs/go-ipfs-blocksutil" mockrouting "github.com/ipfs/go-ipfs-routing/mock" "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-testing/net" + "github.com/libp2p/go-libp2p-core/protocol" + tnet "github.com/libp2p/go-libp2p-testing/net" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" ) @@ -24,6 +27,14 @@ type receiver struct { lastSender peer.ID } +func newReceiver() *receiver { + return &receiver{ + peers: make(map[peer.ID]struct{}), + messageReceived: make(chan struct{}), + connectionEvent: make(chan struct{}, 1), + } +} + func (r *receiver) ReceiveMessage( ctx context.Context, sender peer.ID, @@ -48,6 +59,7 @@ func (r *receiver) PeerDisconnected(p peer.ID) { delete(r.peers, p) r.connectionEvent <- struct{}{} } + func 
TestMessageSendAndReceive(t *testing.T) { // create network ctx := context.Background() @@ -64,16 +76,8 @@ func TestMessageSendAndReceive(t *testing.T) { bsnet1 := streamNet.Adapter(p1) bsnet2 := streamNet.Adapter(p2) - r1 := &receiver{ - peers: make(map[peer.ID]struct{}), - messageReceived: make(chan struct{}), - connectionEvent: make(chan struct{}, 1), - } - r2 := &receiver{ - peers: make(map[peer.ID]struct{}), - messageReceived: make(chan struct{}), - connectionEvent: make(chan struct{}, 1), - } + r1 := newReceiver() + r2 := newReceiver() bsnet1.SetDelegate(r1) bsnet2.SetDelegate(r2) @@ -109,7 +113,7 @@ func TestMessageSendAndReceive(t *testing.T) { block1 := blockGenerator.Next() block2 := blockGenerator.Next() sent := bsmsg.New(false) - sent.AddEntry(block1.Cid(), 1) + sent.AddEntry(block1.Cid(), 1, pb.Message_Wantlist_Block, true) sent.AddBlock(block2) err = bsnet1.SendMessage(ctx, p2.ID(), sent) @@ -159,3 +163,49 @@ func TestMessageSendAndReceive(t *testing.T) { t.Fatal("Sent message blocks did not match received message blocks") } } + +func TestSupportsHave(t *testing.T) { + ctx := context.Background() + mn := mocknet.New(ctx) + mr := mockrouting.NewServer() + streamNet, err := tn.StreamNet(ctx, mn, mr) + if err != nil { + t.Fatal("Unable to setup network") + } + + type testCase struct { + proto protocol.ID + expSupportsHave bool + } + + testCases := []testCase{ + testCase{bsnet.ProtocolBitswap, true}, + testCase{bsnet.ProtocolBitswapOneOne, false}, + testCase{bsnet.ProtocolBitswapOneZero, false}, + testCase{bsnet.ProtocolBitswapNoVers, false}, + } + + for _, tc := range testCases { + p1 := tnet.RandIdentityOrFatal(t) + bsnet1 := streamNet.Adapter(p1) + bsnet1.SetDelegate(newReceiver()) + + p2 := tnet.RandIdentityOrFatal(t) + bsnet2 := streamNet.Adapter(p2, bsnet.SupportedProtocols([]protocol.ID{tc.proto})) + bsnet2.SetDelegate(newReceiver()) + + err = mn.LinkAll() + if err != nil { + t.Fatal(err) + } + + senderCurrent, err := bsnet1.NewMessageSender(ctx, 
p2.ID()) + if err != nil { + t.Fatal(err) + } + + if senderCurrent.SupportsHave() != tc.expSupportsHave { + t.Fatal("Expected sender HAVE message support", tc.proto, tc.expSupportsHave) + } + } +} diff --git a/bitswap/network/options.go b/bitswap/network/options.go index 38bb63d10..1df8963a3 100644 --- a/bitswap/network/options.go +++ b/bitswap/network/options.go @@ -5,7 +5,8 @@ import "github.com/libp2p/go-libp2p-core/protocol" type NetOpt func(*Settings) type Settings struct { - ProtocolPrefix protocol.ID + ProtocolPrefix protocol.ID + SupportedProtocols []protocol.ID } func Prefix(prefix protocol.ID) NetOpt { @@ -13,3 +14,9 @@ func Prefix(prefix protocol.ID) NetOpt { settings.ProtocolPrefix = prefix } } + +func SupportedProtocols(protos []protocol.ID) NetOpt { + return func(settings *Settings) { + settings.SupportedProtocols = protos + } +} diff --git a/bitswap/peermanager/peermanager.go b/bitswap/peermanager/peermanager.go index 18fc56b7d..ddd59399f 100644 --- a/bitswap/peermanager/peermanager.go +++ b/bitswap/peermanager/peermanager.go @@ -2,21 +2,28 @@ package peermanager import ( "context" + "sync" - bsmsg "github.com/ipfs/go-bitswap/message" - wantlist "github.com/ipfs/go-bitswap/wantlist" + "github.com/ipfs/go-metrics-interface" + cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" ) // PeerQueue provides a queue of messages to be sent for a single peer. type PeerQueue interface { - AddMessage(entries []bsmsg.Entry, ses uint64) + AddBroadcastWantHaves([]cid.Cid) + AddWants([]cid.Cid, []cid.Cid) + AddCancels([]cid.Cid) Startup() - AddWantlist(initialWants *wantlist.SessionTrackedWantlist) Shutdown() } +type Session interface { + ID() uint64 + SignalAvailability(peer.ID, bool) +} + // PeerQueueFactory provides a function that will create a PeerQueue. 
type PeerQueueFactory func(ctx context.Context, p peer.ID) PeerQueue @@ -27,24 +34,47 @@ type peerQueueInstance struct { // PeerManager manages a pool of peers and sends messages to peers in the pool. type PeerManager struct { + // sync access to peerQueues and peerWantManager + pqLk sync.RWMutex // peerQueues -- interact through internal utility functions get/set/remove/iterate peerQueues map[peer.ID]*peerQueueInstance + pwm *peerWantManager createPeerQueue PeerQueueFactory ctx context.Context + + psLk sync.RWMutex + sessions map[uint64]Session + peerSessions map[peer.ID]map[uint64]struct{} + + self peer.ID } // New creates a new PeerManager, given a context and a peerQueueFactory. -func New(ctx context.Context, createPeerQueue PeerQueueFactory) *PeerManager { +func New(ctx context.Context, createPeerQueue PeerQueueFactory, self peer.ID) *PeerManager { + wantGauge := metrics.NewCtx(ctx, "wantlist_total", "Number of items in wantlist.").Gauge() return &PeerManager{ peerQueues: make(map[peer.ID]*peerQueueInstance), + pwm: newPeerWantManager(wantGauge), createPeerQueue: createPeerQueue, ctx: ctx, + self: self, + + sessions: make(map[uint64]Session), + peerSessions: make(map[peer.ID]map[uint64]struct{}), } } +func (pm *PeerManager) AvailablePeers() []peer.ID { + // TODO: Rate-limit peers + return pm.ConnectedPeers() +} + // ConnectedPeers returns a list of peers this PeerManager is managing. func (pm *PeerManager) ConnectedPeers() []peer.ID { + pm.pqLk.RLock() + defer pm.pqLk.RUnlock() + peers := make([]peer.ID, 0, len(pm.peerQueues)) for p := range pm.peerQueues { peers = append(peers, p) @@ -54,18 +84,31 @@ func (pm *PeerManager) ConnectedPeers() []peer.ID { // Connected is called to add a new peer to the pool, and send it an initial set // of wants. 
-func (pm *PeerManager) Connected(p peer.ID, initialWants *wantlist.SessionTrackedWantlist) { +func (pm *PeerManager) Connected(p peer.ID, initialWantHaves []cid.Cid) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + pq := pm.getOrCreate(p) + pq.refcnt++ - if pq.refcnt == 0 { - pq.pq.AddWantlist(initialWants) + // If this is the first connection to the peer + if pq.refcnt == 1 { + // Inform the peer want manager that there's a new peer + pm.pwm.AddPeer(p) + // Record that the want-haves are being sent to the peer + pm.pwm.PrepareSendWants(p, nil, initialWantHaves) + // Broadcast any live want-haves to the newly connected peers + pq.pq.AddBroadcastWantHaves(initialWantHaves) + // Inform the sessions that the peer has connected + pm.signalAvailability(p, true) } - - pq.refcnt++ } // Disconnected is called to remove a peer from the pool. func (pm *PeerManager) Disconnected(p peer.ID) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + pq, ok := pm.peerQueues[p] if !ok { @@ -77,25 +120,62 @@ func (pm *PeerManager) Disconnected(p peer.ID) { return } + // Inform the sessions that the peer has disconnected + pm.signalAvailability(p, false) + + // Clean up the peer delete(pm.peerQueues, p) pq.pq.Shutdown() + pm.pwm.RemovePeer(p) } -// SendMessage is called to send a message to all or some peers in the pool; -// if targets is nil, it sends to all. 
-func (pm *PeerManager) SendMessage(entries []bsmsg.Entry, targets []peer.ID, from uint64) { - if len(targets) == 0 { - for _, p := range pm.peerQueues { - p.pq.AddMessage(entries, from) +func (pm *PeerManager) BroadcastWantHaves(ctx context.Context, wantHaves []cid.Cid) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + + for p, ks := range pm.pwm.PrepareBroadcastWantHaves(wantHaves) { + if pqi, ok := pm.peerQueues[p]; ok { + pqi.pq.AddBroadcastWantHaves(ks) } - } else { - for _, t := range targets { - pqi := pm.getOrCreate(t) - pqi.pq.AddMessage(entries, from) + } +} + +func (pm *PeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + + if pqi, ok := pm.peerQueues[p]; ok { + wblks, whvs := pm.pwm.PrepareSendWants(p, wantBlocks, wantHaves) + pqi.pq.AddWants(wblks, whvs) + } +} + +func (pm *PeerManager) SendCancels(ctx context.Context, cancelKs []cid.Cid) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + + // Send a CANCEL to each peer that has been sent a want-block or want-have + for p, ks := range pm.pwm.PrepareSendCancels(cancelKs) { + if pqi, ok := pm.peerQueues[p]; ok { + pqi.pq.AddCancels(ks) } } } +func (pm *PeerManager) CurrentWants() []cid.Cid { + pm.pqLk.RLock() + defer pm.pqLk.RUnlock() + + return pm.pwm.GetWantBlocks() +} + +func (pm *PeerManager) CurrentWantHaves() []cid.Cid { + pm.pqLk.RLock() + defer pm.pqLk.RUnlock() + + return pm.pwm.GetWantHaves() +} + func (pm *PeerManager) getOrCreate(p peer.ID) *peerQueueInstance { pqi, ok := pm.peerQueues[p] if !ok { @@ -106,3 +186,44 @@ func (pm *PeerManager) getOrCreate(p peer.ID) *peerQueueInstance { } return pqi } + +func (pm *PeerManager) RegisterSession(p peer.ID, s Session) bool { + pm.psLk.Lock() + defer pm.psLk.Unlock() + + if _, ok := pm.sessions[s.ID()]; !ok { + pm.sessions[s.ID()] = s + } + + if _, ok := pm.peerSessions[p]; !ok { + pm.peerSessions[p] = make(map[uint64]struct{}) + } + pm.peerSessions[p][s.ID()] = 
struct{}{} + + _, ok := pm.peerQueues[p] + return ok +} + +func (pm *PeerManager) UnregisterSession(ses uint64) { + pm.psLk.Lock() + defer pm.psLk.Unlock() + + for p := range pm.peerSessions { + delete(pm.peerSessions[p], ses) + if len(pm.peerSessions[p]) == 0 { + delete(pm.peerSessions, p) + } + } + + delete(pm.sessions, ses) +} + +func (pm *PeerManager) signalAvailability(p peer.ID, isConnected bool) { + for p, sesIds := range pm.peerSessions { + for sesId := range sesIds { + if s, ok := pm.sessions[sesId]; ok { + s.SignalAvailability(p, isConnected) + } + } + } +} diff --git a/bitswap/peermanager/peermanager_test.go b/bitswap/peermanager/peermanager_test.go index cea9ce26b..c62cb3aa5 100644 --- a/bitswap/peermanager/peermanager_test.go +++ b/bitswap/peermanager/peermanager_test.go @@ -2,77 +2,85 @@ package peermanager import ( "context" - "reflect" "testing" "time" "github.com/ipfs/go-bitswap/testutil" + cid "github.com/ipfs/go-cid" - bsmsg "github.com/ipfs/go-bitswap/message" - wantlist "github.com/ipfs/go-bitswap/wantlist" "github.com/libp2p/go-libp2p-core/peer" ) -type messageSent struct { - p peer.ID - entries []bsmsg.Entry - ses uint64 +type msg struct { + p peer.ID + wantBlocks []cid.Cid + wantHaves []cid.Cid + cancels []cid.Cid } -type fakePeer struct { - p peer.ID - messagesSent chan messageSent +type mockPeerQueue struct { + p peer.ID + msgs chan msg } -func (fp *fakePeer) Startup() {} -func (fp *fakePeer) Shutdown() {} +func (fp *mockPeerQueue) Startup() {} +func (fp *mockPeerQueue) Shutdown() {} -func (fp *fakePeer) AddMessage(entries []bsmsg.Entry, ses uint64) { - fp.messagesSent <- messageSent{fp.p, entries, ses} +func (fp *mockPeerQueue) AddBroadcastWantHaves(whs []cid.Cid) { + fp.msgs <- msg{fp.p, nil, whs, nil} } -func (fp *fakePeer) AddWantlist(initialWants *wantlist.SessionTrackedWantlist) {} -func makePeerQueueFactory(messagesSent chan messageSent) PeerQueueFactory { - return func(ctx context.Context, p peer.ID) PeerQueue { - return &fakePeer{ 
- p: p, - messagesSent: messagesSent, - } - } +func (fp *mockPeerQueue) AddWants(wbs []cid.Cid, whs []cid.Cid) { + fp.msgs <- msg{fp.p, wbs, whs, nil} +} +func (fp *mockPeerQueue) AddCancels(cs []cid.Cid) { + fp.msgs <- msg{fp.p, nil, nil, cs} } -func collectAndCheckMessages( - ctx context.Context, - t *testing.T, - messagesSent <-chan messageSent, - entries []bsmsg.Entry, - ses uint64, - timeout time.Duration) []peer.ID { - var peersReceived []peer.ID - timeoutCtx, cancel := context.WithTimeout(ctx, timeout) +type peerWants struct { + wantHaves []cid.Cid + wantBlocks []cid.Cid + cancels []cid.Cid +} + +func collectMessages(ch chan msg, timeout time.Duration) map[peer.ID]peerWants { + ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() + + collected := make(map[peer.ID]peerWants) for { select { - case nextMessage := <-messagesSent: - if nextMessage.ses != ses { - t.Fatal("Message enqueued with wrong session") - } - if !reflect.DeepEqual(nextMessage.entries, entries) { - t.Fatal("Message enqueued with wrong wants") + case m := <-ch: + pw, ok := collected[m.p] + if !ok { + pw = peerWants{} } - peersReceived = append(peersReceived, nextMessage.p) - case <-timeoutCtx.Done(): - return peersReceived + pw.wantHaves = append(pw.wantHaves, m.wantHaves...) + pw.wantBlocks = append(pw.wantBlocks, m.wantBlocks...) + pw.cancels = append(pw.cancels, m.cancels...) 
+ collected[m.p] = pw + case <-ctx.Done(): + return collected + } + } +} + +func makePeerQueueFactory(msgs chan msg) PeerQueueFactory { + return func(ctx context.Context, p peer.ID) PeerQueue { + return &mockPeerQueue{ + p: p, + msgs: msgs, } } } func TestAddingAndRemovingPeers(t *testing.T) { ctx := context.Background() - peerQueueFactory := makePeerQueueFactory(nil) + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(5) - peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4] - peerManager := New(ctx, peerQueueFactory) + tp := testutil.GeneratePeers(6) + self, peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4], tp[5] + peerManager := New(ctx, peerQueueFactory, self) peerManager.Connected(peer1, nil) peerManager.Connected(peer2, nil) @@ -109,63 +117,186 @@ func TestAddingAndRemovingPeers(t *testing.T) { } } -func TestSendingMessagesToPeers(t *testing.T) { - ctx := context.Background() - messagesSent := make(chan messageSent, 16) - peerQueueFactory := makePeerQueueFactory(messagesSent) +func TestBroadcastOnConnect(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + tp := testutil.GeneratePeers(2) + self, peer1 := tp[0], tp[1] + peerManager := New(ctx, peerQueueFactory, self) - tp := testutil.GeneratePeers(5) + cids := testutil.GenerateCids(2) - peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4] - peerManager := New(ctx, peerQueueFactory) + // Connect with two broadcast wants for first peer + peerManager.Connected(peer1, cids) + collected := collectMessages(msgs, 2*time.Millisecond) - peerManager.Connected(peer1, nil) + if len(collected[peer1].wantHaves) != 2 { + t.Fatal("Expected want-haves to be sent to newly connected peer") + } +} + +func TestBroadcastWantHaves(t *testing.T) { + ctx, cancel := 
context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + tp := testutil.GeneratePeers(3) + self, peer1, peer2 := tp[0], tp[1], tp[2] + peerManager := New(ctx, peerQueueFactory, self) + + cids := testutil.GenerateCids(3) + + // Connect to first peer with two broadcast wants + peerManager.Connected(peer1, []cid.Cid{cids[0], cids[1]}) + collected := collectMessages(msgs, 2*time.Millisecond) + + if len(collected[peer1].wantHaves) != 2 { + t.Fatal("Expected want-haves to be sent to newly connected peer") + } + + // Connect to second peer peerManager.Connected(peer2, nil) - peerManager.Connected(peer3, nil) - entries := testutil.GenerateMessageEntries(5, false) - ses := testutil.GenerateSessionID() + // Send a broadcast to all peers, including cid that was already sent to + // first peer + peerManager.BroadcastWantHaves(ctx, []cid.Cid{cids[0], cids[2]}) + collected = collectMessages(msgs, 2*time.Millisecond) + + // One of the want-haves was already sent to peer1 + if len(collected[peer1].wantHaves) != 1 { + t.Fatal("Expected 1 want-haves to be sent to first peer", collected[peer1].wantHaves) + } + if len(collected[peer2].wantHaves) != 2 { + t.Fatal("Expected 2 want-haves to be sent to second peer") + } +} + +func TestSendWants(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + tp := testutil.GeneratePeers(2) + self, peer1 := tp[0], tp[1] + peerManager := New(ctx, peerQueueFactory, self) + cids := testutil.GenerateCids(4) - peerManager.SendMessage(entries, nil, ses) + peerManager.Connected(peer1, nil) + peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0]}, []cid.Cid{cids[2]}) + collected := collectMessages(msgs, 2*time.Millisecond) - peersReceived := collectAndCheckMessages( - ctx, t, messagesSent, entries, ses, 10*time.Millisecond) - if 
len(peersReceived) != 3 { - t.Fatal("Incorrect number of peers received messages") + if len(collected[peer1].wantHaves) != 1 { + t.Fatal("Expected want-have to be sent to peer") } + if len(collected[peer1].wantBlocks) != 1 { + t.Fatal("Expected want-block to be sent to peer") + } + + peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0], cids[1]}, []cid.Cid{cids[2], cids[3]}) + collected = collectMessages(msgs, 2*time.Millisecond) - if !testutil.ContainsPeer(peersReceived, peer1) || - !testutil.ContainsPeer(peersReceived, peer2) || - !testutil.ContainsPeer(peersReceived, peer3) { - t.Fatal("Peers should have received message but did not") + // First want-have and want-block should be filtered (because they were + // already sent) + if len(collected[peer1].wantHaves) != 1 { + t.Fatal("Expected want-have to be sent to peer") } + if len(collected[peer1].wantBlocks) != 1 { + t.Fatal("Expected want-block to be sent to peer") + } +} + +func TestSendCancels(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + tp := testutil.GeneratePeers(3) + self, peer1, peer2 := tp[0], tp[1], tp[2] + peerManager := New(ctx, peerQueueFactory, self) + cids := testutil.GenerateCids(4) + + // Connect to peer1 and peer2 + peerManager.Connected(peer1, nil) + peerManager.Connected(peer2, nil) + + // Send 2 want-blocks and 1 want-have to peer1 + peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0], cids[1]}, []cid.Cid{cids[2]}) + + // Clear messages + collectMessages(msgs, 2*time.Millisecond) + + // Send cancels for 1 want-block and 1 want-have + peerManager.SendCancels(ctx, []cid.Cid{cids[0], cids[2]}) + collected := collectMessages(msgs, 2*time.Millisecond) - if testutil.ContainsPeer(peersReceived, peer4) || - testutil.ContainsPeer(peersReceived, peer5) { - t.Fatal("Peers received message but should not have") + if _, ok := collected[peer2]; ok { + t.Fatal("Expected 
no cancels to be sent to peer that was not sent messages") } + if len(collected[peer1].cancels) != 2 { + t.Fatal("Expected cancel to be sent for want-block and want-have sent to peer") + } + + // Send cancels for all cids + peerManager.SendCancels(ctx, cids) + collected = collectMessages(msgs, 2*time.Millisecond) + + if _, ok := collected[peer2]; ok { + t.Fatal("Expected no cancels to be sent to peer that was not sent messages") + } + if len(collected[peer1].cancels) != 1 { + t.Fatal("Expected cancel to be sent for remaining want-block") + } +} + +func (s *sess) ID() uint64 { + return s.id +} +func (s *sess) SignalAvailability(p peer.ID, isAvailable bool) { + s.available[p] = isAvailable +} - var peersToSendTo []peer.ID - peersToSendTo = append(peersToSendTo, peer1, peer3, peer4) - peerManager.SendMessage(entries, peersToSendTo, ses) - peersReceived = collectAndCheckMessages( - ctx, t, messagesSent, entries, ses, 10*time.Millisecond) +type sess struct { + id uint64 + available map[peer.ID]bool +} - if len(peersReceived) != 3 { - t.Fatal("Incorrect number of peers received messages") +func newSess(id uint64) *sess { + return &sess{id, make(map[peer.ID]bool)} +} + +func TestSessionRegistration(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + + tp := testutil.GeneratePeers(2) + self, p1 := tp[0], tp[1] + peerManager := New(ctx, peerQueueFactory, self) + + id := uint64(1) + s := newSess(id) + peerManager.RegisterSession(p1, s) + if s.available[p1] { + t.Fatal("Expected peer not be available till connected") } - if !testutil.ContainsPeer(peersReceived, peer1) || - !testutil.ContainsPeer(peersReceived, peer3) { - t.Fatal("Peers should have received message but did not") + peerManager.Connected(p1, nil) + if !s.available[p1] { + t.Fatal("Expected signal callback") } - if testutil.ContainsPeer(peersReceived, peer2) || - 
testutil.ContainsPeer(peersReceived, peer5) { - t.Fatal("Peers received message but should not have") + peerManager.Disconnected(p1) + if s.available[p1] { + t.Fatal("Expected signal callback") } - if !testutil.ContainsPeer(peersReceived, peer4) { - t.Fatal("Peer should have autoconnected on message send") + peerManager.UnregisterSession(id) + + peerManager.Connected(p1, nil) + if s.available[p1] { + t.Fatal("Expected no signal callback (session unregistered)") } } diff --git a/bitswap/peermanager/peerwantmanager.go b/bitswap/peermanager/peerwantmanager.go new file mode 100644 index 000000000..31bcf795f --- /dev/null +++ b/bitswap/peermanager/peerwantmanager.go @@ -0,0 +1,206 @@ +package peermanager + +import ( + "bytes" + "fmt" + + lu "github.com/ipfs/go-bitswap/logutil" + + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +// Gauge can be used to keep track of a metric that increases and decreases +// incrementally. It is used by the peerWantManager to track the number of +// want-blocks that are active (ie sent but no response received) +type Gauge interface { + Inc() + Dec() +} + +// peerWantManager keeps track of which want-haves and want-blocks have been +// sent to each peer, so that the PeerManager doesn't send duplicates. 
+type peerWantManager struct { + peerWants map[peer.ID]*peerWant + // Keeps track of the number of active want-blocks + wantBlockGauge Gauge +} + +type peerWant struct { + wantBlocks *cid.Set + wantHaves *cid.Set +} + +// New creates a new peerWantManager with a Gauge that keeps track of the +// number of active want-blocks (ie sent but no response received) +func newPeerWantManager(wantBlockGauge Gauge) *peerWantManager { + return &peerWantManager{ + peerWants: make(map[peer.ID]*peerWant), + wantBlockGauge: wantBlockGauge, + } +} + +// AddPeer adds a peer whose wants we need to keep track of +func (pwm *peerWantManager) AddPeer(p peer.ID) { + if _, ok := pwm.peerWants[p]; !ok { + pwm.peerWants[p] = &peerWant{ + wantBlocks: cid.NewSet(), + wantHaves: cid.NewSet(), + } + } +} + +// RemovePeer removes a peer and its associated wants from tracking +func (pwm *peerWantManager) RemovePeer(p peer.ID) { + delete(pwm.peerWants, p) +} + +// PrepareBroadcastWantHaves filters the list of want-haves for each peer, +// returning a map of peers to the want-haves they have not yet been sent. +func (pwm *peerWantManager) PrepareBroadcastWantHaves(wantHaves []cid.Cid) map[peer.ID][]cid.Cid { + res := make(map[peer.ID][]cid.Cid) + + // Iterate over all known peers + for p, pws := range pwm.peerWants { + // Iterate over all want-haves + for _, c := range wantHaves { + // If the CID has not been sent as a want-block or want-have + if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { + // Record that the CID has been sent as a want-have + pws.wantHaves.Add(c) + + // Add the CID to the results + if _, ok := res[p]; !ok { + res[p] = make([]cid.Cid, 0, 1) + } + res[p] = append(res[p], c) + } + } + } + + return res +} + +// PrepareSendWants filters the list of want-blocks and want-haves such that +// it only contains wants that have not already been sent to the peer. 
+func (pwm *peerWantManager) PrepareSendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) ([]cid.Cid, []cid.Cid) { + resWantBlks := make([]cid.Cid, 0) + resWantHvs := make([]cid.Cid, 0) + + // Get the existing want-blocks and want-haves for the peer + if pws, ok := pwm.peerWants[p]; ok { + // Iterate over the requested want-blocks + for _, c := range wantBlocks { + // If the want-block hasn't been sent to the peer + if !pws.wantBlocks.Has(c) { + // Record that the CID was sent as a want-block + pws.wantBlocks.Add(c) + + // Add the CID to the results + resWantBlks = append(resWantBlks, c) + + // Make sure the CID is no longer recorded as a want-have + pws.wantHaves.Remove(c) + + // Increment the count of want-blocks + pwm.wantBlockGauge.Inc() + } + } + + // Iterate over the requested want-haves + for _, c := range wantHaves { + // If the CID has not been sent as a want-block or want-have + if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { + // Record that the CID was sent as a want-have + pws.wantHaves.Add(c) + + // Add the CID to the results + resWantHvs = append(resWantHvs, c) + } + } + } + + return resWantBlks, resWantHvs +} + +// PrepareSendCancels filters the list of cancels for each peer, +// returning a map of peers which only contains cancels for wants that have +// been sent to the peer. 
+func (pwm *peerWantManager) PrepareSendCancels(cancelKs []cid.Cid) map[peer.ID][]cid.Cid { + res := make(map[peer.ID][]cid.Cid) + + // Iterate over all known peers + for p, pws := range pwm.peerWants { + // Iterate over all requested cancels + for _, c := range cancelKs { + isWantBlock := pws.wantBlocks.Has(c) + isWantHave := pws.wantHaves.Has(c) + + // If the CID was sent as a want-block, decrement the want-block count + if isWantBlock { + pwm.wantBlockGauge.Dec() + } + + // If the CID was sent as a want-block or want-have + if isWantBlock || isWantHave { + // Remove the CID from the recorded want-blocks and want-haves + pws.wantBlocks.Remove(c) + pws.wantHaves.Remove(c) + + // Add the CID to the results + if _, ok := res[p]; !ok { + res[p] = make([]cid.Cid, 0, 1) + } + res[p] = append(res[p], c) + } + } + } + + return res +} + +// GetWantBlocks returns the set of all want-blocks sent to all peers +func (pwm *peerWantManager) GetWantBlocks() []cid.Cid { + res := cid.NewSet() + + // Iterate over all known peers + for _, pws := range pwm.peerWants { + // Iterate over all want-blocks + for _, c := range pws.wantBlocks.Keys() { + // Add the CID to the results + res.Add(c) + } + } + + return res.Keys() +} + +// GetWantHaves returns the set of all want-haves sent to all peers +func (pwm *peerWantManager) GetWantHaves() []cid.Cid { + res := cid.NewSet() + + // Iterate over all known peers + for _, pws := range pwm.peerWants { + // Iterate over all want-haves + for _, c := range pws.wantHaves.Keys() { + // Add the CID to the results + res.Add(c) + } + } + + return res.Keys() +} + +func (pwm *peerWantManager) String() string { + var b bytes.Buffer + for p, ws := range pwm.peerWants { + b.WriteString(fmt.Sprintf("Peer %s: %d want-have / %d want-block:\n", lu.P(p), ws.wantHaves.Len(), ws.wantBlocks.Len())) + for _, c := range ws.wantHaves.Keys() { + b.WriteString(fmt.Sprintf(" want-have %s\n", lu.C(c))) + } + for _, c := range ws.wantBlocks.Keys() { + 
b.WriteString(fmt.Sprintf(" want-block %s\n", lu.C(c))) + } + } + return b.String() +} diff --git a/bitswap/peermanager/peerwantmanager_test.go b/bitswap/peermanager/peerwantmanager_test.go new file mode 100644 index 000000000..dc9e181ce --- /dev/null +++ b/bitswap/peermanager/peerwantmanager_test.go @@ -0,0 +1,292 @@ +package peermanager + +import ( + "testing" + + "github.com/ipfs/go-bitswap/testutil" + + cid "github.com/ipfs/go-cid" +) + +type gauge struct { + count int +} + +func (g *gauge) Inc() { + g.count++ +} +func (g *gauge) Dec() { + g.count-- +} + +func TestEmpty(t *testing.T) { + pwm := newPeerWantManager(&gauge{}) + + if len(pwm.GetWantBlocks()) > 0 { + t.Fatal("Expected GetWantBlocks() to have length 0") + } + if len(pwm.GetWantHaves()) > 0 { + t.Fatal("Expected GetWantHaves() to have length 0") + } +} + +func TestPrepareBroadcastWantHaves(t *testing.T) { + pwm := newPeerWantManager(&gauge{}) + + peers := testutil.GeneratePeers(3) + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + cids3 := testutil.GenerateCids(2) + + pwm.AddPeer(peers[0]) + pwm.AddPeer(peers[1]) + + // Broadcast 2 cids to 2 peers + bcst := pwm.PrepareBroadcastWantHaves(cids) + if len(bcst) != 2 { + t.Fatal("Expected 2 peers") + } + for p := range bcst { + if !testutil.MatchKeysIgnoreOrder(bcst[p], cids) { + t.Fatal("Expected all cids to be broadcast") + } + } + + // Broadcasting same cids should have no effect + bcst2 := pwm.PrepareBroadcastWantHaves(cids) + if len(bcst2) != 0 { + t.Fatal("Expected 0 peers") + } + + // Broadcast 2 other cids + bcst3 := pwm.PrepareBroadcastWantHaves(cids2) + if len(bcst3) != 2 { + t.Fatal("Expected 2 peers") + } + for p := range bcst3 { + if !testutil.MatchKeysIgnoreOrder(bcst3[p], cids2) { + t.Fatal("Expected all new cids to be broadcast") + } + } + + // Broadcast mix of old and new cids + bcst4 := pwm.PrepareBroadcastWantHaves(append(cids, cids3...)) + if len(bcst4) != 2 { + t.Fatal("Expected 2 peers") + } + // Only new cids 
should be broadcast + for p := range bcst4 { + if !testutil.MatchKeysIgnoreOrder(bcst4[p], cids3) { + t.Fatal("Expected all new cids to be broadcast") + } + } + + // Sending want-block for a cid should prevent broadcast to that peer + cids4 := testutil.GenerateCids(4) + wantBlocks := []cid.Cid{cids4[0], cids4[2]} + pwm.PrepareSendWants(peers[0], wantBlocks, []cid.Cid{}) + + bcst5 := pwm.PrepareBroadcastWantHaves(cids4) + if len(bcst4) != 2 { + t.Fatal("Expected 2 peers") + } + // Only cids that were not sent as want-block to peer should be broadcast + for p := range bcst5 { + if p == peers[0] { + if !testutil.MatchKeysIgnoreOrder(bcst5[p], []cid.Cid{cids4[1], cids4[3]}) { + t.Fatal("Expected unsent cids to be broadcast") + } + } + if p == peers[1] { + if !testutil.MatchKeysIgnoreOrder(bcst5[p], cids4) { + t.Fatal("Expected all cids to be broadcast") + } + } + } + + // Add another peer + pwm.AddPeer(peers[2]) + bcst6 := pwm.PrepareBroadcastWantHaves(cids) + if len(bcst6) != 1 { + t.Fatal("Expected 1 peer") + } + for p := range bcst6 { + if !testutil.MatchKeysIgnoreOrder(bcst6[p], cids) { + t.Fatal("Expected all cids to be broadcast") + } + } +} + +func TestPrepareSendWants(t *testing.T) { + pwm := newPeerWantManager(&gauge{}) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + + pwm.AddPeer(p0) + pwm.AddPeer(p1) + + // Send 2 want-blocks and 2 want-haves to p0 + wb, wh := pwm.PrepareSendWants(p0, cids, cids2) + if !testutil.MatchKeysIgnoreOrder(wb, cids) { + t.Fatal("Expected 2 want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(wh, cids2) { + t.Fatal("Expected 2 want-haves") + } + + // Send to p0 + // - 1 old want-block and 2 new want-blocks + // - 1 old want-have and 2 new want-haves + cids3 := testutil.GenerateCids(2) + cids4 := testutil.GenerateCids(2) + wb2, wh2 := pwm.PrepareSendWants(p0, append(cids3, cids[0]), append(cids4, cids2[0])) + if 
!testutil.MatchKeysIgnoreOrder(wb2, cids3) { + t.Fatal("Expected 2 want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(wh2, cids4) { + t.Fatal("Expected 2 want-haves") + } + + // Send to p0 as want-blocks: 1 new want-block, 1 old want-have + cids5 := testutil.GenerateCids(1) + newWantBlockOldWantHave := append(cids5, cids2[0]) + wb3, wh3 := pwm.PrepareSendWants(p0, newWantBlockOldWantHave, []cid.Cid{}) + // If a want was sent as a want-have, it should be ok to now send it as a + // want-block + if !testutil.MatchKeysIgnoreOrder(wb3, newWantBlockOldWantHave) { + t.Fatal("Expected 2 want-blocks") + } + if len(wh3) != 0 { + t.Fatal("Expected 0 want-haves") + } + + // Send to p0 as want-haves: 1 new want-have, 1 old want-block + cids6 := testutil.GenerateCids(1) + newWantHaveOldWantBlock := append(cids6, cids[0]) + wb4, wh4 := pwm.PrepareSendWants(p0, []cid.Cid{}, newWantHaveOldWantBlock) + // If a want was previously sent as a want-block, it should not be + // possible to now send it as a want-have + if !testutil.MatchKeysIgnoreOrder(wh4, cids6) { + t.Fatal("Expected 1 want-have") + } + if len(wb4) != 0 { + t.Fatal("Expected 0 want-blocks") + } + + // Send 2 want-blocks and 2 want-haves to p1 + wb5, wh5 := pwm.PrepareSendWants(p1, cids, cids2) + if !testutil.MatchKeysIgnoreOrder(wb5, cids) { + t.Fatal("Expected 2 want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(wh5, cids2) { + t.Fatal("Expected 2 want-haves") + } +} + +func TestPrepareSendCancels(t *testing.T) { + pwm := newPeerWantManager(&gauge{}) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + wb1 := testutil.GenerateCids(2) + wh1 := testutil.GenerateCids(2) + wb2 := testutil.GenerateCids(2) + wh2 := testutil.GenerateCids(2) + allwb := append(wb1, wb2...) + allwh := append(wh1, wh2...) 
+ + pwm.AddPeer(p0) + pwm.AddPeer(p1) + + // Send 2 want-blocks and 2 want-haves to p0 + pwm.PrepareSendWants(p0, wb1, wh1) + // Send 3 want-blocks and 3 want-haves to p1 + // (1 overlapping want-block / want-have with p0) + pwm.PrepareSendWants(p1, append(wb2, wb1[1]), append(wh2, wh1[1])) + + if !testutil.MatchKeysIgnoreOrder(pwm.GetWantBlocks(), allwb) { + t.Fatal("Expected 4 cids to be wanted") + } + if !testutil.MatchKeysIgnoreOrder(pwm.GetWantHaves(), allwh) { + t.Fatal("Expected 4 cids to be wanted") + } + + // Cancel 1 want-block and 1 want-have that were sent to p0 + res := pwm.PrepareSendCancels([]cid.Cid{wb1[0], wh1[0]}) + // Should cancel the want-block and want-have + if len(res) != 1 { + t.Fatal("Expected 1 peer") + } + if !testutil.MatchKeysIgnoreOrder(res[p0], []cid.Cid{wb1[0], wh1[0]}) { + t.Fatal("Expected 2 cids to be cancelled") + } + if !testutil.MatchKeysIgnoreOrder(pwm.GetWantBlocks(), append(wb2, wb1[1])) { + t.Fatal("Expected 3 want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(pwm.GetWantHaves(), append(wh2, wh1[1])) { + t.Fatal("Expected 3 want-haves") + } + + // Cancel everything + allCids := append(allwb, allwh...) + res2 := pwm.PrepareSendCancels(allCids) + // Should cancel the remaining want-blocks and want-haves + if len(res2) != 2 { + t.Fatal("Expected 2 peers", len(res2)) + } + if !testutil.MatchKeysIgnoreOrder(res2[p0], []cid.Cid{wb1[1], wh1[1]}) { + t.Fatal("Expected un-cancelled cids to be cancelled") + } + remainingP2 := append(wb2, wh2...) 
+ remainingP2 = append(remainingP2, wb1[1], wh1[1]) + if !testutil.MatchKeysIgnoreOrder(res2[p1], remainingP2) { + t.Fatal("Expected un-cancelled cids to be cancelled") + } + if len(pwm.GetWantBlocks()) != 0 { + t.Fatal("Expected 0 want-blocks") + } + if len(pwm.GetWantHaves()) != 0 { + t.Fatal("Expected 0 want-haves") + } +} + +func TestStats(t *testing.T) { + g := &gauge{} + pwm := newPeerWantManager(g) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + + pwm.AddPeer(p0) + + // Send 2 want-blocks and 2 want-haves to p0 + pwm.PrepareSendWants(p0, cids, cids2) + + if g.count != 2 { + t.Fatal("Expected 2 want-blocks") + } + + // Send 1 old want-block and 2 new want-blocks to p0 + cids3 := testutil.GenerateCids(2) + pwm.PrepareSendWants(p0, append(cids3, cids[0]), []cid.Cid{}) + + if g.count != 4 { + t.Fatal("Expected 4 want-blocks") + } + + // Cancel 1 want-block that was sent to p0 + // and 1 want-block that was not sent + cids4 := testutil.GenerateCids(1) + pwm.PrepareSendCancels(append(cids4, cids[0])) + + if g.count != 3 { + t.Fatal("Expected 3 want-blocks", g.count) + } +} diff --git a/bitswap/session/cidqueue.go b/bitswap/session/cidqueue.go index cf461a6cb..aedfa944c 100644 --- a/bitswap/session/cidqueue.go +++ b/bitswap/session/cidqueue.go @@ -27,6 +27,23 @@ func (cq *cidQueue) Pop() cid.Cid { } } +func (cq *cidQueue) Cids() []cid.Cid { + // Lazily delete from the list any cids that were removed from the set + if len(cq.elems) > cq.eset.Len() { + i := 0 + for _, c := range cq.elems { + if cq.eset.Has(c) { + cq.elems[i] = c + i++ + } + } + cq.elems = cq.elems[:i] + } + + // Make a copy of the cids + return append([]cid.Cid{}, cq.elems...) 
+} + func (cq *cidQueue) Push(c cid.Cid) { if cq.eset.Visit(c) { cq.elems = append(cq.elems, c) diff --git a/bitswap/session/peeravailabilitymanager.go b/bitswap/session/peeravailabilitymanager.go new file mode 100644 index 000000000..31b887c62 --- /dev/null +++ b/bitswap/session/peeravailabilitymanager.go @@ -0,0 +1,57 @@ +package session + +import ( + peer "github.com/libp2p/go-libp2p-core/peer" +) + +// peerAvailabilityManager keeps track of which peers have available space +// to receive want requests +type peerAvailabilityManager struct { + peerAvailable map[peer.ID]bool +} + +func newPeerAvailabilityManager() *peerAvailabilityManager { + return &peerAvailabilityManager{ + peerAvailable: make(map[peer.ID]bool), + } +} + +func (pam *peerAvailabilityManager) addPeer(p peer.ID) { + pam.peerAvailable[p] = false +} + +func (pam *peerAvailabilityManager) isAvailable(p peer.ID) (bool, bool) { + is, ok := pam.peerAvailable[p] + return is, ok +} + +func (pam *peerAvailabilityManager) setPeerAvailability(p peer.ID, isAvailable bool) { + pam.peerAvailable[p] = isAvailable +} + +func (pam *peerAvailabilityManager) haveAvailablePeers() bool { + for _, isAvailable := range pam.peerAvailable { + if isAvailable { + return true + } + } + return false +} + +func (pam *peerAvailabilityManager) availablePeers() []peer.ID { + var available []peer.ID + for p, isAvailable := range pam.peerAvailable { + if isAvailable { + available = append(available, p) + } + } + return available +} + +func (pam *peerAvailabilityManager) allPeers() []peer.ID { + var available []peer.ID + for p := range pam.peerAvailable { + available = append(available, p) + } + return available +} diff --git a/bitswap/session/peeravailabilitymanager_test.go b/bitswap/session/peeravailabilitymanager_test.go new file mode 100644 index 000000000..4c4b4b1e0 --- /dev/null +++ b/bitswap/session/peeravailabilitymanager_test.go @@ -0,0 +1,74 @@ +package session + +import ( + "testing" + + 
"github.com/ipfs/go-bitswap/testutil" +) + +func TestPeerAvailabilityManager(t *testing.T) { + peers := testutil.GeneratePeers(2) + pam := newPeerAvailabilityManager() + + isAvailable, ok := pam.isAvailable(peers[0]) + if isAvailable || ok { + t.Fatal("expected not to have any availability yet") + } + + if pam.haveAvailablePeers() { + t.Fatal("expected not to have any availability yet") + } + + pam.addPeer(peers[0]) + isAvailable, ok = pam.isAvailable(peers[0]) + if !ok { + t.Fatal("expected to have a peer") + } + if isAvailable { + t.Fatal("expected not to have any availability yet") + } + if pam.haveAvailablePeers() { + t.Fatal("expected not to have any availability yet") + } + if len(pam.availablePeers()) != 0 { + t.Fatal("expected not to have any availability yet") + } + if len(pam.allPeers()) != 1 { + t.Fatal("expected one peer") + } + + pam.setPeerAvailability(peers[0], true) + isAvailable, ok = pam.isAvailable(peers[0]) + if !ok { + t.Fatal("expected to have a peer") + } + if !isAvailable { + t.Fatal("expected peer to be available") + } + if !pam.haveAvailablePeers() { + t.Fatal("expected peer to be available") + } + if len(pam.availablePeers()) != 1 { + t.Fatal("expected peer to be available") + } + if len(pam.allPeers()) != 1 { + t.Fatal("expected one peer") + } + + pam.addPeer(peers[1]) + if len(pam.availablePeers()) != 1 { + t.Fatal("expected one peer to be available") + } + if len(pam.allPeers()) != 2 { + t.Fatal("expected two peers") + } + + pam.setPeerAvailability(peers[0], false) + isAvailable, ok = pam.isAvailable(peers[0]) + if !ok { + t.Fatal("expected to have a peer") + } + if isAvailable { + t.Fatal("expected peer to not be available") + } +} diff --git a/bitswap/session/peerresponsetracker.go b/bitswap/session/peerresponsetracker.go new file mode 100644 index 000000000..220398968 --- /dev/null +++ b/bitswap/session/peerresponsetracker.go @@ -0,0 +1,68 @@ +package session + +import ( + "math/rand" + + peer "github.com/libp2p/go-libp2p-core/peer" 
+) + +// peerResponseTracker keeps track of how many times each peer was the first +// to send us a block for a given CID (used to rank peers) +type peerResponseTracker struct { + firstResponder map[peer.ID]int +} + +func newPeerResponseTracker() *peerResponseTracker { + return &peerResponseTracker{ + firstResponder: make(map[peer.ID]int), + } +} + +func (prt *peerResponseTracker) receivedBlockFrom(from peer.ID) { + prt.firstResponder[from]++ +} + +func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID { + if len(peers) == 0 { + return "" + } + + rnd := rand.Float64() + + // Find the total received blocks for all candidate peers + total := 0 + for _, p := range peers { + total += prt.getPeerCount(p) + } + + // Choose one of the peers with a chance proportional to the number + // of blocks received from that peer + counted := 0.0 + for _, p := range peers { + counted += float64(prt.getPeerCount(p)) / float64(total) + if counted > rnd { + // log.Warningf(" chose %s from %s (%d) / %s (%d) with pivot %.2f", + // lu.P(p), lu.P(peers[0]), prt.firstResponder[peers[0]], lu.P(peers[1]), prt.firstResponder[peers[1]], rnd) + return p + } + } + + // We shouldn't get here unless there is some weirdness with floating point + // math that doesn't quite cover the whole range of peers in the for loop + // so just choose the last peer. 
+ index := len(peers) - 1 + // log.Warningf(" chose last (indx %d) %s from %s (%d) / %s (%d) with pivot %.2f", + // index, lu.P(peers[index]), lu.P(peers[0]), prt.firstResponder[peers[0]], lu.P(peers[1]), prt.firstResponder[peers[1]], rnd) + return peers[index] +} + +func (prt *peerResponseTracker) getPeerCount(p peer.ID) int { + count, ok := prt.firstResponder[p] + if ok { + return count + } + + // Make sure there is always at least a small chance a new peer + // will be chosen + return 1 +} diff --git a/bitswap/session/peerresponsetracker_test.go b/bitswap/session/peerresponsetracker_test.go new file mode 100644 index 000000000..bbe6bd756 --- /dev/null +++ b/bitswap/session/peerresponsetracker_test.go @@ -0,0 +1,117 @@ +package session + +import ( + "math" + "testing" + + "github.com/ipfs/go-bitswap/testutil" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +func TestPeerResponseTrackerInit(t *testing.T) { + peers := testutil.GeneratePeers(2) + prt := newPeerResponseTracker() + + if prt.choose([]peer.ID{}) != "" { + t.Fatal("expected empty peer ID") + } + if prt.choose([]peer.ID{peers[0]}) != peers[0] { + t.Fatal("expected single peer ID") + } + p := prt.choose(peers) + if p != peers[0] && p != peers[1] { + t.Fatal("expected randomly chosen peer") + } +} + +func TestPeerResponseTrackerProbabilityUnknownPeers(t *testing.T) { + peers := testutil.GeneratePeers(4) + prt := newPeerResponseTracker() + + choices := []int{0, 0, 0, 0} + count := 1000 + for i := 0; i < count; i++ { + p := prt.choose(peers) + if p == peers[0] { + choices[0]++ + } else if p == peers[1] { + choices[1]++ + } else if p == peers[2] { + choices[2]++ + } else if p == peers[3] { + choices[3]++ + } + } + + for _, c := range choices { + if c == 0 { + t.Fatal("expected each peer to be chosen at least once") + } + if math.Abs(float64(c-choices[0])) > 0.2*float64(count) { + t.Fatal("expected unknown peers to have roughly equal chance of being chosen") + } + } +} + +func 
TestPeerResponseTrackerProbabilityOneKnownOneUnknownPeer(t *testing.T) { + peers := testutil.GeneratePeers(2) + prt := newPeerResponseTracker() + + prt.receivedBlockFrom(peers[0]) + + chooseFirst := 0 + chooseSecond := 0 + for i := 0; i < 1000; i++ { + p := prt.choose(peers) + if p == peers[0] { + chooseFirst++ + } else if p == peers[1] { + chooseSecond++ + } + } + + if chooseSecond == 0 { + t.Fatal("expected unknown peer to occasionally be chosen") + } + if chooseSecond > chooseFirst { + t.Fatal("expected known peer to be chosen more often") + } +} + +func TestPeerResponseTrackerProbabilityProportional(t *testing.T) { + peers := testutil.GeneratePeers(3) + prt := newPeerResponseTracker() + + probabilities := []float64{0.1, 0.6, 0.3} + count := 1000 + for pi, prob := range probabilities { + for i := 0; float64(i) < float64(count)*prob; i++ { + prt.receivedBlockFrom(peers[pi]) + } + } + + var choices []int + for range probabilities { + choices = append(choices, 0) + } + + for i := 0; i < count; i++ { + p := prt.choose(peers) + if p == peers[0] { + choices[0]++ + } else if p == peers[1] { + choices[1]++ + } else if p == peers[2] { + choices[2]++ + } + } + + for i, c := range choices { + if c == 0 { + t.Fatal("expected each peer to be chosen at least once") + } + if math.Abs(float64(c)-(float64(count)*probabilities[i])) > 0.2*float64(count) { + t.Fatal("expected peers to be chosen proportionally to probability") + } + } +} diff --git a/bitswap/session/sentwantblockstracker.go b/bitswap/session/sentwantblockstracker.go new file mode 100644 index 000000000..cf0581ef3 --- /dev/null +++ b/bitswap/session/sentwantblockstracker.go @@ -0,0 +1,33 @@ +package session + +import ( + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +// sentWantBlocksTracker keeps track of which peers we've sent a want-block to +type sentWantBlocksTracker struct { + sentWantBlocks map[peer.ID]map[cid.Cid]struct{} +} + +func newSentWantBlocksTracker() 
*sentWantBlocksTracker { + return &sentWantBlocksTracker{ + sentWantBlocks: make(map[peer.ID]map[cid.Cid]struct{}), + } +} + +func (s *sentWantBlocksTracker) addSentWantBlocksTo(p peer.ID, ks []cid.Cid) { + cids, ok := s.sentWantBlocks[p] + if !ok { + cids = make(map[cid.Cid]struct{}, len(ks)) + s.sentWantBlocks[p] = cids + } + for _, c := range ks { + cids[c] = struct{}{} + } +} + +func (s *sentWantBlocksTracker) haveSentWantBlockTo(p peer.ID, c cid.Cid) bool { + _, ok := s.sentWantBlocks[p][c] + return ok +} diff --git a/bitswap/session/sentwantblockstracker_test.go b/bitswap/session/sentwantblockstracker_test.go new file mode 100644 index 000000000..097cac6b4 --- /dev/null +++ b/bitswap/session/sentwantblockstracker_test.go @@ -0,0 +1,28 @@ +package session + +import ( + "testing" + + "github.com/ipfs/go-bitswap/testutil" +) + +func TestSendWantBlocksTracker(t *testing.T) { + peers := testutil.GeneratePeers(2) + cids := testutil.GenerateCids(2) + swbt := newSentWantBlocksTracker() + + if swbt.haveSentWantBlockTo(peers[0], cids[0]) { + t.Fatal("expected not to have sent anything yet") + } + + swbt.addSentWantBlocksTo(peers[0], cids) + if !swbt.haveSentWantBlockTo(peers[0], cids[0]) { + t.Fatal("expected to have sent cid to peer") + } + if !swbt.haveSentWantBlockTo(peers[0], cids[1]) { + t.Fatal("expected to have sent cid to peer") + } + if swbt.haveSentWantBlockTo(peers[1], cids[0]) { + t.Fatal("expected not to have sent cid to peer") + } +} diff --git a/bitswap/session/session.go b/bitswap/session/session.go index 6c8363550..d9fb24437 100644 --- a/bitswap/session/session.go +++ b/bitswap/session/session.go @@ -2,11 +2,15 @@ package session import ( "context" + "sync" "time" + // lu "github.com/ipfs/go-bitswap/logutil" + bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" bsgetter "github.com/ipfs/go-bitswap/getter" notifications "github.com/ipfs/go-bitswap/notifications" - bssd "github.com/ipfs/go-bitswap/sessiondata" + bspm 
"github.com/ipfs/go-bitswap/peermanager" + bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" @@ -15,47 +19,71 @@ import ( loggables "github.com/libp2p/go-libp2p-loggables" ) +var log = logging.Logger("bs:sess") + const ( - broadcastLiveWantsLimit = 4 - targetedLiveWantsLimit = 32 + broadcastLiveWantsLimit = 64 ) // WantManager is an interface that can be used to request blocks // from given peers. type WantManager interface { - WantBlocks(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) - CancelWants(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) + // BroadcastWantHaves sends want-haves to all connected peers (used for + // session discovery) + BroadcastWantHaves(context.Context, uint64, []cid.Cid) + // RemoveSession removes the session from the WantManager (when the + // session shuts down) + RemoveSession(context.Context, uint64) +} + +// PeerManager keeps track of which sessions are interested in which peers +// and takes care of sending wants for the sessions +type PeerManager interface { + // RegisterSession tells the PeerManager that the session is interested + // in a peer's connection state + RegisterSession(peer.ID, bspm.Session) bool + // UnregisterSession tells the PeerManager that the session is no longer + // interested in a peer's connection state + UnregisterSession(uint64) + // SendWants tells the PeerManager to send wants to the given peer + SendWants(ctx context.Context, peerId peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) } // PeerManager provides an interface for tracking and optimize peers, and // requesting more when neccesary. -type PeerManager interface { +type SessionPeerManager interface { + // ReceiveFrom is called when blocks and HAVEs are received from a peer. + // It returns a boolean indicating if the peer is new to the session. 
+ ReceiveFrom(peerId peer.ID, blks []cid.Cid, haves []cid.Cid) bool + // Peers returns the set of peers in the session. + Peers() *peer.Set + // FindMorePeers queries Content Routing to discover providers of the given cid FindMorePeers(context.Context, cid.Cid) - GetOptimizedPeers() []bssd.OptimizedPeer + // RecordPeerRequests records the time that a cid was requested from a peer RecordPeerRequests([]peer.ID, []cid.Cid) + // RecordPeerResponse records the time that a response for a cid arrived + // from a peer RecordPeerResponse(peer.ID, []cid.Cid) + // RecordCancels records that cancels were sent for the given cids RecordCancels([]cid.Cid) } -// RequestSplitter provides an interface for splitting -// a request for Cids up among peers. -type RequestSplitter interface { - SplitRequest([]bssd.OptimizedPeer, []cid.Cid) []bssd.PartialRequest - RecordDuplicateBlock() - RecordUniqueBlock() -} - +// opType is the kind of operation that is being processed by the event loop type opType int const ( + // Receive blocks opReceive opType = iota + // Want blocks opWant + // Cancel wants opCancel + // Broadcast want-haves + opBroadcast ) type op struct { op opType - from peer.ID keys []cid.Cid } @@ -64,24 +92,24 @@ type op struct { // info to, and who to request blocks from. 
type Session struct { // dependencies - ctx context.Context - wm WantManager - pm PeerManager - srs RequestSplitter + ctx context.Context + wm WantManager + sprm SessionPeerManager + sim *bssim.SessionInterestManager + + sw sessionWants + sws sessionWantSender - sw sessionWants + latencyTrkr latencyTracker // channels incoming chan op - latencyReqs chan chan time.Duration tickDelayReqs chan time.Duration // do not touch outside run loop idleTick *time.Timer periodicSearchTimer *time.Timer baseTickDelay time.Duration - latTotal time.Duration - fetchcnt int consecutiveTicks int initialSearchDelay time.Duration periodicSearchDelay delay.D @@ -89,6 +117,8 @@ type Session struct { notif notifications.PubSub uuid logging.Loggable id uint64 + + self peer.ID } // New creates a new bitswap session whose lifetime is bounded by the @@ -96,53 +126,111 @@ type Session struct { func New(ctx context.Context, id uint64, wm WantManager, + sprm SessionPeerManager, + sim *bssim.SessionInterestManager, pm PeerManager, - srs RequestSplitter, + bpm *bsbpm.BlockPresenceManager, notif notifications.PubSub, initialSearchDelay time.Duration, - periodicSearchDelay delay.D) *Session { + periodicSearchDelay delay.D, + self peer.ID) *Session { s := &Session{ - sw: sessionWants{ - toFetch: newCidQueue(), - liveWants: make(map[cid.Cid]time.Time), - pastWants: cid.NewSet(), - }, - latencyReqs: make(chan chan time.Duration), + sw: newSessionWants(), tickDelayReqs: make(chan time.Duration), ctx: ctx, wm: wm, - pm: pm, - srs: srs, - incoming: make(chan op, 16), + sprm: sprm, + sim: sim, + incoming: make(chan op, 128), + latencyTrkr: latencyTracker{}, notif: notif, uuid: loggables.Uuid("GetBlockRequest"), baseTickDelay: time.Millisecond * 500, id: id, initialSearchDelay: initialSearchDelay, periodicSearchDelay: periodicSearchDelay, + self: self, } + s.sws = newSessionWantSender(ctx, id, pm, bpm, s.onWantsSent, s.onPeersExhausted) go s.run(ctx) return s } +func (s *Session) ID() uint64 { + return s.id 
+} + // ReceiveFrom receives incoming blocks from the given peer. -func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid) { - interested := s.sw.FilterInteresting(ks) - if len(interested) == 0 { +func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { + interestedRes := s.sim.FilterSessionInterested(s.id, ks, haves, dontHaves) + ks = interestedRes[0] + haves = interestedRes[1] + dontHaves = interestedRes[2] + // s.logReceiveFrom(from, ks, haves, dontHaves) + + // Add any newly discovered peers that have blocks we're interested in to + // the peer set + isNewPeer := s.sprm.ReceiveFrom(from, ks, haves) + + // Record response timing only if the blocks came from the network + // (blocks can also be received from the local node) + if len(ks) > 0 && from != "" { + s.sprm.RecordPeerResponse(from, ks) + } + + // Update want potential + s.sws.Update(from, ks, haves, dontHaves, isNewPeer) + + if len(ks) == 0 { return } + // Record which blocks have been received and figure out the total latency + // for fetching the blocks + wanted, totalLatency := s.sw.BlocksReceived(ks) + s.latencyTrkr.receiveUpdate(len(wanted), totalLatency) + + if len(wanted) == 0 { + return + } + + // Inform the SessionInterestManager that this session is no longer + // expecting to receive the wanted keys + s.sim.RemoveSessionWants(s.id, wanted) + select { - case s.incoming <- op{op: opReceive, from: from, keys: interested}: + case s.incoming <- op{op: opReceive, keys: wanted}: case <-s.ctx.Done(): } } -// IsWanted returns true if this session is waiting to receive the given Cid. 
-func (s *Session) IsWanted(c cid.Cid) bool { - return s.sw.IsWanted(c) +// func (s *Session) logReceiveFrom(from peer.ID, interestedKs []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { +// // log.Infof("Ses%d<-%s: %d blocks, %d haves, %d dont haves\n", +// // s.id, from, len(interestedKs), len(wantedHaves), len(wantedDontHaves)) +// for _, c := range interestedKs { +// log.Warningf("Ses%d %s<-%s: block %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c)) +// } +// for _, c := range haves { +// log.Warningf("Ses%d %s<-%s: HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c)) +// } +// for _, c := range dontHaves { +// log.Warningf("Ses%d %s<-%s: DONT_HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c)) +// } +// } + +func (s *Session) onWantsSent(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { + allBlks := append(wantBlocks[:len(wantBlocks):len(wantBlocks)], wantHaves...) + s.sw.WantsSent(allBlks) + s.sprm.RecordPeerRequests([]peer.ID{p}, allBlks) +} + +func (s *Session) onPeersExhausted(ks []cid.Cid) { + select { + case s.incoming <- op{op: opBroadcast, keys: ks}: + case <-s.ctx.Done(): + } } // GetBlock fetches a single block. @@ -173,23 +261,6 @@ func (s *Session) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks. ) } -// GetAverageLatency returns the average latency for block requests. -func (s *Session) GetAverageLatency() time.Duration { - resp := make(chan time.Duration) - select { - case s.latencyReqs <- resp: - case <-s.ctx.Done(): - return -1 * time.Millisecond - } - - select { - case latency := <-resp: - return latency - case <-s.ctx.Done(): - return -1 * time.Millisecond - } -} - // SetBaseTickDelay changes the rate at which ticks happen. 
func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { select { @@ -198,9 +269,11 @@ func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { } } -// Session run loop -- everything function below here should not be called -// of this loop +// Session run loop -- everything in this function should not be called +// outside of this loop func (s *Session) run(ctx context.Context) { + go s.sws.Run() + s.idleTick = time.NewTimer(s.initialSearchDelay) s.periodicSearchTimer = time.NewTimer(s.periodicSearchDelay.NextWaitTime()) for { @@ -208,11 +281,13 @@ func (s *Session) run(ctx context.Context) { case oper := <-s.incoming: switch oper.op { case opReceive: - s.handleReceive(ctx, oper.from, oper.keys) + s.handleReceive(oper.keys) case opWant: s.wantBlocks(ctx, oper.keys) case opCancel: s.sw.CancelPending(oper.keys) + case opBroadcast: + s.handleIdleTick(ctx) default: panic("unhandled operation") } @@ -220,8 +295,6 @@ func (s *Session) run(ctx context.Context) { s.handleIdleTick(ctx) case <-s.periodicSearchTimer.C: s.handlePeriodicSearch(ctx) - case resp := <-s.latencyReqs: - resp <- s.averageLatency() case baseTickDelay := <-s.tickDelayReqs: s.baseTickDelay = baseTickDelay case <-ctx.Done(): @@ -233,18 +306,22 @@ func (s *Session) run(ctx context.Context) { func (s *Session) handleIdleTick(ctx context.Context) { live := s.sw.PrepareBroadcast() + // log.Warningf("\n\n\n\n\nSes%d: broadcast %d keys\n\n\n\n\n", s.id, len(live)) + // log.Infof("Ses%d: broadcast %d keys\n", s.id, len(live)) + log.Warningf("Ses%d: broadcast %d keys", s.id, len(live)) - // Broadcast these keys to everyone we're connected to - s.pm.RecordPeerRequests(nil, live) - s.wm.WantBlocks(ctx, live, nil, s.id) + // Broadcast a want-have for the live wants to everyone we're connected to + s.sprm.RecordPeerRequests(nil, live) + s.wm.BroadcastWantHaves(ctx, s.id, live) - // do no find providers on consecutive ticks + // do not find providers on consecutive ticks // -- just rely on periodic 
search widening if len(live) > 0 && (s.consecutiveTicks == 0) { - s.pm.FindMorePeers(ctx, live[0]) + s.sprm.FindMorePeers(ctx, live[0]) } s.resetIdleTick() + // If we have live wants if s.sw.HasLiveWants() { s.consecutiveTicks++ } @@ -258,110 +335,89 @@ func (s *Session) handlePeriodicSearch(ctx context.Context) { // TODO: come up with a better strategy for determining when to search // for new providers for blocks. - s.pm.FindMorePeers(ctx, randomWant) - s.wm.WantBlocks(ctx, []cid.Cid{randomWant}, nil, s.id) + s.sprm.FindMorePeers(ctx, randomWant) + + s.wm.BroadcastWantHaves(ctx, s.id, []cid.Cid{randomWant}) s.periodicSearchTimer.Reset(s.periodicSearchDelay.NextWaitTime()) } func (s *Session) handleShutdown() { s.idleTick.Stop() - - live := s.sw.LiveWants() - s.wm.CancelWants(s.ctx, live, nil, s.id) + s.wm.RemoveSession(s.ctx, s.id) } -func (s *Session) handleReceive(ctx context.Context, from peer.ID, keys []cid.Cid) { - // Record statistics only if the blocks came from the network - // (blocks can also be received from the local node) - if from != "" { - s.updateReceiveCounters(ctx, from, keys) - } - - // Update the want list - wanted, totalLatency := s.sw.BlocksReceived(keys) - if len(wanted) == 0 { - return - } - - // We've received the blocks so we can cancel any outstanding wants for them - s.cancelIncoming(ctx, wanted) - +func (s *Session) handleReceive(ks []cid.Cid) { s.idleTick.Stop() - // Process the received blocks - s.processReceive(ctx, wanted, totalLatency) - - s.resetIdleTick() -} - -func (s *Session) updateReceiveCounters(ctx context.Context, from peer.ID, keys []cid.Cid) { - // Record unique vs duplicate blocks - s.sw.ForEachUniqDup(keys, s.srs.RecordUniqueBlock, s.srs.RecordDuplicateBlock) - - // Record response (to be able to time latency) - if len(keys) > 0 { - s.pm.RecordPeerResponse(from, keys) - } -} - -func (s *Session) cancelIncoming(ctx context.Context, ks []cid.Cid) { - s.pm.RecordCancels(ks) - s.wm.CancelWants(s.ctx, ks, nil, s.id) -} - 
-func (s *Session) processReceive(ctx context.Context, ks []cid.Cid, totalLatency time.Duration) { - // Keep track of the total number of blocks received and total latency - s.fetchcnt += len(ks) - s.latTotal += totalLatency - // We've received new wanted blocks, so reset the number of ticks // that have occurred since the last new block s.consecutiveTicks = 0 - s.wantBlocks(ctx, nil) + s.sprm.RecordCancels(ks) + + s.resetIdleTick() } func (s *Session) wantBlocks(ctx context.Context, newks []cid.Cid) { - // Given the want limit and any newly received blocks, get as many wants as - // we can to send out - ks := s.sw.GetNextWants(s.wantLimit(), newks) - if len(ks) == 0 { - return + if len(newks) > 0 { + s.sim.RecordSessionInterest(s.id, newks) + s.sw.BlocksRequested(newks) + s.sws.Add(newks) } - peers := s.pm.GetOptimizedPeers() - if len(peers) > 0 { - splitRequests := s.srs.SplitRequest(peers, ks) - for _, splitRequest := range splitRequests { - s.pm.RecordPeerRequests(splitRequest.Peers, splitRequest.Keys) - s.wm.WantBlocks(ctx, splitRequest.Keys, splitRequest.Peers, s.id) - } - } else { - s.pm.RecordPeerRequests(nil, ks) - s.wm.WantBlocks(ctx, ks, nil, s.id) + // If we have discovered peers already, the SessionPotentialManager will + // send wants to them + if s.sprm.Peers().Size() > 0 { + return } -} -func (s *Session) averageLatency() time.Duration { - return s.latTotal / time.Duration(s.fetchcnt) + // No peers discovered yet, broadcast some want-haves + ks := s.sw.GetNextWants(broadcastLiveWantsLimit) + if len(ks) > 0 { + log.Infof("Ses%d: No peers - broadcasting %d want HAVE requests\n", s.id, len(ks)) + s.sprm.RecordPeerRequests(nil, ks) + s.wm.BroadcastWantHaves(ctx, s.id, ks) + } } func (s *Session) resetIdleTick() { var tickDelay time.Duration - if s.latTotal == 0 { + if !s.latencyTrkr.hasLatency() { tickDelay = s.initialSearchDelay } else { - avLat := s.averageLatency() + avLat := s.latencyTrkr.averageLatency() + // log.Warningf("averageLatency %s", 
avLat) tickDelay = s.baseTickDelay + (3 * avLat) } tickDelay = tickDelay * time.Duration(1+s.consecutiveTicks) s.idleTick.Reset(tickDelay) } -func (s *Session) wantLimit() int { - if len(s.pm.GetOptimizedPeers()) > 0 { - return targetedLiveWantsLimit - } - return broadcastLiveWantsLimit +type latencyTracker struct { + sync.RWMutex + totalLatency time.Duration + count int +} + +func (lt *latencyTracker) hasLatency() bool { + lt.RLock() + defer lt.RUnlock() + + return lt.totalLatency > 0 && lt.count > 0 +} + +func (lt *latencyTracker) averageLatency() time.Duration { + lt.RLock() + defer lt.RUnlock() + + return lt.totalLatency / time.Duration(lt.count) +} + +func (lt *latencyTracker) receiveUpdate(count int, totalLatency time.Duration) { + lt.Lock() + defer lt.Unlock() + + lt.totalLatency += totalLatency + lt.count += count } diff --git a/bitswap/session/session_test.go b/bitswap/session/session_test.go index 19266d1b4..688f7883c 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/session/session_test.go @@ -2,14 +2,14 @@ package session import ( "context" - "sync" "testing" "time" + bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" notifications "github.com/ipfs/go-bitswap/notifications" - bssd "github.com/ipfs/go-bitswap/sessiondata" + bspm "github.com/ipfs/go-bitswap/peermanager" + bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" "github.com/ipfs/go-bitswap/testutil" - blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" delay "github.com/ipfs/go-ipfs-delay" @@ -17,225 +17,164 @@ import ( ) type wantReq struct { - cids []cid.Cid - peers []peer.ID + cids []cid.Cid } type fakeWantManager struct { - wantReqs chan wantReq - cancelReqs chan wantReq + wantReqs chan wantReq } -func (fwm *fakeWantManager) WantBlocks(ctx context.Context, cids []cid.Cid, peers []peer.ID, ses uint64) { - select { - case fwm.wantReqs <- wantReq{cids, peers}: - case <-ctx.Done(): +func newFakeWantManager() 
*fakeWantManager { + return &fakeWantManager{ + wantReqs: make(chan wantReq, 1), } } -func (fwm *fakeWantManager) CancelWants(ctx context.Context, cids []cid.Cid, peers []peer.ID, ses uint64) { +func (fwm *fakeWantManager) BroadcastWantHaves(ctx context.Context, sesid uint64, cids []cid.Cid) { select { - case fwm.cancelReqs <- wantReq{cids, peers}: + case fwm.wantReqs <- wantReq{cids}: case <-ctx.Done(): } } +func (fwm *fakeWantManager) RemoveSession(context.Context, uint64) {} -type fakePeerManager struct { - lk sync.RWMutex - peers []peer.ID +type fakeSessionPeerManager struct { + peers *peer.Set findMorePeersRequested chan cid.Cid } -func (fpm *fakePeerManager) FindMorePeers(ctx context.Context, k cid.Cid) { +func newFakeSessionPeerManager() *fakeSessionPeerManager { + return &fakeSessionPeerManager{ + peers: peer.NewSet(), + findMorePeersRequested: make(chan cid.Cid, 1), + } +} + +func (fpm *fakeSessionPeerManager) FindMorePeers(ctx context.Context, k cid.Cid) { select { case fpm.findMorePeersRequested <- k: case <-ctx.Done(): } } -func (fpm *fakePeerManager) GetOptimizedPeers() []bssd.OptimizedPeer { - fpm.lk.Lock() - defer fpm.lk.Unlock() - optimizedPeers := make([]bssd.OptimizedPeer, 0, len(fpm.peers)) - for _, peer := range fpm.peers { - optimizedPeers = append(optimizedPeers, bssd.OptimizedPeer{Peer: peer, OptimizationRating: 1.0}) - } - return optimizedPeers +func (fpm *fakeSessionPeerManager) Peers() *peer.Set { + return fpm.peers } -func (fpm *fakePeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} -func (fpm *fakePeerManager) RecordPeerResponse(p peer.ID, c []cid.Cid) { - fpm.lk.Lock() - fpm.peers = append(fpm.peers, p) - fpm.lk.Unlock() +func (fpm *fakeSessionPeerManager) ReceiveFrom(p peer.ID, ks []cid.Cid, haves []cid.Cid) bool { + if !fpm.peers.Contains(p) { + fpm.peers.Add(p) + return true + } + return false +} +func (fpm *fakeSessionPeerManager) RecordCancels(c []cid.Cid) {} +func (fpm *fakeSessionPeerManager) RecordPeerRequests([]peer.ID, 
[]cid.Cid) {} +func (fpm *fakeSessionPeerManager) RecordPeerResponse(p peer.ID, c []cid.Cid) { + fpm.peers.Add(p) } -func (fpm *fakePeerManager) RecordCancels(c []cid.Cid) {} -type fakeRequestSplitter struct { +type fakePeerManager struct { } -func (frs *fakeRequestSplitter) SplitRequest(optimizedPeers []bssd.OptimizedPeer, keys []cid.Cid) []bssd.PartialRequest { - peers := make([]peer.ID, len(optimizedPeers)) - for i, optimizedPeer := range optimizedPeers { - peers[i] = optimizedPeer.Peer - } - return []bssd.PartialRequest{bssd.PartialRequest{Peers: peers, Keys: keys}} +func newFakePeerManager() *fakePeerManager { + return &fakePeerManager{} } -func (frs *fakeRequestSplitter) RecordDuplicateBlock() {} -func (frs *fakeRequestSplitter) RecordUniqueBlock() {} +func (pm *fakePeerManager) RegisterSession(peer.ID, bspm.Session) bool { + return true +} +func (pm *fakePeerManager) UnregisterSession(uint64) {} +func (pm *fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {} func TestSessionGetBlocks(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel() - wantReqs := make(chan wantReq, 1) - cancelReqs := make(chan wantReq, 1) - fwm := &fakeWantManager{wantReqs, cancelReqs} - fpm := &fakePeerManager{} - frs := &fakeRequestSplitter{} + fwm := newFakeWantManager() + fpm := newFakeSessionPeerManager() + sim := bssim.New() + bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, frs, notif, time.Second, delay.Fixed(time.Minute)) + session := New(ctx, id, fwm, fpm, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) var cids []cid.Cid for _, block := range blks { cids = append(cids, block.Cid()) } - getBlocksCh, err := session.GetBlocks(ctx, cids) + _, err := session.GetBlocks(ctx, 
cids) if err != nil { t.Fatal("error getting blocks") } - // check initial want request + // Wait for initial want request receivedWantReq := <-fwm.wantReqs + // Should have registered session's interest in blocks + intSes := sim.FilterSessionInterested(id, cids) + if !testutil.MatchKeysIgnoreOrder(intSes[0], cids) { + t.Fatal("did not register session interest in blocks") + } + + // Should have sent out broadcast request for wants if len(receivedWantReq.cids) != broadcastLiveWantsLimit { t.Fatal("did not enqueue correct initial number of wants") } - if receivedWantReq.peers != nil { - t.Fatal("first want request should be a broadcast") - } - for _, c := range cids { - if !session.IsWanted(c) { - t.Fatal("expected session to want cids") - } - } - // now receive the first set of blocks + // Simulate receiving HAVEs from several peers peers := testutil.GeneratePeers(broadcastLiveWantsLimit) - var newCancelReqs []wantReq - var newBlockReqs []wantReq - var receivedBlocks []blocks.Block for i, p := range peers { - // simulate what bitswap does on receiving a message: - // - calls ReceiveFrom() on session - // - publishes block to pubsub channel blk := blks[testutil.IndexOf(blks, receivedWantReq.cids[i])] - session.ReceiveFrom(p, []cid.Cid{blk.Cid()}) - notif.Publish(blk) - - select { - case cancelBlock := <-cancelReqs: - newCancelReqs = append(newCancelReqs, cancelBlock) - case <-ctx.Done(): - t.Fatal("did not cancel block want") - } - - select { - case receivedBlock := <-getBlocksCh: - receivedBlocks = append(receivedBlocks, receivedBlock) - case <-ctx.Done(): - t.Fatal("Did not receive block!") - } - - select { - case wantBlock := <-wantReqs: - newBlockReqs = append(newBlockReqs, wantBlock) - default: - } - } - - // verify new peers were recorded - fpm.lk.Lock() - if len(fpm.peers) != broadcastLiveWantsLimit { - t.Fatal("received blocks not recorded by the peer manager") - } - for _, p := range fpm.peers { - if !testutil.ContainsPeer(peers, p) { - t.Fatal("incorrect 
peer recorded to peer manager") - } + session.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{blk.Cid()}, []cid.Cid{}) } - fpm.lk.Unlock() - // look at new interactions with want manager - - // should have cancelled each received block - if len(newCancelReqs) != broadcastLiveWantsLimit { - t.Fatal("did not cancel each block once it was received") - } - // new session reqs should be targeted - var newCidsRequested []cid.Cid - for _, w := range newBlockReqs { - if len(w.peers) == 0 { - t.Fatal("should not have broadcast again after initial broadcast") - } - newCidsRequested = append(newCidsRequested, w.cids...) + // Verify new peers were recorded + if !testutil.MatchPeersIgnoreOrder(fpm.Peers().Peers(), peers) { + t.Fatal("peers not recorded by the peer manager") } - // full new round of cids should be requested - if len(newCidsRequested) != broadcastLiveWantsLimit { - t.Fatal("new blocks were not requested") + // Verify session still wants received blocks + _, unwanted := sim.SplitWantedUnwanted(blks) + if len(unwanted) > 0 { + t.Fatal("all blocks should still be wanted") } - // receive remaining blocks - for i, p := range peers { - // simulate what bitswap does on receiving a message: - // - calls ReceiveFrom() on session - // - publishes block to pubsub channel - blk := blks[testutil.IndexOf(blks, newCidsRequested[i])] - session.ReceiveFrom(p, []cid.Cid{blk.Cid()}) - notif.Publish(blk) + // Simulate receiving DONT_HAVE for a CID + session.ReceiveFrom(peers[0], []cid.Cid{}, []cid.Cid{}, []cid.Cid{blks[0].Cid()}) - receivedBlock := <-getBlocksCh - receivedBlocks = append(receivedBlocks, receivedBlock) - cancelBlock := <-cancelReqs - newCancelReqs = append(newCancelReqs, cancelBlock) + // Verify session still wants received blocks + _, unwanted = sim.SplitWantedUnwanted(blks) + if len(unwanted) > 0 { + t.Fatal("all blocks should still be wanted") } - if len(receivedBlocks) != len(blks) { - t.Fatal("did not receive enough blocks") - } - if len(newCancelReqs) != 
len(receivedBlocks) { - t.Fatal("expected an equal number of received blocks and cancels") - } - for _, block := range receivedBlocks { - if !testutil.ContainsBlock(blks, block) { - t.Fatal("received incorrect block") - } + // Simulate receiving block for a CID + session.ReceiveFrom(peers[1], []cid.Cid{blks[0].Cid()}, []cid.Cid{}, []cid.Cid{}) + + // Verify session no longer wants received block + wanted, unwanted := sim.SplitWantedUnwanted(blks) + if len(unwanted) != 1 || !unwanted[0].Cid().Equals(blks[0].Cid()) { + t.Fatal("session wants block that has already been received") } - for _, c := range cids { - if session.IsWanted(c) { - t.Fatal("expected session NOT to want cids") - } + if len(wanted) != len(blks)-1 { + t.Fatal("session wants incorrect number of blocks") } } func TestSessionFindMorePeers(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 900*time.Millisecond) defer cancel() - wantReqs := make(chan wantReq, 1) - cancelReqs := make(chan wantReq, 1) - fwm := &fakeWantManager{wantReqs, cancelReqs} - fpm := &fakePeerManager{findMorePeersRequested: make(chan cid.Cid, 1)} - frs := &fakeRequestSplitter{} + fwm := newFakeWantManager() + fpm := newFakeSessionPeerManager() + sim := bssim.New() + bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, frs, notif, time.Second, delay.Fixed(time.Minute)) + session := New(ctx, id, fwm, fpm, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") session.SetBaseTickDelay(200 * time.Microsecond) blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) @@ -243,14 +182,14 @@ func TestSessionFindMorePeers(t *testing.T) { for _, block := range blks { cids = append(cids, block.Cid()) } - getBlocksCh, err := session.GetBlocks(ctx, cids) + _, err := session.GetBlocks(ctx, cids) if err != nil { t.Fatal("error getting blocks") } - // clear the 
initial block of wants + // The session should initially broadcast want-haves select { - case <-wantReqs: + case <-fwm.wantReqs: case <-ctx.Done(): t.Fatal("Did not make first want request ") } @@ -261,42 +200,28 @@ func TestSessionFindMorePeers(t *testing.T) { // millisecond range p := testutil.GeneratePeers(1)[0] - // simulate what bitswap does on receiving a message: - // - calls ReceiveFrom() on session - // - publishes block to pubsub channel blk := blks[0] - session.ReceiveFrom(p, []cid.Cid{blk.Cid()}) - notif.Publish(blk) - select { - case <-cancelReqs: - case <-ctx.Done(): - t.Fatal("Did not cancel block") - } - select { - case <-getBlocksCh: - case <-ctx.Done(): - t.Fatal("Did not get block") - } + session.ReceiveFrom(p, []cid.Cid{blk.Cid()}, []cid.Cid{}, []cid.Cid{}) + + // The session should now time out waiting for a response and broadcast + // want-haves again select { - case <-wantReqs: + case <-fwm.wantReqs: case <-ctx.Done(): t.Fatal("Did not make second want request ") } - // verify a broadcast was made + // Verify a broadcast was made select { - case receivedWantReq := <-wantReqs: + case receivedWantReq := <-fwm.wantReqs: if len(receivedWantReq.cids) < broadcastLiveWantsLimit { t.Fatal("did not rebroadcast whole live list") } - if receivedWantReq.peers != nil { - t.Fatal("did not make a broadcast") - } case <-ctx.Done(): t.Fatal("Never rebroadcast want list") } - // wait for a request to get more peers to occur + // The session should eventually try to find more peers select { case <-fpm.findMorePeersRequested: case <-ctx.Done(): @@ -307,16 +232,14 @@ func TestSessionFindMorePeers(t *testing.T) { func TestSessionFailingToGetFirstBlock(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - wantReqs := make(chan wantReq, 1) - cancelReqs := make(chan wantReq, 1) - fwm := &fakeWantManager{wantReqs, cancelReqs} - fpm := &fakePeerManager{findMorePeersRequested: make(chan cid.Cid, 1)} - frs := 
&fakeRequestSplitter{} + fwm := newFakeWantManager() + fpm := newFakeSessionPeerManager() + sim := bssim.New() + bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - - session := New(ctx, id, fwm, fpm, frs, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond)) + session := New(ctx, id, fwm, fpm, sim, newFakePeerManager(), bpm, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(4) var cids []cid.Cid @@ -329,27 +252,24 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { t.Fatal("error getting blocks") } - // clear the initial block of wants + // The session should initially broadcast want-haves select { - case <-wantReqs: + case <-fwm.wantReqs: case <-ctx.Done(): t.Fatal("Did not make first want request ") } - // verify a broadcast is made + // Verify a broadcast was made select { - case receivedWantReq := <-wantReqs: + case receivedWantReq := <-fwm.wantReqs: if len(receivedWantReq.cids) < len(cids) { t.Fatal("did not rebroadcast whole live list") } - if receivedWantReq.peers != nil { - t.Fatal("did not make a broadcast") - } case <-ctx.Done(): t.Fatal("Never rebroadcast want list") } - // wait for a request to get more peers to occur + // Wait for a request to find more peers to occur select { case k := <-fpm.findMorePeersRequested: if testutil.IndexOf(blks, k) == -1 { @@ -360,63 +280,58 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { } firstTickLength := time.Since(startTick) - // wait for another broadcast to occur + // Wait for another broadcast to occur select { - case receivedWantReq := <-wantReqs: + case receivedWantReq := <-fwm.wantReqs: if len(receivedWantReq.cids) < len(cids) { t.Fatal("did not rebroadcast whole live list") } - if receivedWantReq.peers != nil { - t.Fatal("did not make a broadcast") - } case <-ctx.Done(): t.Fatal("Never rebroadcast want list") } + + // Wait for another 
broadcast to occur startTick = time.Now() - // wait for another broadcast to occur select { - case receivedWantReq := <-wantReqs: + case receivedWantReq := <-fwm.wantReqs: if len(receivedWantReq.cids) < len(cids) { t.Fatal("did not rebroadcast whole live list") } - if receivedWantReq.peers != nil { - t.Fatal("did not make a broadcast") - } case <-ctx.Done(): t.Fatal("Never rebroadcast want list") } + + // Tick should take longer consecutiveTickLength := time.Since(startTick) - // tick should take longer if firstTickLength > consecutiveTickLength { t.Fatal("Should have increased tick length after first consecutive tick") } + + // Wait for another broadcast to occur startTick = time.Now() - // wait for another broadcast to occur select { - case receivedWantReq := <-wantReqs: + case receivedWantReq := <-fwm.wantReqs: if len(receivedWantReq.cids) < len(cids) { t.Fatal("did not rebroadcast whole live list") } - if receivedWantReq.peers != nil { - t.Fatal("did not make a broadcast") - } case <-ctx.Done(): t.Fatal("Never rebroadcast want list") } + + // Tick should take longer secondConsecutiveTickLength := time.Since(startTick) - // tick should take longer if consecutiveTickLength > secondConsecutiveTickLength { t.Fatal("Should have increased tick length after first consecutive tick") } - // should not have looked for peers on consecutive ticks + // Should not have tried to find peers on consecutive ticks select { case <-fpm.findMorePeersRequested: - t.Fatal("Should not have looked for peers on consecutive tick") + t.Fatal("Should not have tried to find peers on consecutive ticks") default: } - // wait for rebroadcast to occur + // Wait for rebroadcast to occur select { case k := <-fpm.findMorePeersRequested: if testutil.IndexOf(blks, k) == -1 { @@ -428,18 +343,17 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { } func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { - wantReqs := make(chan wantReq, 1) - cancelReqs := make(chan wantReq, 1) - fwm := 
&fakeWantManager{wantReqs, cancelReqs} - fpm := &fakePeerManager{} - frs := &fakeRequestSplitter{} + fwm := newFakeWantManager() + fpm := newFakeSessionPeerManager() + sim := bssim.New() + bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() // Create a new session with its own context sessctx, sesscancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - session := New(sessctx, id, fwm, fpm, frs, notif, time.Second, delay.Fixed(time.Minute)) + session := New(sessctx, id, fwm, fpm, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") timerCtx, timerCancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer timerCancel() @@ -468,3 +382,37 @@ func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { t.Fatal("expected channel to be closed before timeout") } } + +func TestSessionReceiveMessageAfterShutdown(t *testing.T) { + ctx, cancelCtx := context.WithTimeout(context.Background(), 10*time.Millisecond) + fwm := newFakeWantManager() + fpm := newFakeSessionPeerManager() + sim := bssim.New() + bpm := bsbpm.New() + notif := notifications.New() + defer notif.Shutdown() + id := testutil.GenerateSessionID() + session := New(ctx, id, fwm, fpm, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") + blockGenerator := blocksutil.NewBlockGenerator() + blks := blockGenerator.Blocks(2) + cids := []cid.Cid{blks[0].Cid(), blks[1].Cid()} + + _, err := session.GetBlocks(ctx, cids) + if err != nil { + t.Fatal("error getting blocks") + } + + // Wait for initial want request + <-fwm.wantReqs + + // Shut down session + cancelCtx() + + // Simulate receiving block for a CID + peer := testutil.GeneratePeers(1)[0] + session.ReceiveFrom(peer, []cid.Cid{blks[0].Cid()}, []cid.Cid{}, []cid.Cid{}) + + time.Sleep(5 * time.Millisecond) + + // If we don't get a panic then the test is considered passing +} diff --git a/bitswap/session/sessionwants.go 
b/bitswap/session/sessionwants.go index aa487f121..9f896049f 100644 --- a/bitswap/session/sessionwants.go +++ b/bitswap/session/sessionwants.go @@ -1,6 +1,7 @@ package session import ( + "fmt" "math/rand" "sync" "time" @@ -8,60 +9,43 @@ import ( cid "github.com/ipfs/go-cid" ) +// sessionWants keeps track of which cids are waiting to be sent out, and which +// peers are "live" - ie, we've sent a request but haven't received a block yet type sessionWants struct { sync.RWMutex toFetch *cidQueue liveWants map[cid.Cid]time.Time - pastWants *cid.Set } -// BlocksReceived moves received block CIDs from live to past wants and -// measures latency. It returns the CIDs of blocks that were actually wanted -// (as opposed to duplicates) and the total latency for all incoming blocks. -func (sw *sessionWants) BlocksReceived(cids []cid.Cid) ([]cid.Cid, time.Duration) { - now := time.Now() +func newSessionWants() sessionWants { + return sessionWants{ + toFetch: newCidQueue(), + liveWants: make(map[cid.Cid]time.Time), + } +} + +func (sw *sessionWants) String() string { + return fmt.Sprintf("%d pending / %d live", sw.toFetch.Len(), len(sw.liveWants)) +} +// BlocksRequested is called when the client makes a request for blocks +func (sw *sessionWants) BlocksRequested(newWants []cid.Cid) { sw.Lock() defer sw.Unlock() - totalLatency := time.Duration(0) - wanted := make([]cid.Cid, 0, len(cids)) - for _, c := range cids { - if sw.unlockedIsWanted(c) { - wanted = append(wanted, c) - - // If the block CID was in the live wants queue, remove it - tval, ok := sw.liveWants[c] - if ok { - totalLatency += now.Sub(tval) - delete(sw.liveWants, c) - } else { - // Otherwise remove it from the toFetch queue, if it was there - sw.toFetch.Remove(c) - } - - // Keep track of CIDs we've successfully fetched - sw.pastWants.Add(c) - } + for _, k := range newWants { + sw.toFetch.Push(k) } - - return wanted, totalLatency } -// GetNextWants adds any new wants to the list of CIDs to fetch, then moves as -// many 
CIDs from the fetch queue to the live wants list as possible (given the -// limit). Returns the newly live wants. -func (sw *sessionWants) GetNextWants(limit int, newWants []cid.Cid) []cid.Cid { +// GetNextWants moves as many CIDs from the fetch queue to the live wants +// list as possible (given the limit). Returns the newly live wants. +func (sw *sessionWants) GetNextWants(limit int) []cid.Cid { now := time.Now() sw.Lock() defer sw.Unlock() - // Add new wants to the fetch queue - for _, k := range newWants { - sw.toFetch.Push(k) - } - // Move CIDs from fetch queue to the live wants queue (up to the limit) currentLiveCount := len(sw.liveWants) toAdd := limit - currentLiveCount @@ -76,6 +60,55 @@ func (sw *sessionWants) GetNextWants(limit int, newWants []cid.Cid) []cid.Cid { return live } +// WantsSent is called when wants are sent to a peer +func (sw *sessionWants) WantsSent(ks []cid.Cid) { + now := time.Now() + + sw.Lock() + defer sw.Unlock() + + for _, c := range ks { + if _, ok := sw.liveWants[c]; !ok { + sw.toFetch.Remove(c) + sw.liveWants[c] = now + } + } +} + +// BlocksReceived removes received block CIDs from the live wants list and +// measures latency. It returns the CIDs of blocks that were actually +// wanted (as opposed to duplicates) and the total latency for all incoming blocks. 
+func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) { + wanted := make([]cid.Cid, 0, len(ks)) + totalLatency := time.Duration(0) + if len(ks) == 0 { + return wanted, totalLatency + } + + now := time.Now() + + sw.Lock() + defer sw.Unlock() + + for _, c := range ks { + if sw.unlockedIsWanted(c) { + wanted = append(wanted, c) + + sentAt, ok := sw.liveWants[c] + if ok && !sentAt.IsZero() { + totalLatency += now.Sub(sentAt) + } + + // Remove the CID from the live wants / toFetch queue and add it + // to the past wants + delete(sw.liveWants, c) + sw.toFetch.Remove(c) + } + } + + return wanted, totalLatency +} + // PrepareBroadcast saves the current time for each live want and returns the // live want CIDs. func (sw *sessionWants) PrepareBroadcast() []cid.Cid { @@ -102,23 +135,6 @@ func (sw *sessionWants) CancelPending(keys []cid.Cid) { } } -// ForEachUniqDup iterates over each of the given CIDs and calls isUniqFn -// if the session is expecting a block for the CID, or isDupFn if the session -// has already received the block. 
-func (sw *sessionWants) ForEachUniqDup(ks []cid.Cid, isUniqFn, isDupFn func()) { - sw.RLock() - - for _, k := range ks { - if sw.unlockedIsWanted(k) { - isUniqFn() - } else if sw.pastWants.Has(k) { - isDupFn() - } - } - - sw.RUnlock() -} - // LiveWants returns a list of live wants func (sw *sessionWants) LiveWants() []cid.Cid { sw.RLock() @@ -131,7 +147,6 @@ func (sw *sessionWants) LiveWants() []cid.Cid { return live } -// RandomLiveWant returns a randomly selected live want func (sw *sessionWants) RandomLiveWant() cid.Cid { i := rand.Uint64() @@ -160,31 +175,6 @@ func (sw *sessionWants) HasLiveWants() bool { return len(sw.liveWants) > 0 } -// IsWanted indicates if the session is expecting to receive the block with the -// given CID -func (sw *sessionWants) IsWanted(c cid.Cid) bool { - sw.RLock() - defer sw.RUnlock() - - return sw.unlockedIsWanted(c) -} - -// FilterInteresting filters the list so that it only contains keys for -// blocks that the session is waiting to receive or has received in the past -func (sw *sessionWants) FilterInteresting(ks []cid.Cid) []cid.Cid { - sw.RLock() - defer sw.RUnlock() - - var interested []cid.Cid - for _, k := range ks { - if sw.unlockedIsWanted(k) || sw.pastWants.Has(k) { - interested = append(interested, k) - } - } - - return interested -} - func (sw *sessionWants) unlockedIsWanted(c cid.Cid) bool { _, ok := sw.liveWants[c] if !ok { diff --git a/bitswap/session/sessionwants_test.go b/bitswap/session/sessionwants_test.go index 879729242..953ecce9a 100644 --- a/bitswap/session/sessionwants_test.go +++ b/bitswap/session/sessionwants_test.go @@ -2,20 +2,13 @@ package session import ( "testing" - "time" "github.com/ipfs/go-bitswap/testutil" cid "github.com/ipfs/go-cid" ) -func TestSessionWants(t *testing.T) { - sw := sessionWants{ - toFetch: newCidQueue(), - liveWants: make(map[cid.Cid]time.Time), - pastWants: cid.NewSet(), - } - cids := testutil.GenerateCids(10) - others := testutil.GenerateCids(1) +func TestEmptySessionWants(t 
*testing.T) { + sw := newSessionWants() // Expect these functions to return nothing on a new sessionWants lws := sw.PrepareBroadcast() @@ -33,25 +26,29 @@ func TestSessionWants(t *testing.T) { if rw.Defined() { t.Fatal("expected no random want") } - if sw.IsWanted(cids[0]) { - t.Fatal("expected cid to not be wanted") - } - if len(sw.FilterInteresting(cids)) > 0 { - t.Fatal("expected no interesting wants") - } +} - // Add 10 new wants with a limit of 5 - // The first 5 cids should go into the toFetch queue - // The other 5 cids should go into the live want queue - // toFetch Live Past +func TestSessionWants(t *testing.T) { + sw := newSessionWants() + cids := testutil.GenerateCids(10) + others := testutil.GenerateCids(1) + + // Add 10 new wants + // toFetch Live + // 9876543210 + sw.BlocksRequested(cids) + + // Get next wants with a limit of 5 + // The first 5 cids should go move into the live queue + // toFetch Live // 98765 43210 - nextw := sw.GetNextWants(5, cids) + nextw := sw.GetNextWants(5) if len(nextw) != 5 { t.Fatal("expected 5 next wants") } - lws = sw.PrepareBroadcast() + lws := sw.PrepareBroadcast() if len(lws) != 5 { - t.Fatal("expected 5 broadcast wants") + t.Fatal("expected 5 broadcast wants", len(lws)) } lws = sw.LiveWants() if len(lws) != 5 { @@ -60,52 +57,28 @@ func TestSessionWants(t *testing.T) { if !sw.HasLiveWants() { t.Fatal("expected to have live wants") } - rw = sw.RandomLiveWant() + rw := sw.RandomLiveWant() if !rw.Defined() { t.Fatal("expected random want") } - if !sw.IsWanted(cids[0]) { - t.Fatal("expected cid to be wanted") - } - if !sw.IsWanted(cids[9]) { - t.Fatal("expected cid to be wanted") - } - if len(sw.FilterInteresting([]cid.Cid{cids[0], cids[9], others[0]})) != 2 { - t.Fatal("expected 2 interesting wants") - } // Two wanted blocks and one other block are received. 
- // The wanted blocks should be moved from the live wants queue - // to the past wants set (the other block CID should be ignored) - // toFetch Live Past - // 98765 432__ 10 + // The wanted blocks should be removed from the live wants queue + // (the other block CID should be ignored) + // toFetch Live + // 98765 432__ recvdCids := []cid.Cid{cids[0], cids[1], others[0]} - uniq := 0 - dup := 0 - sw.ForEachUniqDup(recvdCids, func() { uniq++ }, func() { dup++ }) - if uniq != 2 || dup != 0 { - t.Fatal("expected 2 uniqs / 0 dups", uniq, dup) - } sw.BlocksReceived(recvdCids) lws = sw.LiveWants() if len(lws) != 3 { t.Fatal("expected 3 live wants") } - if sw.IsWanted(cids[0]) { - t.Fatal("expected cid to no longer be wanted") - } - if !sw.IsWanted(cids[9]) { - t.Fatal("expected cid to be wanted") - } - if len(sw.FilterInteresting([]cid.Cid{cids[0], cids[9], others[0]})) != 2 { - t.Fatal("expected 2 interesting wants") - } // Ask for next wants with a limit of 5 // Should move 2 wants from toFetch queue to live wants - // toFetch Live Past - // 987__ 65432 10 - nextw = sw.GetNextWants(5, nil) + // toFetch Live + // 987__ 65432 + nextw = sw.GetNextWants(5) if len(nextw) != 2 { t.Fatal("expected 2 next wants") } @@ -113,22 +86,13 @@ func TestSessionWants(t *testing.T) { if len(lws) != 5 { t.Fatal("expected 5 live wants") } - if !sw.IsWanted(cids[5]) { - t.Fatal("expected cid to be wanted") - } // One wanted block and one dup block are received. - // The wanted block should be moved from the live wants queue - // to the past wants set - // toFetch Live Past - // 987 654_2 310 + // The wanted block should be removed from the live + // wants queue. 
+ // toFetch Live + // 987 654_2 recvdCids = []cid.Cid{cids[0], cids[3]} - uniq = 0 - dup = 0 - sw.ForEachUniqDup(recvdCids, func() { uniq++ }, func() { dup++ }) - if uniq != 1 || dup != 1 { - t.Fatal("expected 1 uniq / 1 dup", uniq, dup) - } sw.BlocksReceived(recvdCids) lws = sw.LiveWants() if len(lws) != 4 { @@ -136,17 +100,11 @@ func TestSessionWants(t *testing.T) { } // One block in the toFetch queue should be cancelled - // toFetch Live Past - // 9_7 654_2 310 + // toFetch Live + // 9_7 654_2 sw.CancelPending([]cid.Cid{cids[8]}) lws = sw.LiveWants() if len(lws) != 4 { t.Fatal("expected 4 live wants") } - if sw.IsWanted(cids[8]) { - t.Fatal("expected cid to no longer be wanted") - } - if len(sw.FilterInteresting([]cid.Cid{cids[0], cids[8]})) != 1 { - t.Fatal("expected 1 interesting wants") - } } diff --git a/bitswap/session/sessionwantsender.go b/bitswap/session/sessionwantsender.go new file mode 100644 index 000000000..ddd24ee01 --- /dev/null +++ b/bitswap/session/sessionwantsender.go @@ -0,0 +1,605 @@ +package session + +import ( + "context" + + bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" + + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +// Maximum number of changes to accept before blocking +const changesBufferSize = 128 + +// BlockPresence indicates whether a peer has a block. +// Note that the order is important, we decide which peer to send a want to +// based on knowing whether peer has the block. 
eg we're more likely to send +// a want to a peer that has the block than a peer that doesnt have the block +// so BPHave > BPDontHave +type BlockPresence int + +const ( + BPDontHave BlockPresence = iota + BPUnknown + BPHave +) + +// update encapsulates a message received by the session +type update struct { + // Which peer sent the update + from peer.ID + // cids of blocks received + ks []cid.Cid + // HAVE message + haves []cid.Cid + // DONT_HAVE message + dontHaves []cid.Cid +} + +// peerAvailability indicates a peer's connection state +type peerAvailability struct { + target peer.ID + available bool +} + +// change can be a new peer being discovered, a new message received by the +// session, or a change in the connect status of a peer +type change struct { + // the peer ID of a new peer + addPeer peer.ID + // new wants requested + add []cid.Cid + // new message received by session (blocks / HAVEs / DONT_HAVEs) + update update + // peer has connected / disconnected + availability peerAvailability +} + +type onSendFn func(to peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) +type onPeersExhaustedFn func([]cid.Cid) + +// +// sessionWantSender is responsible for sending want-have and want-block to +// peers. For each want, it sends a single optimistic want-block request to +// one peer and want-have requests to all other peers in the session. +// To choose the best peer for the optimistic want-block it maintains a list +// of how peers have responded to each want (HAVE / DONT_HAVE / Unknown) and +// consults the peer response tracker (records which peers sent us blocks). 
+// +type sessionWantSender struct { + // When the context is cancelled, sessionWantSender shuts down + ctx context.Context + // The session ID + sessionID uint64 + // A channel that collects incoming changes (events) + changes chan change + // Information about each want indexed by CID + wants map[cid.Cid]*wantInfo + // Tracks which peers we have send want-block to + swbt *sentWantBlocksTracker + // Maintains a list of peers and whether they are connected + peerAvlMgr *peerAvailabilityManager + // Tracks the number of blocks each peer sent us + peerRspTrkr *peerResponseTracker + + // Sends wants to peers + pm PeerManager + // Keeps track of which peer has / doesn't have a block + bpm *bsbpm.BlockPresenceManager + // Called when wants are sent + onSend onSendFn + // Called when all peers explicitly don't have a block + onPeersExhausted onPeersExhaustedFn +} + +func newSessionWantSender(ctx context.Context, sid uint64, pm PeerManager, bpm *bsbpm.BlockPresenceManager, + onSend onSendFn, onPeersExhausted onPeersExhaustedFn) sessionWantSender { + + spm := sessionWantSender{ + ctx: ctx, + sessionID: sid, + changes: make(chan change, changesBufferSize), + wants: make(map[cid.Cid]*wantInfo), + swbt: newSentWantBlocksTracker(), + peerAvlMgr: newPeerAvailabilityManager(), + peerRspTrkr: newPeerResponseTracker(), + + pm: pm, + bpm: bpm, + onSend: onSend, + onPeersExhausted: onPeersExhausted, + } + + return spm +} + +func (spm *sessionWantSender) ID() uint64 { + return spm.sessionID +} + +// Add is called when new wants are added to the session +func (spm *sessionWantSender) Add(ks []cid.Cid) { + if len(ks) == 0 { + return + } + spm.addChange(change{add: ks}) +} + +// Update is called when the session receives a message with incoming blocks +// or HAVE / DONT_HAVE +func (spm *sessionWantSender) Update(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid, isNewPeer bool) { + // fmt.Printf("Update(%s, %d, %d, %d, %t)\n", lu.P(from), len(ks), len(haves), 
len(dontHaves), isNewPeer) + hasUpdate := len(ks) > 0 || len(haves) > 0 || len(dontHaves) > 0 + if !hasUpdate && !isNewPeer { + return + } + + ch := change{} + + if hasUpdate { + ch.update = update{from, ks, haves, dontHaves} + } + + // If the message came from a new peer register with the peer manager + if isNewPeer { + available := spm.pm.RegisterSession(from, spm) + ch.addPeer = from + ch.availability = peerAvailability{from, available} + } + + spm.addChange(ch) +} + +// SignalAvailability is called by the PeerManager to signal that a peer has +// connected / disconnected +func (spm *sessionWantSender) SignalAvailability(p peer.ID, isAvailable bool) { + // fmt.Printf("SignalAvailability(%s, %t)\n", lu.P(p), isAvailable) + availability := peerAvailability{p, isAvailable} + spm.addChange(change{availability: availability}) +} + +// Run is the main loop for processing incoming changes +func (spm *sessionWantSender) Run() { + for { + select { + case ch := <-spm.changes: + spm.onChange([]change{ch}) + case <-spm.ctx.Done(): + spm.shutdown() + return + } + } +} + +// addChange adds a new change to the queue +func (spm *sessionWantSender) addChange(c change) { + select { + case spm.changes <- c: + case <-spm.ctx.Done(): + } +} + +// shutdown unregisters the session with the PeerManager +func (spm *sessionWantSender) shutdown() { + spm.pm.UnregisterSession(spm.sessionID) +} + +// collectChanges collects all the changes that have occurred since the last +// invocation of onChange +func (spm *sessionWantSender) collectChanges(changes []change) []change { + for len(changes) < changesBufferSize { + select { + case next := <-spm.changes: + changes = append(changes, next) + default: + return changes + } + } + return changes +} + +// onChange processes the next set of changes +func (spm *sessionWantSender) onChange(changes []change) { + // Several changes may have been recorded since the last time we checked, + // so pop all outstanding changes from the channel + changes = 
spm.collectChanges(changes) + + // Apply each change + availability := make(map[peer.ID]bool, len(changes)) + var updates []update + for _, chng := range changes { + // Add newly discovered peers + if chng.addPeer != "" { + spm.peerAvlMgr.addPeer(chng.addPeer) + } + + // Initialize info for new wants + for _, c := range chng.add { + spm.trackWant(c) + } + + // Consolidate updates and changes to availability + if chng.update.from != "" { + updates = append(updates, chng.update) + } + if chng.availability.target != "" { + availability[chng.availability.target] = chng.availability.available + } + } + + // Update peer availability + newlyAvailable := spm.processAvailability(availability) + + // Update wants + spm.processUpdates(updates) + + // If there are some connected peers, send any pending wants + if spm.peerAvlMgr.haveAvailablePeers() { + // fmt.Printf("sendNextWants()\n") + spm.sendNextWants(newlyAvailable) + // fmt.Println(spm) + } +} + +// processAvailability updates the want queue with any changes in +// peer availability +func (spm *sessionWantSender) processAvailability(availability map[peer.ID]bool) []peer.ID { + var newlyAvailable []peer.ID + for p, isNowAvailable := range availability { + // Make sure this is a peer that the session is actually interested in + if wasAvailable, ok := spm.peerAvlMgr.isAvailable(p); ok { + // If the state has changed + if wasAvailable != isNowAvailable { + // Update the state and record that something changed + spm.peerAvlMgr.setPeerAvailability(p, isNowAvailable) + // fmt.Printf("processAvailability change %s %t\n", lu.P(p), isNowAvailable) + spm.updateWantsPeerAvailability(p, isNowAvailable) + if isNowAvailable { + newlyAvailable = append(newlyAvailable, p) + } + } + } + } + + return newlyAvailable +} + +// trackWant creates a new entry in the map of CID -> want info +func (spm *sessionWantSender) trackWant(c cid.Cid) { + // fmt.Printf("trackWant %s\n", lu.C(c)) + if _, ok := spm.wants[c]; ok { + return + } + + // Create 
the want info
	wi := newWantInfo(spm.peerRspTrkr)
	spm.wants[c] = wi

	// For each available peer, register any information we know about
	// whether the peer has the block
	for _, p := range spm.peerAvlMgr.availablePeers() {
		spm.updateWantBlockPresence(c, p)
	}
}

// processUpdates processes incoming blocks and HAVE / DONT_HAVEs.
// It records block presence per peer, frees up a want whose chosen peer
// answered with DONT_HAVE (so the want can be re-sent elsewhere), removes
// wants whose blocks arrived, and signals the session when every available
// peer has sent a DONT_HAVE for some want.
func (spm *sessionWantSender) processUpdates(updates []update) {
	dontHaves := cid.NewSet()
	for _, upd := range updates {
		// TODO: If there is a timeout for the want from the peer, remove want.sentTo
		// so the want can be sent to another peer (and blacklist the peer?)
		// TODO: If a peer is no longer available, check if all providers of
		// each CID have been exhausted

		// For each DONT_HAVE
		for _, c := range upd.dontHaves {
			dontHaves.Add(c)

			// Update the block presence for the peer
			spm.updateWantBlockPresence(c, upd.from)

			// Check if the DONT_HAVE is in response to a want-block
			// (could also be in response to want-have)
			if spm.swbt.haveSentWantBlockTo(upd.from, c) {
				// If we were waiting for a response from this peer, clear
				// sentTo so that we can send the want to another peer
				if sentTo, ok := spm.getWantSentTo(c); ok && sentTo == upd.from {
					spm.setWantSentTo(c, "")
				}
			}
		}

		// For each HAVE
		for _, c := range upd.haves {
			// Update the block presence for the peer
			spm.updateWantBlockPresence(c, upd.from)
		}

		// For each received block
		for _, c := range upd.ks {
			// Remove the want
			removed := spm.removeWant(c)
			if removed != nil {
				// Inform the peer tracker that this peer was the first to send
				// us the block
				spm.peerRspTrkr.receivedBlockFrom(upd.from)
			}
		}
	}

	// If all available peers for a cid sent a DONT_HAVE, signal to the session
	// that we've exhausted available peers
	if dontHaves.Len() > 0 {
		exhausted := spm.bpm.AllPeersDoNotHaveBlock(spm.peerAvlMgr.availablePeers(), dontHaves.Keys())
		newlyExhausted := spm.newlyExhausted(exhausted)
		if len(newlyExhausted) > 0 {
			spm.onPeersExhausted(newlyExhausted)
		}
	}
}

// wantSets is a convenience struct for passing around the want-blocks and
// want-haves destined for a single peer.
type wantSets struct {
	// cids to request from this peer as want-block
	wantBlocks *cid.Set
	// cids to request from this peer as want-have
	wantHaves *cid.Set
}

// allWants accumulates, per peer, the wants to be sent in the next batch.
type allWants map[peer.ID]*wantSets

// forPeer returns the wantSets for the given peer, lazily creating an empty
// entry on first access.
func (aw allWants) forPeer(p peer.ID) *wantSets {
	if _, ok := aw[p]; !ok {
		aw[p] = &wantSets{
			wantBlocks: cid.NewSet(),
			wantHaves:  cid.NewSet(),
		}
	}
	return aw[p]
}

// sendNextWants sends wants to peers according to the latest information
// about which peers have / dont have blocks
func (spm *sessionWantSender) sendNextWants(newlyAvailable []peer.ID) {
	toSend := make(allWants)

	for c, wi := range spm.wants {
		// Ensure we send want-haves to any newly available peers
		for _, p := range newlyAvailable {
			toSend.forPeer(p).wantHaves.Add(c)
		}

		// We already sent a want-block to a peer and haven't yet received a
		// response yet
		if wi.sentTo != "" {
			// fmt.Printf("  q - already sent want-block %s to %s\n", lu.C(c), lu.P(wi.sentTo))
			continue
		}

		// All the peers have indicated that they don't have the block
		// corresponding to this want, so we must wait to discover more peers
		if wi.bestPeer == "" {
			// TODO: work this out in real time instead of using bestP?
			// fmt.Printf("  q - no best peer for %s\n", lu.C(c))
			continue
		}

		// fmt.Printf("  q - send best: %s: %s\n", lu.C(c), lu.P(wi.bestPeer))

		// Record that we are sending a want-block for this want to the peer
		spm.setWantSentTo(c, wi.bestPeer)

		// Send a want-block to the chosen peer
		toSend.forPeer(wi.bestPeer).wantBlocks.Add(c)

		// Send a want-have to each other peer
		for _, op := range spm.peerAvlMgr.availablePeers() {
			if op != wi.bestPeer {
				toSend.forPeer(op).wantHaves.Add(c)
			}
		}
	}

	// Send any wants we've collected
	spm.sendWants(toSend)
}

// sendWants sends want-have and want-blocks to the appropriate peers
func (spm *sessionWantSender) sendWants(sends allWants) {
	// fmt.Printf(" send wants to %d peers\n", len(sends))

	// For each peer we're sending a request to
	for p, snd := range sends {
		// fmt.Printf(" send %d wants to %s\n", snd.wantBlocks.Len(), lu.P(p))

		// Piggyback some other want-haves onto the request to the peer
		for _, c := range spm.getPiggybackWantHaves(p, snd.wantBlocks) {
			snd.wantHaves.Add(c)
		}

		// Send the wants to the peer.
		// Note that the PeerManager ensures that we don't sent duplicate
		// want-haves / want-blocks to a peer, and that want-blocks take
		// precedence over want-haves.
		wblks := snd.wantBlocks.Keys()
		whaves := snd.wantHaves.Keys()
		spm.pm.SendWants(spm.ctx, p, wblks, whaves)

		// Inform the session that we've sent the wants
		spm.onSend(p, wblks, whaves)

		// Record which peers we send want-block to
		spm.swbt.addSentWantBlocksTo(p, wblks)
	}
}

// getPiggybackWantHaves gets the want-haves that should be piggybacked onto
// a request that we are making to send want-blocks to a peer
func (spm *sessionWantSender) getPiggybackWantHaves(p peer.ID, wantBlocks *cid.Set) []cid.Cid {
	var whs []cid.Cid
	for c := range spm.wants {
		// Don't send want-have if we're already sending a want-block
		// (or have previously)
		if !wantBlocks.Has(c) && !spm.swbt.haveSentWantBlockTo(p, c) {
			whs = append(whs, c)
		}
	}
	return whs
}

// newlyExhausted filters the list of keys for wants that have not already
// been marked as exhausted (all peers indicated they don't have the block).
// Wants it returns are marked exhausted as a side effect, so each want is
// reported at most once.
func (spm *sessionWantSender) newlyExhausted(ks []cid.Cid) []cid.Cid {
	var res []cid.Cid
	for _, c := range ks {
		if wi, ok := spm.wants[c]; ok {
			if !wi.exhausted {
				res = append(res, c)
				wi.exhausted = true
			}
		}
	}
	return res
}

// removeWant is called when the corresponding block is received. It returns
// the removed wantInfo, or nil if the want was not being tracked.
func (spm *sessionWantSender) removeWant(c cid.Cid) *wantInfo {
	if wi, ok := spm.wants[c]; ok {
		delete(spm.wants, c)
		return wi
	}
	return nil
}

// updateWantsPeerAvailability is called when the availability changes for a
// peer. It updates all the wants accordingly.
func (spm *sessionWantSender) updateWantsPeerAvailability(p peer.ID, isNowAvailable bool) {
	for c, wi := range spm.wants {
		if isNowAvailable {
			spm.updateWantBlockPresence(c, p)
		} else {
			wi.removePeer(p)
		}
	}
}

// updateWantBlockPresence is called when a HAVE / DONT_HAVE is received for the given
// want / peer
func (spm *sessionWantSender) updateWantBlockPresence(c cid.Cid, p peer.ID) {
	// Only update wants that this session is still tracking
	wi, ok := spm.wants[c]
	if !ok {
		return
	}

	// If the peer sent us a HAVE or DONT_HAVE for the cid, adjust the
	// block presence for the peer / cid combination
	if spm.bpm.PeerHasBlock(p, c) {
		wi.setPeerBlockPresence(p, BPHave)
	} else if spm.bpm.PeerDoesNotHaveBlock(p, c) {
		wi.setPeerBlockPresence(p, BPDontHave)
	} else {
		wi.setPeerBlockPresence(p, BPUnknown)
	}
}

// getWantSentTo returns which peer the want-block for the given cid was sent
// to, and whether the want is being tracked at all.
func (spm *sessionWantSender) getWantSentTo(c cid.Cid) (peer.ID, bool) {
	if wi, ok := spm.wants[c]; ok {
		return wi.sentTo, true
	}
	return "", false
}

// setWantSentTo records which peer the want-block for the given cid was sent
// to. A no-op if the want is not being tracked.
func (spm *sessionWantSender) setWantSentTo(c cid.Cid, p peer.ID) {
	if wi, ok := spm.wants[c]; ok {
		wi.sentTo = p
	}
}

// wantInfo keeps track of the information for a want
type wantInfo struct {
	// Tracks HAVE / DONT_HAVE sent to us for the want by each peer
	blockPresence map[peer.ID]BlockPresence
	// The peer that we've sent a want-block to (cleared when we get a response)
	sentTo peer.ID
	// The "best" peer to send the want to next
	bestPeer peer.ID
	// Keeps track of how many hits / misses each peer has sent us for wants
	// in the session
	peerRspTrkr *peerResponseTracker
	// true if all known peers have sent a DONT_HAVE for this want
	exhausted bool
}

// func newWantInfo(prt *peerResponseTracker, c cid.Cid, startIndex int) *wantInfo {
func newWantInfo(prt *peerResponseTracker) *wantInfo {
	return &wantInfo{
		blockPresence: make(map[peer.ID]BlockPresence),
		peerRspTrkr:   prt,
		exhausted:     false,
	}
}

// setPeerBlockPresence sets the block presence for the given peer
func (wi *wantInfo) setPeerBlockPresence(p peer.ID, bp BlockPresence) {
	wi.blockPresence[p] = bp
	wi.calculateBestPeer()

	// If a peer informed us that it has a block then make sure the want is no
	// longer flagged as exhausted (exhausted means no peers have the block)
	if bp == BPHave {
		wi.exhausted = false
	}
}

// removePeer deletes the given peer from the want info
func (wi *wantInfo) removePeer(p peer.ID) {
	// If we were waiting to hear back from the peer that is being removed,
	// clear the sentTo field so we no longer wait
	if p == wi.sentTo {
		wi.sentTo = ""
	}
	delete(wi.blockPresence, p)
	wi.calculateBestPeer()
}

// calculateBestPeer finds the best peer to send the want to next
func (wi *wantInfo) calculateBestPeer() {
	// Recalculate the best peer
	bestBP := BPDontHave
	bestPeer := peer.ID("")

	// Find the peer with the best block presence, recording how many peers
	// share the block presence
	countWithBest := 0
	for p, bp := range wi.blockPresence {
		if bp > bestBP {
			bestBP = bp
			bestPeer = p
			countWithBest = 1
		} else if bp == bestBP {
			countWithBest++
		}
	}
	wi.bestPeer = bestPeer

	// If no peer has a block presence better than DONT_HAVE, bail out
	if bestPeer == "" {
		return
	}

	// If there was only one peer with the best block presence, we're done
	if countWithBest <= 1 {
		return
	}

	// There were multiple peers with the best block presence, so choose one of
	// them to be the best (the response tracker favours peers that answered
	// first in the past)
	var peersWithBest []peer.ID
	for p, bp := range wi.blockPresence {
		if bp == bestBP {
			peersWithBest = append(peersWithBest, p)
		}
	}
	wi.bestPeer = wi.peerRspTrkr.choose(peersWithBest)
}
diff --git a/bitswap/session/sessionwantsender_test.go b/bitswap/session/sessionwantsender_test.go
new file mode 100644
index 000000000..e37744096
--- /dev/null
+++ b/bitswap/session/sessionwantsender_test.go
@@ -0,0 +1,348 @@
package session
import (
	"context"
	"sync"
	"testing"
	"time"

	bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager"
	bspm "github.com/ipfs/go-bitswap/peermanager"
	"github.com/ipfs/go-bitswap/testutil"
	cid "github.com/ipfs/go-cid"
	peer "github.com/libp2p/go-libp2p-core/peer"
)

// sentWants records the want-haves and want-blocks sent to a single peer.
type sentWants struct {
	p          peer.ID
	wantHaves  *cid.Set
	wantBlocks *cid.Set
}

// mockPeerManager is a test double for the PeerManager that records the
// wants sent to each peer.
type mockPeerManager struct {
	peerSessions sync.Map
	peerSends    sync.Map
}

func newMockPeerManager() *mockPeerManager {
	return &mockPeerManager{}
}

// RegisterSession records the session registered for the peer and always
// reports success.
func (pm *mockPeerManager) RegisterSession(p peer.ID, sess bspm.Session) bool {
	pm.peerSessions.Store(p, sess)
	return true
}

// UnregisterSession is a no-op in this mock.
func (pm *mockPeerManager) UnregisterSession(sesid uint64) {
}

// SendWants accumulates the want-blocks and want-haves sent to the peer.
// A cid sent as a want-block is not also recorded as a want-have.
func (pm *mockPeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) {
	swi, _ := pm.peerSends.LoadOrStore(p, sentWants{p, cid.NewSet(), cid.NewSet()})
	sw := swi.(sentWants)
	for _, c := range wantBlocks {
		sw.wantBlocks.Add(c)
	}
	for _, c := range wantHaves {
		if !sw.wantBlocks.Has(c) {
			sw.wantHaves.Add(c)
		}
	}
}

// waitNextWants sleeps briefly so the sessionWantSender can process its
// queue, then returns a snapshot of the wants sent to each peer.
func (pm *mockPeerManager) waitNextWants() map[peer.ID]sentWants {
	time.Sleep(5 * time.Millisecond)
	nw := make(map[peer.ID]sentWants)
	pm.peerSends.Range(func(k, v interface{}) bool {
		nw[k.(peer.ID)] = v.(sentWants)
		return true
	})
	return nw
}

// clearWants forgets everything recorded so far (makes keeping track of
// subsequent sends easier).
func (pm *mockPeerManager) clearWants() {
	pm.peerSends.Range(func(k, v interface{}) bool {
		pm.peerSends.Delete(k)
		return true
	})
}

func TestSendWants(t *testing.T) {
	cids := testutil.GenerateCids(4)
	peers := testutil.GeneratePeers(1)
	peerA := peers[0]
	sid := uint64(1)
	pm := newMockPeerManager()
	bpm := bsbpm.New()
	onSend := func(peer.ID, []cid.Cid, []cid.Cid) {}
	onPeersExhausted := func([]cid.Cid) {}
	spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted)

	go spm.Run()

	// add cid0, cid1
	blkCids0 := cids[0:2]
	spm.Add(blkCids0)
	// peerA: HAVE cid0
	spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true)

	// Wait for processing to complete
	peerSends := pm.waitNextWants()

	// Should have sent
	// peerA: want-block cid0, cid1
	sw, ok := peerSends[peerA]
	if !ok {
		t.Fatal("Nothing sent to peer")
	}
	if !testutil.MatchKeysIgnoreOrder(sw.wantBlocks.Keys(), blkCids0) {
		t.Fatal("Wrong keys")
	}
	if sw.wantHaves.Len() > 0 {
		t.Fatal("Expecting no want-haves")
	}
}

func TestSendsWantBlockToOnePeerOnly(t *testing.T) {
	cids := testutil.GenerateCids(4)
	peers := testutil.GeneratePeers(2)
	peerA := peers[0]
	peerB := peers[1]
	sid := uint64(1)
	pm := newMockPeerManager()
	bpm := bsbpm.New()
	onSend := func(peer.ID, []cid.Cid, []cid.Cid) {}
	onPeersExhausted := func([]cid.Cid) {}
	spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted)

	go spm.Run()

	// add cid0, cid1
	blkCids0 := cids[0:2]
	spm.Add(blkCids0)
	// peerA: HAVE cid0
	spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true)

	// Wait for processing to complete
	peerSends := pm.waitNextWants()

	// Should have sent
	// peerA: want-block cid0, cid1
	sw, ok := peerSends[peerA]
	if !ok {
		t.Fatal("Nothing sent to peer")
	}
	if !testutil.MatchKeysIgnoreOrder(sw.wantBlocks.Keys(), blkCids0) {
		t.Fatal("Wrong keys")
	}

	// Clear wants (makes keeping track of what's been sent easier)
	pm.clearWants()

	// peerB: HAVE cid0
	spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true)

	// Wait for processing to complete
	peerSends = pm.waitNextWants()

	// Have not received response from peerA, so should not send want-block to
	// peerB. Should have sent
	// peerB: want-have cid0, cid1
	sw, ok = peerSends[peerB]
	if !ok {
		t.Fatal("Nothing sent to peer")
	}
	if sw.wantBlocks.Len() > 0 {
		t.Fatal("Expecting no want-blocks")
	}
	if !testutil.MatchKeysIgnoreOrder(sw.wantHaves.Keys(), blkCids0) {
		t.Fatal("Wrong keys")
	}
}

func TestReceiveBlock(t *testing.T) {
	cids := testutil.GenerateCids(2)
	peers := testutil.GeneratePeers(2)
	peerA := peers[0]
	peerB := peers[1]
	sid := uint64(1)
	pm := newMockPeerManager()
	bpm := bsbpm.New()
	onSend := func(peer.ID, []cid.Cid, []cid.Cid) {}
	onPeersExhausted := func([]cid.Cid) {}
	spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted)

	go spm.Run()

	// add cid0, cid1
	spm.Add(cids)
	// peerA: HAVE cid0
	spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true)

	// Wait for processing to complete
	peerSends := pm.waitNextWants()

	// Should have sent
	// peerA: want-block cid0, cid1
	sw, ok := peerSends[peerA]
	if !ok {
		t.Fatal("Nothing sent to peer")
	}
	if !testutil.MatchKeysIgnoreOrder(sw.wantBlocks.Keys(), cids) {
		t.Fatal("Wrong keys")
	}

	// Clear wants (makes keeping track of what's been sent easier)
	pm.clearWants()

	// peerA: block cid0, DONT_HAVE cid1
	bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]})
	spm.Update(peerA, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{cids[1]}, false)
	// peerB: HAVE cid0, cid1
	bpm.ReceiveFrom(peerB, cids, []cid.Cid{})
	spm.Update(peerB, []cid.Cid{}, cids, []cid.Cid{}, true)

	// Wait for processing to complete
	peerSends = pm.waitNextWants()

	// Should have sent
	// peerB: want-block cid1
	// (should not have sent want-block for cid0 because block0 has already
	// been received)
	sw, ok = peerSends[peerB]
	if !ok {
		t.Fatal("Nothing sent to peer")
	}
	wb := sw.wantBlocks.Keys()
	if len(wb) != 1 || !wb[0].Equals(cids[1]) {
		t.Fatal("Wrong keys", wb)
	}
}

func TestPeerUnavailable(t *testing.T) {
	cids := testutil.GenerateCids(2)
	peers := testutil.GeneratePeers(2)
	peerA := peers[0]
	peerB := peers[1]
	sid := uint64(1)
	pm := newMockPeerManager()
	bpm := bsbpm.New()
	onSend := func(peer.ID, []cid.Cid, []cid.Cid) {}
	onPeersExhausted := func([]cid.Cid) {}
	spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted)

	go spm.Run()

	// add cid0, cid1
	spm.Add(cids)
	// peerA: HAVE cid0
	spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true)

	// Wait for processing to complete
	peerSends := pm.waitNextWants()

	// Should have sent
	// peerA: want-block cid0, cid1
	sw, ok := peerSends[peerA]
	if !ok {
		t.Fatal("Nothing sent to peer")
	}
	if !testutil.MatchKeysIgnoreOrder(sw.wantBlocks.Keys(), cids) {
		t.Fatal("Wrong keys")
	}

	// Clear wants (makes keeping track of what's been sent easier)
	pm.clearWants()

	// peerB: HAVE cid0
	spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true)

	// Wait for processing to complete
	peerSends = pm.waitNextWants()

	// Should not have sent anything because want-blocks were already sent to
	// peer A
	sw, ok = peerSends[peerB]
	if ok && sw.wantBlocks.Len() > 0 {
		t.Fatal("Expected no wants sent to peer")
	}

	// peerA becomes unavailable
	spm.SignalAvailability(peerA, false)

	// Wait for processing to complete
	peerSends = pm.waitNextWants()

	// Should now have sent want-block cid0, cid1 to peerB
	sw, ok = peerSends[peerB]
	if !ok {
		t.Fatal("Nothing sent to peer")
	}
	if !testutil.MatchKeysIgnoreOrder(sw.wantBlocks.Keys(), cids) {
		t.Fatal("Wrong keys")
	}
}

func TestPeersExhausted(t *testing.T) {
	cids := testutil.GenerateCids(2)
	peers := testutil.GeneratePeers(2)
	peerA := peers[0]
	peerB := peers[1]
	sid := uint64(1)
	pm := newMockPeerManager()
	bpm := bsbpm.New()
	onSend := func(peer.ID, []cid.Cid, []cid.Cid) {}

	var exhausted []cid.Cid
	onPeersExhausted := func(ks []cid.Cid) {
		exhausted = append(exhausted, ks...)
	}
	spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted)

	go spm.Run()

	// add cid0, cid1
	spm.Add(cids)

	// peerA: DONT_HAVE cid0
	bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[0]})
	// Note: this also registers peer A as being available
	spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[0]}, true)

	time.Sleep(5 * time.Millisecond)

	// All available peers (peer A) have sent us a DONT_HAVE for cid0,
	// so expect that onPeersExhausted() will be called with cid0
	if !testutil.MatchKeysIgnoreOrder(exhausted, []cid.Cid{cids[0]}) {
		t.Fatal("Wrong keys")
	}

	// Clear exhausted cids
	exhausted = []cid.Cid{}

	// peerB: DONT_HAVE cid0, cid1
	bpm.ReceiveFrom(peerB, []cid.Cid{}, cids)
	spm.Update(peerB, []cid.Cid{}, []cid.Cid{}, cids, true)

	// Wait for processing to complete
	pm.waitNextWants()

	// All available peers (peer A and peer B) have sent us a DONT_HAVE
	// for cid0, but we already called onPeersExhausted with cid0, so it
	// should not be called again
	if len(exhausted) > 0 {
		t.Fatal("Wrong keys")
	}

	// peerA: DONT_HAVE cid1
	bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]})
	spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[1]}, false)

	// Wait for processing to complete
	pm.waitNextWants()

	// All available peers (peer A and peer B) have sent us a DONT_HAVE for
	// cid1, so expect that onPeersExhausted() will be called with cid1
	if !testutil.MatchKeysIgnoreOrder(exhausted, []cid.Cid{cids[1]}) {
		t.Fatal("Wrong keys")
	}
}
diff --git a/bitswap/session/wantinfo_test.go b/bitswap/session/wantinfo_test.go
new file mode 100644
index 000000000..618b231a5
--- /dev/null
+++ b/bitswap/session/wantinfo_test.go
@@ -0,0 +1,80 @@
package session

import (
	"testing"

	"github.com/ipfs/go-bitswap/testutil"
)

func TestEmptyWantInfo(t *testing.T) {
	wp := newWantInfo(newPeerResponseTracker())

	if wp.bestPeer != "" {
		t.Fatal("expected no best peer")
	}
}

func TestSetPeerBlockPresence(t *testing.T) {
	peers := testutil.GeneratePeers(2)
	wp := newWantInfo(newPeerResponseTracker())

	wp.setPeerBlockPresence(peers[0], BPUnknown)
	if wp.bestPeer != peers[0] {
		t.Fatal("wrong best peer")
	}

	wp.setPeerBlockPresence(peers[1], BPHave)
	if wp.bestPeer != peers[1] {
		t.Fatal("wrong best peer")
	}

	wp.setPeerBlockPresence(peers[0], BPDontHave)
	if wp.bestPeer != peers[1] {
		t.Fatal("wrong best peer")
	}
}

func TestSetPeerBlockPresenceBestLower(t *testing.T) {
	peers := testutil.GeneratePeers(2)
	wp := newWantInfo(newPeerResponseTracker())

	wp.setPeerBlockPresence(peers[0], BPHave)
	if wp.bestPeer != peers[0] {
		t.Fatal("wrong best peer")
	}

	wp.setPeerBlockPresence(peers[1], BPUnknown)
	if wp.bestPeer != peers[0] {
		t.Fatal("wrong best peer")
	}

	wp.setPeerBlockPresence(peers[0], BPDontHave)
	if wp.bestPeer != peers[1] {
		t.Fatal("wrong best peer")
	}
}

func TestRemoveThenSetDontHave(t *testing.T) {
	peers := testutil.GeneratePeers(2)
	wp := newWantInfo(newPeerResponseTracker())

	wp.setPeerBlockPresence(peers[0], BPUnknown)
	if wp.bestPeer != peers[0] {
		t.Fatal("wrong best peer")
	}

	wp.removePeer(peers[0])
	if wp.bestPeer != "" {
		t.Fatal("wrong best peer")
	}

	wp.setPeerBlockPresence(peers[1], BPUnknown)
	if wp.bestPeer != peers[1] {
		t.Fatal("wrong best peer")
	}

	wp.setPeerBlockPresence(peers[0], BPDontHave)
	if wp.bestPeer != peers[1] {
		t.Fatal("wrong best peer")
	}
}
diff --git a/bitswap/sessioninterestmanager/sessioninterestmanager.go b/bitswap/sessioninterestmanager/sessioninterestmanager.go
new file mode 100644
index 000000000..9deb37954
--- /dev/null
+++ b/bitswap/sessioninterestmanager/sessioninterestmanager.go
@@ -0,0 +1,73 @@
package sessioninterestmanager

import (
	bsswl "github.com/ipfs/go-bitswap/sessionwantlist"
	blocks "github.com/ipfs/go-block-format"

	cid
"github.com/ipfs/go-cid" +) + +type SessionInterestManager struct { + interested *bsswl.SessionWantlist + wanted *bsswl.SessionWantlist +} + +// New initializes a new SessionInterestManager. +func New() *SessionInterestManager { + return &SessionInterestManager{ + interested: bsswl.NewSessionWantlist(), + wanted: bsswl.NewSessionWantlist(), + } +} + +func (sim *SessionInterestManager) RecordSessionInterest(ses uint64, ks []cid.Cid) { + sim.interested.Add(ks, ses) + sim.wanted.Add(ks, ses) +} + +func (sim *SessionInterestManager) RemoveSessionInterest(ses uint64) []cid.Cid { + sim.wanted.RemoveSession(ses) + return sim.interested.RemoveSession(ses) +} + +func (sim *SessionInterestManager) RemoveSessionWants(ses uint64, wants []cid.Cid) { + sim.wanted.RemoveSessionKeys(ses, wants) +} + +func (sim *SessionInterestManager) FilterSessionInterested(ses uint64, ksets ...[]cid.Cid) [][]cid.Cid { + kres := make([][]cid.Cid, len(ksets)) + for i, ks := range ksets { + kres[i] = sim.interested.SessionHas(ses, ks).Keys() + } + return kres +} + +func (sim *SessionInterestManager) SplitWantedUnwanted(blks []blocks.Block) ([]blocks.Block, []blocks.Block) { + // Get the wanted block keys + ks := make([]cid.Cid, len(blks)) + for _, b := range blks { + ks = append(ks, b.Cid()) + } + wantedKs := sim.wanted.Has(ks) + + // Separate the blocks into wanted and unwanted + wantedBlks := make([]blocks.Block, 0, len(blks)) + notWantedBlks := make([]blocks.Block, 0) + for _, b := range blks { + if wantedKs.Has(b.Cid()) { + wantedBlks = append(wantedBlks, b) + } else { + notWantedBlks = append(notWantedBlks, b) + } + } + return wantedBlks, notWantedBlks +} + +func (sim *SessionInterestManager) InterestedSessions(blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) []uint64 { + ks := make([]cid.Cid, 0, len(blks)+len(haves)+len(dontHaves)) + ks = append(ks, blks...) + ks = append(ks, haves...) + ks = append(ks, dontHaves...) 
+ + return sim.interested.SessionsFor(ks) +} diff --git a/bitswap/sessioninterestmanager/sessioninterestmanager_test.go b/bitswap/sessioninterestmanager/sessioninterestmanager_test.go new file mode 100644 index 000000000..d882cabc3 --- /dev/null +++ b/bitswap/sessioninterestmanager/sessioninterestmanager_test.go @@ -0,0 +1,182 @@ +package sessioninterestmanager + +import ( + "testing" + + "github.com/ipfs/go-bitswap/testutil" + cid "github.com/ipfs/go-cid" +) + +func TestEmpty(t *testing.T) { + sim := New() + + ses := uint64(1) + cids := testutil.GenerateCids(2) + res := sim.FilterSessionInterested(ses, cids) + if len(res) != 1 || len(res[0]) > 0 { + t.Fatal("Expected no interest") + } + if len(sim.InterestedSessions(cids, []cid.Cid{}, []cid.Cid{})) > 0 { + t.Fatal("Expected no interest") + } +} + +func TestBasic(t *testing.T) { + sim := New() + + ses1 := uint64(1) + ses2 := uint64(2) + cids1 := testutil.GenerateCids(2) + cids2 := append(testutil.GenerateCids(1), cids1[1]) + sim.RecordSessionInterest(ses1, cids1) + + res := sim.FilterSessionInterested(ses1, cids1) + if len(res) != 1 || len(res[0]) != 2 { + t.Fatal("Expected 2 keys") + } + if len(sim.InterestedSessions(cids1, []cid.Cid{}, []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + + sim.RecordSessionInterest(ses2, cids2) + res = sim.FilterSessionInterested(ses2, cids1[:1]) + if len(res) != 1 || len(res[0]) != 0 { + t.Fatal("Expected no interest") + } + res = sim.FilterSessionInterested(ses2, cids2) + if len(res) != 1 || len(res[0]) != 2 { + t.Fatal("Expected 2 keys") + } + + if len(sim.InterestedSessions(cids1[:1], []cid.Cid{}, []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions(cids1[1:], []cid.Cid{}, []cid.Cid{})) != 2 { + t.Fatal("Expected 2 sessions") + } +} + +func TestInterestedSessions(t *testing.T) { + sim := New() + + ses := uint64(1) + cids := testutil.GenerateCids(3) + sim.RecordSessionInterest(ses, cids[0:2]) + + if len(sim.InterestedSessions(cids, 
[]cid.Cid{}, []cid.Cid{})) != 1 {
		t.Fatal("Expected 1 session")
	}
	if len(sim.InterestedSessions(cids[0:1], []cid.Cid{}, []cid.Cid{})) != 1 {
		t.Fatal("Expected 1 session")
	}
	if len(sim.InterestedSessions([]cid.Cid{}, cids, []cid.Cid{})) != 1 {
		t.Fatal("Expected 1 session")
	}
	if len(sim.InterestedSessions([]cid.Cid{}, cids[0:1], []cid.Cid{})) != 1 {
		t.Fatal("Expected 1 session")
	}
	if len(sim.InterestedSessions([]cid.Cid{}, []cid.Cid{}, cids)) != 1 {
		t.Fatal("Expected 1 session")
	}
	if len(sim.InterestedSessions([]cid.Cid{}, []cid.Cid{}, cids[0:1])) != 1 {
		t.Fatal("Expected 1 session")
	}
}

func TestRemoveSessionInterest(t *testing.T) {
	sim := New()

	ses1 := uint64(1)
	ses2 := uint64(2)
	cids1 := testutil.GenerateCids(2)
	// cids2 shares cids1[1] with ses1's interest
	cids2 := append(testutil.GenerateCids(1), cids1[1])
	sim.RecordSessionInterest(ses1, cids1)
	sim.RecordSessionInterest(ses2, cids2)
	sim.RemoveSessionInterest(ses1)

	res := sim.FilterSessionInterested(ses1, cids1)
	if len(res) != 1 || len(res[0]) != 0 {
		t.Fatal("Expected no interest")
	}

	res = sim.FilterSessionInterested(ses2, cids1, cids2)
	if len(res) != 2 {
		t.Fatal("unexpected results size")
	}
	if len(res[0]) != 1 {
		t.Fatal("Expected 1 key")
	}
	if len(res[1]) != 2 {
		t.Fatal("Expected 2 keys")
	}
}

func TestSplitWantedUnwanted(t *testing.T) {
	blks := testutil.GenerateBlocksOfSize(3, 1024)
	sim := New()
	ses1 := uint64(1)
	ses2 := uint64(2)

	var cids []cid.Cid
	for _, b := range blks {
		cids = append(cids, b.Cid())
	}

	// No session wants anything yet:
	// ses1:
	// ses2:
	wanted, unwanted := sim.SplitWantedUnwanted(blks)
	if len(wanted) > 0 {
		t.Fatal("Expected no blocks")
	}
	if len(unwanted) != 3 {
		t.Fatal("Expected 3 blocks")
	}

	// ses1: 0 1
	// ses2:
	sim.RecordSessionInterest(ses1, cids[0:2])
	wanted, unwanted = sim.SplitWantedUnwanted(blks)
	if len(wanted) != 2 {
		t.Fatal("Expected 2 blocks")
	}
	if len(unwanted) != 1 {
		t.Fatal("Expected 1 block")
	}

	// ses1: 1
+ // ses2: 1 2 + sim.RecordSessionInterest(ses2, cids[1:]) + sim.RemoveSessionWants(ses1, cids[:1]) + + wanted, unwanted = sim.SplitWantedUnwanted(blks) + if len(wanted) != 2 { + t.Fatal("Expected 2 blocks") + } + if len(unwanted) != 1 { + t.Fatal("Expected no blocks") + } + + // ses1: + // ses2: 1 2 + sim.RemoveSessionWants(ses1, cids[1:2]) + + wanted, unwanted = sim.SplitWantedUnwanted(blks) + if len(wanted) != 2 { + t.Fatal("Expected 2 blocks") + } + if len(unwanted) != 1 { + t.Fatal("Expected no blocks") + } + + // ses1: + // ses2: 2 + sim.RemoveSessionWants(ses2, cids[1:2]) + + wanted, unwanted = sim.SplitWantedUnwanted(blks) + if len(wanted) != 1 { + t.Fatal("Expected 2 blocks") + } + if len(unwanted) != 2 { + t.Fatal("Expected 2 blocks") + } +} diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/sessionmanager/sessionmanager.go index c967a04a4..3090e8291 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/sessionmanager/sessionmanager.go @@ -8,8 +8,10 @@ import ( cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" + bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" notifications "github.com/ipfs/go-bitswap/notifications" bssession "github.com/ipfs/go-bitswap/session" + bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" exchange "github.com/ipfs/go-ipfs-exchange-interface" peer "github.com/libp2p/go-libp2p-core/peer" ) @@ -17,52 +19,51 @@ import ( // Session is a session that is managed by the session manager type Session interface { exchange.Fetcher - ReceiveFrom(peer.ID, []cid.Cid) - IsWanted(cid.Cid) bool -} - -type sesTrk struct { - session Session - pm bssession.PeerManager - srs bssession.RequestSplitter + ID() uint64 + ReceiveFrom(peer.ID, []cid.Cid, []cid.Cid, []cid.Cid) } // SessionFactory generates a new session for the SessionManager to track. 
-type SessionFactory func(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter, notif notifications.PubSub, provSearchDelay time.Duration, rebroadcastDelay delay.D) Session - -// RequestSplitterFactory generates a new request splitter for a session. -type RequestSplitterFactory func(ctx context.Context) bssession.RequestSplitter +type SessionFactory func(ctx context.Context, id uint64, sprm bssession.SessionPeerManager, sim *bssim.SessionInterestManager, pm bssession.PeerManager, bpm *bsbpm.BlockPresenceManager, notif notifications.PubSub, provSearchDelay time.Duration, rebroadcastDelay delay.D, self peer.ID) Session // PeerManagerFactory generates a new peer manager for a session. -type PeerManagerFactory func(ctx context.Context, id uint64) bssession.PeerManager +type PeerManagerFactory func(ctx context.Context, id uint64) bssession.SessionPeerManager // SessionManager is responsible for creating, managing, and dispatching to // sessions. type SessionManager struct { ctx context.Context sessionFactory SessionFactory + sessionInterestManager *bssim.SessionInterestManager peerManagerFactory PeerManagerFactory - requestSplitterFactory RequestSplitterFactory + blockPresenceManager *bsbpm.BlockPresenceManager + peerManager bssession.PeerManager notif notifications.PubSub // Sessions sessLk sync.RWMutex - sessions []sesTrk + sessions map[uint64]Session // Session Index sessIDLk sync.Mutex sessID uint64 + + self peer.ID } // New creates a new SessionManager. 
-func New(ctx context.Context, sessionFactory SessionFactory, peerManagerFactory PeerManagerFactory, - requestSplitterFactory RequestSplitterFactory, notif notifications.PubSub) *SessionManager { +func New(ctx context.Context, sessionFactory SessionFactory, sessionInterestManager *bssim.SessionInterestManager, peerManagerFactory PeerManagerFactory, + blockPresenceManager *bsbpm.BlockPresenceManager, peerManager bssession.PeerManager, notif notifications.PubSub, self peer.ID) *SessionManager { return &SessionManager{ ctx: ctx, sessionFactory: sessionFactory, + sessionInterestManager: sessionInterestManager, peerManagerFactory: peerManagerFactory, - requestSplitterFactory: requestSplitterFactory, + blockPresenceManager: blockPresenceManager, + peerManager: peerManager, notif: notif, + sessions: make(map[uint64]Session), + self: self, } } @@ -75,66 +76,53 @@ func (sm *SessionManager) NewSession(ctx context.Context, sessionctx, cancel := context.WithCancel(ctx) pm := sm.peerManagerFactory(sessionctx, id) - srs := sm.requestSplitterFactory(sessionctx) - session := sm.sessionFactory(sessionctx, id, pm, srs, sm.notif, provSearchDelay, rebroadcastDelay) - tracked := sesTrk{session, pm, srs} + session := sm.sessionFactory(sessionctx, id, pm, sm.sessionInterestManager, sm.peerManager, sm.blockPresenceManager, sm.notif, provSearchDelay, rebroadcastDelay, sm.self) sm.sessLk.Lock() - sm.sessions = append(sm.sessions, tracked) + sm.sessions[id] = session sm.sessLk.Unlock() go func() { defer cancel() select { case <-sm.ctx.Done(): - sm.removeSession(tracked) + sm.removeSession(id) case <-ctx.Done(): - sm.removeSession(tracked) + sm.removeSession(id) } }() return session } -func (sm *SessionManager) removeSession(session sesTrk) { +func (sm *SessionManager) removeSession(sesid uint64) { sm.sessLk.Lock() defer sm.sessLk.Unlock() - for i := 0; i < len(sm.sessions); i++ { - if sm.sessions[i] == session { - sm.sessions[i] = sm.sessions[len(sm.sessions)-1] - 
sm.sessions[len(sm.sessions)-1] = sesTrk{} // free memory. - sm.sessions = sm.sessions[:len(sm.sessions)-1] - return - } - } + + delete(sm.sessions, sesid) } -// GetNextSessionID returns the next sequentional identifier for a session. +// GetNextSessionID returns the next sequential identifier for a session. func (sm *SessionManager) GetNextSessionID() uint64 { sm.sessIDLk.Lock() defer sm.sessIDLk.Unlock() + sm.sessID++ return sm.sessID } -// ReceiveFrom receives block CIDs from a peer and dispatches to sessions. -func (sm *SessionManager) ReceiveFrom(from peer.ID, ks []cid.Cid) { - sm.sessLk.RLock() - defer sm.sessLk.RUnlock() - - for _, s := range sm.sessions { - s.session.ReceiveFrom(from, ks) - } -} +func (sm *SessionManager) ReceiveFrom(p peer.ID, blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) []Session { + sessions := make([]Session, 0) -// IsWanted indicates whether any of the sessions are waiting to receive -// the block with the given CID. -func (sm *SessionManager) IsWanted(cid cid.Cid) bool { - sm.sessLk.RLock() - defer sm.sessLk.RUnlock() + // Notify each session that is interested in the blocks / HAVEs / DONT_HAVEs + for _, id := range sm.sessionInterestManager.InterestedSessions(blks, haves, dontHaves) { + sm.sessLk.RLock() + sess, ok := sm.sessions[id] + sm.sessLk.RUnlock() - for _, s := range sm.sessions { - if s.session.IsWanted(cid) { - return true + if ok { + sess.ReceiveFrom(p, blks, haves, dontHaves) + sessions = append(sessions, sess) } } - return false + + return sessions } diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/sessionmanager/sessionmanager_test.go index 95c12b128..8f25a952b 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/sessionmanager/sessionmanager_test.go @@ -7,10 +7,11 @@ import ( delay "github.com/ipfs/go-ipfs-delay" + bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" notifications "github.com/ipfs/go-bitswap/notifications" + bspm 
"github.com/ipfs/go-bitswap/peermanager" bssession "github.com/ipfs/go-bitswap/session" - bssd "github.com/ipfs/go-bitswap/sessiondata" - "github.com/ipfs/go-bitswap/testutil" + bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" @@ -18,12 +19,12 @@ import ( ) type fakeSession struct { - wanted []cid.Cid - ks []cid.Cid - id uint64 - pm *fakePeerManager - srs *fakeRequestSplitter - notif notifications.PubSub + ks []cid.Cid + wantBlocks []cid.Cid + wantHaves []cid.Cid + id uint64 + pm *fakeSesPeerManager + notif notifications.PubSub } func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { @@ -32,149 +33,124 @@ func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { func (*fakeSession) GetBlocks(context.Context, []cid.Cid) (<-chan blocks.Block, error) { return nil, nil } -func (fs *fakeSession) IsWanted(c cid.Cid) bool { - for _, ic := range fs.wanted { - if c == ic { - return true - } - } - return false +func (fs *fakeSession) ID() uint64 { + return fs.id } -func (fs *fakeSession) ReceiveFrom(p peer.ID, ks []cid.Cid) { +func (fs *fakeSession) ReceiveFrom(p peer.ID, ks []cid.Cid, wantBlocks []cid.Cid, wantHaves []cid.Cid) { fs.ks = append(fs.ks, ks...) + fs.wantBlocks = append(fs.wantBlocks, wantBlocks...) + fs.wantHaves = append(fs.wantHaves, wantHaves...) 
} -type fakePeerManager struct { - id uint64 +type fakeSesPeerManager struct { } -func (*fakePeerManager) FindMorePeers(context.Context, cid.Cid) {} -func (*fakePeerManager) GetOptimizedPeers() []bssd.OptimizedPeer { return nil } -func (*fakePeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} -func (*fakePeerManager) RecordPeerResponse(peer.ID, []cid.Cid) {} -func (*fakePeerManager) RecordCancels(c []cid.Cid) {} - -type fakeRequestSplitter struct { -} +func (*fakeSesPeerManager) ReceiveFrom(peer.ID, []cid.Cid, []cid.Cid) bool { return true } +func (*fakeSesPeerManager) Peers() *peer.Set { return nil } +func (*fakeSesPeerManager) FindMorePeers(context.Context, cid.Cid) {} +func (*fakeSesPeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} +func (*fakeSesPeerManager) RecordPeerResponse(peer.ID, []cid.Cid) {} +func (*fakeSesPeerManager) RecordCancels(c []cid.Cid) {} -func (frs *fakeRequestSplitter) SplitRequest(optimizedPeers []bssd.OptimizedPeer, keys []cid.Cid) []bssd.PartialRequest { - return nil +type fakePeerManager struct { } -func (frs *fakeRequestSplitter) RecordDuplicateBlock() {} -func (frs *fakeRequestSplitter) RecordUniqueBlock() {} -var nextWanted []cid.Cid +func (*fakePeerManager) RegisterSession(peer.ID, bspm.Session) bool { return true } +func (*fakePeerManager) UnregisterSession(uint64) {} +func (*fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {} func sessionFactory(ctx context.Context, id uint64, + sprm bssession.SessionPeerManager, + sim *bssim.SessionInterestManager, pm bssession.PeerManager, - srs bssession.RequestSplitter, + bpm *bsbpm.BlockPresenceManager, notif notifications.PubSub, provSearchDelay time.Duration, - rebroadcastDelay delay.D) Session { + rebroadcastDelay delay.D, + self peer.ID) Session { return &fakeSession{ - wanted: nextWanted, - id: id, - pm: pm.(*fakePeerManager), - srs: srs.(*fakeRequestSplitter), - notif: notif, + id: id, + pm: sprm.(*fakeSesPeerManager), + notif: notif, } } -func 
peerManagerFactory(ctx context.Context, id uint64) bssession.PeerManager { - return &fakePeerManager{id} -} - -func requestSplitterFactory(ctx context.Context) bssession.RequestSplitter { - return &fakeRequestSplitter{} +func peerManagerFactory(ctx context.Context, id uint64) bssession.SessionPeerManager { + return &fakeSesPeerManager{} } -func TestAddingSessions(t *testing.T) { +func TestReceiveFrom(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() notif := notifications.New() defer notif.Shutdown() - sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory, notif) + sim := bssim.New() + bpm := bsbpm.New() + pm := &fakePeerManager{} + sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") p := peer.ID(123) block := blocks.NewBlock([]byte("block")) - // we'll be interested in all blocks for this test - nextWanted = []cid.Cid{block.Cid()} - currentID := sm.GetNextSessionID() firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - if firstSession.id != firstSession.pm.id || - firstSession.id != currentID+1 { - t.Fatal("session does not have correct id set") - } secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - if secondSession.id != secondSession.pm.id || - secondSession.id != firstSession.id+1 { - t.Fatal("session does not have correct id set") - } - sm.GetNextSessionID() thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - if thirdSession.id != thirdSession.pm.id || - thirdSession.id != secondSession.id+2 { - t.Fatal("session does not have correct id set") - } - sm.ReceiveFrom(p, []cid.Cid{block.Cid()}) + + sim.RecordSessionInterest(firstSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(thirdSession.ID(), []cid.Cid{block.Cid()}) + + sm.ReceiveFrom(p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) if len(firstSession.ks) == 0 || - 
len(secondSession.ks) == 0 || + len(secondSession.ks) > 0 || len(thirdSession.ks) == 0 { t.Fatal("should have received blocks but didn't") } -} -func TestIsWanted(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - notif := notifications.New() - defer notif.Shutdown() - sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory, notif) - - blks := testutil.GenerateBlocksOfSize(4, 1024) - var cids []cid.Cid - for _, b := range blks { - cids = append(cids, b.Cid()) + sm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{block.Cid()}, []cid.Cid{}) + if len(firstSession.wantBlocks) == 0 || + len(secondSession.wantBlocks) > 0 || + len(thirdSession.wantBlocks) == 0 { + t.Fatal("should have received want-blocks but didn't") } - nextWanted = []cid.Cid{cids[0], cids[1]} - _ = sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - nextWanted = []cid.Cid{cids[0], cids[2]} - _ = sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) - - if !sm.IsWanted(cids[0]) || - !sm.IsWanted(cids[1]) || - !sm.IsWanted(cids[2]) { - t.Fatal("expected unwanted but session manager did want cid") - } - if sm.IsWanted(cids[3]) { - t.Fatal("expected wanted but session manager did not want cid") + sm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{block.Cid()}) + if len(firstSession.wantHaves) == 0 || + len(secondSession.wantHaves) > 0 || + len(thirdSession.wantHaves) == 0 { + t.Fatal("should have received want-haves but didn't") } } -func TestRemovingPeersWhenManagerContextCancelled(t *testing.T) { +func TestReceiveBlocksWhenManagerContextCancelled(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) + defer cancel() notif := notifications.New() defer notif.Shutdown() - sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory, notif) + sim := bssim.New() + bpm := bsbpm.New() + pm := &fakePeerManager{} + sm := New(ctx, sessionFactory, sim, 
peerManagerFactory, bpm, pm, notif, "") p := peer.ID(123) block := blocks.NewBlock([]byte("block")) - // we'll be interested in all blocks for this test - nextWanted = []cid.Cid{block.Cid()} + firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + sim.RecordSessionInterest(firstSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(secondSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(thirdSession.ID(), []cid.Cid{block.Cid()}) + cancel() + // wait for sessions to get removed time.Sleep(10 * time.Millisecond) - sm.ReceiveFrom(p, []cid.Cid{block.Cid()}) + + sm.ReceiveFrom(p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) if len(firstSession.ks) > 0 || len(secondSession.ks) > 0 || len(thirdSession.ks) > 0 { @@ -182,27 +158,35 @@ func TestRemovingPeersWhenManagerContextCancelled(t *testing.T) { } } -func TestRemovingPeersWhenSessionContextCancelled(t *testing.T) { +func TestReceiveBlocksWhenSessionContextCancelled(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() notif := notifications.New() defer notif.Shutdown() - sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory, notif) + sim := bssim.New() + bpm := bsbpm.New() + pm := &fakePeerManager{} + sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") p := peer.ID(123) block := blocks.NewBlock([]byte("block")) - // we'll be interested in all blocks for this test - nextWanted = []cid.Cid{block.Cid()} + firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) sessionCtx, sessionCancel := context.WithCancel(ctx) secondSession := sm.NewSession(sessionCtx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) thirdSession := sm.NewSession(ctx, time.Second, 
delay.Fixed(time.Minute)).(*fakeSession) + sim.RecordSessionInterest(firstSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(secondSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(thirdSession.ID(), []cid.Cid{block.Cid()}) + sessionCancel() + // wait for sessions to get removed time.Sleep(10 * time.Millisecond) - sm.ReceiveFrom(p, []cid.Cid{block.Cid()}) + + sm.ReceiveFrom(p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) if len(firstSession.ks) == 0 || len(secondSession.ks) > 0 || len(thirdSession.ks) == 0 { diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/sessionpeermanager/sessionpeermanager.go index 3c4e13749..060df0915 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/sessionpeermanager/sessionpeermanager.go @@ -8,11 +8,14 @@ import ( "time" bssd "github.com/ipfs/go-bitswap/sessiondata" + logging "github.com/ipfs/go-log" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" ) +var log = logging.Logger("bs:sprmgr") + const ( defaultTimeoutDuration = 5 * time.Second maxOptimizedPeers = 32 @@ -41,6 +44,7 @@ type SessionPeerManager struct { ctx context.Context tagger PeerTagger providerFinder PeerProviderFinder + peers *peer.Set tag string id uint64 @@ -61,7 +65,8 @@ func New(ctx context.Context, id uint64, tagger PeerTagger, providerFinder PeerP id: id, tagger: tagger, providerFinder: providerFinder, - peerMessages: make(chan peerMessage, 16), + peers: peer.NewSet(), + peerMessages: make(chan peerMessage, 128), activePeers: make(map[peer.ID]*peerData), broadcastLatency: newLatencyTracker(), timeoutDuration: defaultTimeoutDuration, @@ -73,6 +78,19 @@ func New(ctx context.Context, id uint64, tagger PeerTagger, providerFinder PeerP return spm } +func (spm *SessionPeerManager) ReceiveFrom(p peer.ID, ks []cid.Cid, haves []cid.Cid) bool { + if len(ks) > 0 || len(haves) > 0 && !spm.peers.Contains(p) { + log.Infof("Added peer %s to session: %d peers\n", p, 
spm.peers.Size()) + spm.peers.Add(p) + return true + } + return false +} + +func (spm *SessionPeerManager) Peers() *peer.Set { + return spm.peers +} + // RecordPeerResponse records that a peer received some blocks, and adds the // peer to the list of peers if it wasn't already added func (spm *SessionPeerManager) RecordPeerResponse(p peer.ID, ks []cid.Cid) { @@ -176,6 +194,11 @@ func (spm *SessionPeerManager) insertPeer(p peer.ID, data *peerData) { } else { spm.unoptimizedPeersArr = append(spm.unoptimizedPeersArr, p) } + + if !spm.peers.Contains(p) { + log.Infof("Added peer %s to session: %d peers\n", p, spm.peers.Size()) + spm.peers.Add(p) + } } func (spm *SessionPeerManager) removeOptimizedPeer(p peer.ID) { diff --git a/bitswap/sessionwantlist/sessionwantlist.go b/bitswap/sessionwantlist/sessionwantlist.go new file mode 100644 index 000000000..d98147396 --- /dev/null +++ b/bitswap/sessionwantlist/sessionwantlist.go @@ -0,0 +1,126 @@ +package sessionwantlist + +import ( + "sync" + + cid "github.com/ipfs/go-cid" +) + +type SessionWantlist struct { + sync.RWMutex + wants map[cid.Cid]map[uint64]struct{} +} + +func NewSessionWantlist() *SessionWantlist { + return &SessionWantlist{ + wants: make(map[cid.Cid]map[uint64]struct{}), + } +} + +func (swl *SessionWantlist) Add(ks []cid.Cid, ses uint64) { + swl.Lock() + defer swl.Unlock() + + for _, c := range ks { + if _, ok := swl.wants[c]; !ok { + swl.wants[c] = make(map[uint64]struct{}) + } + swl.wants[c][ses] = struct{}{} + } +} + +func (swl *SessionWantlist) RemoveKeys(ks []cid.Cid) { + swl.Lock() + defer swl.Unlock() + + for _, c := range ks { + delete(swl.wants, c) + } +} + +func (swl *SessionWantlist) RemoveSession(ses uint64) []cid.Cid { + swl.Lock() + defer swl.Unlock() + + deletedKs := make([]cid.Cid, 0) + for c := range swl.wants { + delete(swl.wants[c], ses) + if len(swl.wants[c]) == 0 { + delete(swl.wants, c) + deletedKs = append(deletedKs, c) + } + } + + return deletedKs +} + +func (swl *SessionWantlist) 
RemoveSessionKeys(ses uint64, ks []cid.Cid) { + swl.Lock() + defer swl.Unlock() + + for _, c := range ks { + if _, ok := swl.wants[c]; ok { + delete(swl.wants[c], ses) + if len(swl.wants[c]) == 0 { + delete(swl.wants, c) + } + } + } +} + +func (swl *SessionWantlist) Keys() []cid.Cid { + swl.RLock() + defer swl.RUnlock() + + ks := make([]cid.Cid, 0, len(swl.wants)) + for c := range swl.wants { + ks = append(ks, c) + } + return ks +} + +func (swl *SessionWantlist) SessionsFor(ks []cid.Cid) []uint64 { + swl.RLock() + defer swl.RUnlock() + + sesMap := make(map[uint64]struct{}) + for _, c := range ks { + for s := range swl.wants[c] { + sesMap[s] = struct{}{} + } + } + + ses := make([]uint64, 0, len(sesMap)) + for s := range sesMap { + ses = append(ses, s) + } + return ses +} + +func (swl *SessionWantlist) Has(ks []cid.Cid) *cid.Set { + swl.RLock() + defer swl.RUnlock() + + has := cid.NewSet() + for _, c := range ks { + if _, ok := swl.wants[c]; ok { + has.Add(c) + } + } + return has +} + +func (swl *SessionWantlist) SessionHas(ses uint64, ks []cid.Cid) *cid.Set { + swl.RLock() + defer swl.RUnlock() + + has := cid.NewSet() + for _, c := range ks { + if sesMap, cok := swl.wants[c]; cok { + if _, sok := sesMap[ses]; sok { + has.Add(c) + } + } + } + return has +} diff --git a/bitswap/sessionwantlist/sessionwantlist_test.go b/bitswap/sessionwantlist/sessionwantlist_test.go new file mode 100644 index 000000000..0b89b8ae8 --- /dev/null +++ b/bitswap/sessionwantlist/sessionwantlist_test.go @@ -0,0 +1,258 @@ +package sessionwantlist + +import ( + "os" + "testing" + + "github.com/ipfs/go-bitswap/testutil" + + cid "github.com/ipfs/go-cid" +) + +var c0 cid.Cid +var c1 cid.Cid +var c2 cid.Cid + +const s0 = uint64(0) +const s1 = uint64(1) + +func setup() { + cids := testutil.GenerateCids(3) + c0 = cids[0] + c1 = cids[1] + c2 = cids[2] +} + +func TestMain(m *testing.M) { + setup() + os.Exit(m.Run()) +} + +func TestEmpty(t *testing.T) { + swl := NewSessionWantlist() + + if 
len(swl.Keys()) != 0 { + t.Fatal("Expected Keys() to be empty") + } + if len(swl.SessionsFor([]cid.Cid{c0})) != 0 { + t.Fatal("Expected SessionsFor() to be empty") + } +} + +func TestSimpleAdd(t *testing.T) { + swl := NewSessionWantlist() + + // s0: c0 + swl.Add([]cid.Cid{c0}, s0) + if len(swl.Keys()) != 1 { + t.Fatal("Expected Keys() to have length 1") + } + if !swl.Keys()[0].Equals(c0) { + t.Fatal("Expected Keys() to be [cid0]") + } + if len(swl.SessionsFor([]cid.Cid{c0})) != 1 { + t.Fatal("Expected SessionsFor() to have length 1") + } + if swl.SessionsFor([]cid.Cid{c0})[0] != s0 { + t.Fatal("Expected SessionsFor() to be [s0]") + } + + // s0: c0, c1 + swl.Add([]cid.Cid{c1}, s0) + if len(swl.Keys()) != 2 { + t.Fatal("Expected Keys() to have length 2") + } + if !testutil.MatchKeysIgnoreOrder(swl.Keys(), []cid.Cid{c0, c1}) { + t.Fatal("Expected Keys() to contain [cid0, cid1]") + } + if len(swl.SessionsFor([]cid.Cid{c0})) != 1 { + t.Fatal("Expected SessionsFor() to have length 1") + } + if swl.SessionsFor([]cid.Cid{c0})[0] != s0 { + t.Fatal("Expected SessionsFor() to be [s0]") + } + + // s0: c0, c1 + // s1: c0 + swl.Add([]cid.Cid{c0}, s1) + if len(swl.Keys()) != 2 { + t.Fatal("Expected Keys() to have length 2") + } + if !testutil.MatchKeysIgnoreOrder(swl.Keys(), []cid.Cid{c0, c1}) { + t.Fatal("Expected Keys() to contain [cid0, cid1]") + } + if len(swl.SessionsFor([]cid.Cid{c0})) != 2 { + t.Fatal("Expected SessionsFor() to have length 2") + } +} + +func TestMultiKeyAdd(t *testing.T) { + swl := NewSessionWantlist() + + // s0: c0, c1 + swl.Add([]cid.Cid{c0, c1}, s0) + if len(swl.Keys()) != 2 { + t.Fatal("Expected Keys() to have length 2") + } + if !testutil.MatchKeysIgnoreOrder(swl.Keys(), []cid.Cid{c0, c1}) { + t.Fatal("Expected Keys() to contain [cid0, cid1]") + } + if len(swl.SessionsFor([]cid.Cid{c0})) != 1 { + t.Fatal("Expected SessionsFor() to have length 1") + } + if swl.SessionsFor([]cid.Cid{c0})[0] != s0 { + t.Fatal("Expected SessionsFor() to be [s0]") + } +} + 
+func TestSessionHas(t *testing.T) { + swl := NewSessionWantlist() + + if swl.Has([]cid.Cid{c0, c1}).Len() > 0 { + t.Fatal("Expected Has([c0, c1]) to be []") + } + if swl.SessionHas(s0, []cid.Cid{c0, c1}).Len() > 0 { + t.Fatal("Expected SessionHas(s0, [c0, c1]) to be []") + } + + // s0: c0 + swl.Add([]cid.Cid{c0}, s0) + if !matchSet(swl.Has([]cid.Cid{c0, c1}), []cid.Cid{c0}) { + t.Fatal("Expected Has([c0, c1]) to be [c0]") + } + if !matchSet(swl.SessionHas(s0, []cid.Cid{c0, c1}), []cid.Cid{c0}) { + t.Fatal("Expected SessionHas(s0, [c0, c1]) to be [c0]") + } + if swl.SessionHas(s1, []cid.Cid{c0, c1}).Len() > 0 { + t.Fatal("Expected SessionHas(s1, [c0, c1]) to be []") + } + + // s0: c0, c1 + swl.Add([]cid.Cid{c1}, s0) + if !matchSet(swl.Has([]cid.Cid{c0, c1}), []cid.Cid{c0, c1}) { + t.Fatal("Expected Has([c0, c1]) to be [c0, c1]") + } + if !matchSet(swl.SessionHas(s0, []cid.Cid{c0, c1}), []cid.Cid{c0, c1}) { + t.Fatal("Expected SessionHas(s0, [c0, c1]) to be [c0, c1]") + } + + // s0: c0, c1 + // s1: c0 + swl.Add([]cid.Cid{c0}, s1) + if len(swl.Keys()) != 2 { + t.Fatal("Expected Keys() to have length 2") + } + if !matchSet(swl.Has([]cid.Cid{c0, c1}), []cid.Cid{c0, c1}) { + t.Fatal("Expected Has([c0, c1]) to be [c0, c1]") + } + if !matchSet(swl.SessionHas(s0, []cid.Cid{c0, c1}), []cid.Cid{c0, c1}) { + t.Fatal("Expected SessionHas(s0, [c0, c1]) to be [c0, c1]") + } + if !matchSet(swl.SessionHas(s1, []cid.Cid{c0, c1}), []cid.Cid{c0}) { + t.Fatal("Expected SessionHas(s1, [c0, c1]) to be [c0]") + } +} + +func TestSimpleRemoveKeys(t *testing.T) { + swl := NewSessionWantlist() + + // s0: c0, c1 + // s1: c0 + swl.Add([]cid.Cid{c0, c1}, s0) + swl.Add([]cid.Cid{c0}, s1) + + // s0: c1 + swl.RemoveKeys([]cid.Cid{c0}) + if len(swl.Keys()) != 1 { + t.Fatal("Expected Keys() to have length 1") + } + if !swl.Keys()[0].Equals(c1) { + t.Fatal("Expected Keys() to be [cid1]") + } + if len(swl.SessionsFor([]cid.Cid{c0})) != 0 { + t.Fatal("Expected SessionsFor(c0) to be empty") + } + if 
len(swl.SessionsFor([]cid.Cid{c1})) != 1 { + t.Fatal("Expected SessionsFor(c1) to have length 1") + } + if swl.SessionsFor([]cid.Cid{c1})[0] != s0 { + t.Fatal("Expected SessionsFor(c1) to be [s0]") + } +} + +func TestMultiRemoveKeys(t *testing.T) { + swl := NewSessionWantlist() + + // s0: c0, c1 + // s1: c0 + swl.Add([]cid.Cid{c0, c1}, s0) + swl.Add([]cid.Cid{c0}, s1) + + // + swl.RemoveKeys([]cid.Cid{c0, c1}) + if len(swl.Keys()) != 0 { + t.Fatal("Expected Keys() to be empty") + } + if len(swl.SessionsFor([]cid.Cid{c0})) != 0 { + t.Fatal("Expected SessionsFor() to be empty") + } +} + +func TestRemoveSession(t *testing.T) { + swl := NewSessionWantlist() + + // s0: c0, c1 + // s1: c0 + swl.Add([]cid.Cid{c0, c1}, s0) + swl.Add([]cid.Cid{c0}, s1) + + // s1: c0 + swl.RemoveSession(s0) + if len(swl.Keys()) != 1 { + t.Fatal("Expected Keys() to have length 1") + } + if !swl.Keys()[0].Equals(c0) { + t.Fatal("Expected Keys() to be [cid0]") + } + if len(swl.SessionsFor([]cid.Cid{c1})) != 0 { + t.Fatal("Expected SessionsFor(c1) to be empty") + } + if len(swl.SessionsFor([]cid.Cid{c0})) != 1 { + t.Fatal("Expected SessionsFor(c0) to have length 1") + } + if swl.SessionsFor([]cid.Cid{c0})[0] != s1 { + t.Fatal("Expected SessionsFor(c0) to be [s1]") + } +} + +func TestRemoveSessionKeys(t *testing.T) { + swl := NewSessionWantlist() + + // s0: c0, c1, c2 + // s1: c0 + swl.Add([]cid.Cid{c0, c1, c2}, s0) + swl.Add([]cid.Cid{c0}, s1) + + // s0: c2 + // s1: c0 + swl.RemoveSessionKeys(s0, []cid.Cid{c0, c1}) + if !matchSet(swl.SessionHas(s0, []cid.Cid{c0, c1, c2}), []cid.Cid{c2}) { + t.Fatal("Expected SessionHas(s0, [c0, c1, c2]) to be [c2]") + } + if !matchSet(swl.SessionHas(s1, []cid.Cid{c0, c1, c2}), []cid.Cid{c0}) { + t.Fatal("Expected SessionHas(s1, [c0, c1, c2]) to be [c0]") + } +} + +func matchSet(ks1 *cid.Set, ks2 []cid.Cid) bool { + if ks1.Len() != len(ks2) { + return false + } + + for _, k := range ks2 { + if !ks1.Has(k) { + return false + } + } + return true +} diff --git 
a/bitswap/testinstance/testinstance.go b/bitswap/testinstance/testinstance.go index be9eb10f6..f0c855149 100644 --- a/bitswap/testinstance/testinstance.go +++ b/bitswap/testinstance/testinstance.go @@ -55,7 +55,8 @@ func (g *InstanceGenerator) Next() Instance { return NewInstance(g.ctx, g.net, p, g.bsOptions...) } -// Instances creates N test instances of bitswap + dependencies +// Instances creates N test instances of bitswap + dependencies and connects +// them to each other func (g *InstanceGenerator) Instances(n int) []Instance { var instances []Instance for j := 0; j < n; j++ { diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index b6616256f..b49dd80ad 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -4,13 +4,13 @@ import ( bsnet "github.com/ipfs/go-bitswap/network" "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-testing/net" + tnet "github.com/libp2p/go-libp2p-testing/net" ) // Network is an interface for generating bitswap network interfaces // based on a test network. 
type Network interface { - Adapter(tnet.Identity) bsnet.BitSwapNetwork + Adapter(tnet.Identity, ...bsnet.NetOpt) bsnet.BitSwapNetwork HasPeer(peer.ID) bool } diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 350e95eef..89f3d68f0 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -13,7 +13,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs-routing/mock" "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-testing/net" + tnet "github.com/libp2p/go-libp2p-testing/net" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index ffbe10264..5e6430691 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -9,7 +9,7 @@ import ( mockrouting "github.com/ipfs/go-ipfs-routing/mock" "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-testing/net" + tnet "github.com/libp2p/go-libp2p-testing/net" mockpeernet "github.com/libp2p/go-libp2p/p2p/net/mock" ) @@ -23,13 +23,13 @@ func StreamNet(ctx context.Context, net mockpeernet.Mocknet, rs mockrouting.Serv return &peernet{net, rs}, nil } -func (pn *peernet) Adapter(p tnet.Identity) bsnet.BitSwapNetwork { +func (pn *peernet) Adapter(p tnet.Identity, opts ...bsnet.NetOpt) bsnet.BitSwapNetwork { client, err := pn.Mocknet.AddPeer(p.PrivateKey(), p.Address()) if err != nil { panic(err.Error()) } routing := pn.routingserver.ClientWithDatastore(context.TODO(), p, ds.NewMapDatastore()) - return bsnet.NewFromIpfsHost(client, routing) + return bsnet.NewFromIpfsHost(client, routing, opts...) 
} func (pn *peernet) HasPeer(p peer.ID) bool { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 8421c2db9..9a92d1c75 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -14,17 +14,14 @@ import ( cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" - logging "github.com/ipfs/go-log" "github.com/libp2p/go-libp2p-core/connmgr" "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/routing" - "github.com/libp2p/go-libp2p-testing/net" + tnet "github.com/libp2p/go-libp2p-testing/net" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" ) -var log = logging.Logger("bstestnet") - // VirtualNetwork generates a new testnet instance - a fake network that // is used to simulate sending messages. func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { @@ -87,7 +84,7 @@ type receiverQueue struct { lk sync.Mutex } -func (n *network) Adapter(p tnet.Identity) bsnet.BitSwapNetwork { +func (n *network) Adapter(p tnet.Identity, opts ...bsnet.NetOpt) bsnet.BitSwapNetwork { n.mu.Lock() defer n.mu.Unlock() @@ -177,6 +174,10 @@ type networkClient struct { stats bsnet.Stats } +func (nc *networkClient) Self() peer.ID { + return nc.local +} + func (nc *networkClient) SendMessage( ctx context.Context, to peer.ID, @@ -197,7 +198,6 @@ func (nc *networkClient) Stats() bsnet.Stats { // FindProvidersAsync returns a channel of providers for the given key. func (nc *networkClient) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { - // NB: this function duplicates the AddrInfo -> ID transformation in the // bitswap network adapter. Not to worry. This network client will be // deprecated once the ipfsnet.Mock is added. 
The code below is only @@ -240,6 +240,10 @@ func (mp *messagePasser) Reset() error { return nil } +func (mp *messagePasser) SupportsHave() bool { + return true +} + func (nc *networkClient) NewMessageSender(ctx context.Context, p peer.ID) (bsnet.MessageSender, error) { return &messagePasser{ net: nc, @@ -260,7 +264,6 @@ func (nc *networkClient) SetDelegate(r bsnet.Receiver) { func (nc *networkClient) ConnectTo(_ context.Context, p peer.ID) error { nc.network.mu.Lock() - otherClient, ok := nc.network.clients[p] if !ok { nc.network.mu.Unlock() @@ -270,19 +273,38 @@ func (nc *networkClient) ConnectTo(_ context.Context, p peer.ID) error { tag := tagForPeers(nc.local, p) if _, ok := nc.network.conns[tag]; ok { nc.network.mu.Unlock() - log.Warning("ALREADY CONNECTED TO PEER (is this a reconnect? test lib needs fixing)") + // log.Warning("ALREADY CONNECTED TO PEER (is this a reconnect? test lib needs fixing)") return nil } nc.network.conns[tag] = struct{}{} nc.network.mu.Unlock() - // TODO: add handling for disconnects - otherClient.receiver.PeerConnected(nc.local) nc.Receiver.PeerConnected(p) return nil } +func (nc *networkClient) DisconnectFrom(_ context.Context, p peer.ID) error { + nc.network.mu.Lock() + defer nc.network.mu.Unlock() + + otherClient, ok := nc.network.clients[p] + if !ok { + return errors.New("no such peer in network") + } + + tag := tagForPeers(nc.local, p) + if _, ok := nc.network.conns[tag]; !ok { + // Already disconnected + return nil + } + delete(nc.network.conns, tag) + + otherClient.receiver.PeerDisconnected(nc.local) + nc.Receiver.PeerDisconnected(p) + return nil +} + func (rq *receiverQueue) enqueue(m *message) { rq.lk.Lock() defer rq.lk.Unlock() diff --git a/bitswap/testutil/testutil.go b/bitswap/testutil/testutil.go index de6777ff3..9f0c5817e 100644 --- a/bitswap/testutil/testutil.go +++ b/bitswap/testutil/testutil.go @@ -39,17 +39,6 @@ func GenerateCids(n int) []cid.Cid { return cids } -// GenerateWantlist makes a populated wantlist. 
-func GenerateWantlist(n int, ses uint64) *wantlist.SessionTrackedWantlist { - wl := wantlist.NewSessionTrackedWantlist() - for i := 0; i < n; i++ { - prioritySeq++ - entry := wantlist.NewRefEntry(blockGenerator.Next().Cid(), prioritySeq) - wl.AddEntry(entry, ses) - } - return wl -} - // GenerateMessageEntries makes fake bitswap message entries. func GenerateMessageEntries(n int, isCancel bool) []bsmsg.Entry { bsmsgs := make([]bsmsg.Entry, 0, n) @@ -127,3 +116,43 @@ func IndexOf(blks []blocks.Block, c cid.Cid) int { func ContainsBlock(blks []blocks.Block, block blocks.Block) bool { return IndexOf(blks, block.Cid()) != -1 } + +// ContainsKey returns true if a key is found n a list of CIDs. +func ContainsKey(ks []cid.Cid, c cid.Cid) bool { + for _, k := range ks { + if c == k { + return true + } + } + return false +} + +// MatchKeysIgnoreOrder returns true if the lists of CIDs match (even if +// they're in a different order) +func MatchKeysIgnoreOrder(ks1 []cid.Cid, ks2 []cid.Cid) bool { + if len(ks1) != len(ks2) { + return false + } + + for _, k := range ks1 { + if !ContainsKey(ks2, k) { + return false + } + } + return true +} + +// MatchPeersIgnoreOrder returns true if the lists of peers match (even if +// they're in a different order) +func MatchPeersIgnoreOrder(ps1 []peer.ID, ps2 []peer.ID) bool { + if len(ps1) != len(ps2) { + return false + } + + for _, p := range ps1 { + if !ContainsPeer(ps2, p) { + return false + } + } + return true +} diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index b5c2a602c..d891ad0ba 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -5,15 +5,11 @@ package wantlist import ( "sort" + pb "github.com/ipfs/go-bitswap/message/pb" + cid "github.com/ipfs/go-cid" ) -// SessionTrackedWantlist is a list of wants that also track which bitswap -// sessions have requested them -type SessionTrackedWantlist struct { - set map[cid.Cid]*sessionTrackedEntry -} - // Wantlist is a raw list of wanted 
blocks and their priorities type Wantlist struct { set map[cid.Cid]Entry @@ -23,11 +19,7 @@ type Wantlist struct { type Entry struct { Cid cid.Cid Priority int -} - -type sessionTrackedEntry struct { - Entry - sesTrk map[uint64]struct{} + WantType pb.Message_Wantlist_WantType } // NewRefEntry creates a new reference tracked wantlist entry. @@ -35,6 +27,7 @@ func NewRefEntry(c cid.Cid, p int) Entry { return Entry{ Cid: c, Priority: p, + WantType: pb.Message_Wantlist_Block, } } @@ -44,13 +37,6 @@ func (es entrySlice) Len() int { return len(es) } func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] } func (es entrySlice) Less(i, j int) bool { return es[i].Priority > es[j].Priority } -// NewSessionTrackedWantlist generates a new SessionTrackedWantList. -func NewSessionTrackedWantlist() *SessionTrackedWantlist { - return &SessionTrackedWantlist{ - set: make(map[cid.Cid]*sessionTrackedEntry), - } -} - // New generates a new raw Wantlist func New() *Wantlist { return &Wantlist{ @@ -58,136 +44,53 @@ func New() *Wantlist { } } -// Add adds the given cid to the wantlist with the specified priority, governed -// by the session ID 'ses'. if a cid is added under multiple session IDs, then -// it must be removed by each of those sessions before it is no longer 'in the -// wantlist'. Calls to Add are idempotent given the same arguments. Subsequent -// calls with different values for priority will not update the priority. -// TODO: think through priority changes here -// Add returns true if the cid did not exist in the wantlist before this call -// (even if it was under a different session). -func (w *SessionTrackedWantlist) Add(c cid.Cid, priority int, ses uint64) bool { - - if e, ok := w.set[c]; ok { - e.sesTrk[ses] = struct{}{} - return false - } - - w.set[c] = &sessionTrackedEntry{ - Entry: Entry{Cid: c, Priority: priority}, - sesTrk: map[uint64]struct{}{ses: struct{}{}}, - } - - return true -} - -// AddEntry adds given Entry to the wantlist. 
For more information see Add method. -func (w *SessionTrackedWantlist) AddEntry(e Entry, ses uint64) bool { - if ex, ok := w.set[e.Cid]; ok { - ex.sesTrk[ses] = struct{}{} - return false - } - w.set[e.Cid] = &sessionTrackedEntry{ - Entry: e, - sesTrk: map[uint64]struct{}{ses: struct{}{}}, - } - return true -} - -// Remove removes the given cid from being tracked by the given session. -// 'true' is returned if this call to Remove removed the final session ID -// tracking the cid. (meaning true will be returned iff this call caused the -// value of 'Contains(c)' to change from true to false) -func (w *SessionTrackedWantlist) Remove(c cid.Cid, ses uint64) bool { - e, ok := w.set[c] - if !ok { - return false - } - - delete(e.sesTrk, ses) - if len(e.sesTrk) == 0 { - delete(w.set, c) - return true - } - return false -} - -// Contains returns true if the given cid is in the wantlist tracked by one or -// more sessions. -func (w *SessionTrackedWantlist) Contains(k cid.Cid) (Entry, bool) { - e, ok := w.set[k] - if !ok { - return Entry{}, false - } - return e.Entry, true -} - -// Entries returns all wantlist entries for a given session tracked want list. -func (w *SessionTrackedWantlist) Entries() []Entry { - es := make([]Entry, 0, len(w.set)) - for _, e := range w.set { - es = append(es, e.Entry) - } - return es -} - -// SortedEntries returns wantlist entries ordered by priority. -func (w *SessionTrackedWantlist) SortedEntries() []Entry { - es := w.Entries() - sort.Sort(entrySlice(es)) - return es -} - -// Len returns the number of entries in a wantlist. -func (w *SessionTrackedWantlist) Len() int { - return len(w.set) -} - -// CopyWants copies all wants from one SessionTrackWantlist to another (along with -// the session data) -func (w *SessionTrackedWantlist) CopyWants(to *SessionTrackedWantlist) { - for _, e := range w.set { - for k := range e.sesTrk { - to.AddEntry(e.Entry, k) - } - } -} - // Len returns the number of entries in a wantlist. 
func (w *Wantlist) Len() int { return len(w.set) } // Add adds an entry in a wantlist from CID & Priority, if not already present. -func (w *Wantlist) Add(c cid.Cid, priority int) bool { - if _, ok := w.set[c]; ok { +func (w *Wantlist) Add(c cid.Cid, priority int, wantType pb.Message_Wantlist_WantType) bool { + e, ok := w.set[c] + + // Adding want-have should not override want-block + if ok && (e.WantType == pb.Message_Wantlist_Block || wantType == pb.Message_Wantlist_Have) { return false } w.set[c] = Entry{ Cid: c, Priority: priority, + WantType: wantType, } return true } -// AddEntry adds an entry to a wantlist if not already present. -func (w *Wantlist) AddEntry(e Entry) bool { - if _, ok := w.set[e.Cid]; ok { +// Remove removes the given cid from the wantlist. +func (w *Wantlist) Remove(c cid.Cid) bool { + _, ok := w.set[c] + if !ok { return false } - w.set[e.Cid] = e + + delete(w.set, c) return true } -// Remove removes the given cid from the wantlist. -func (w *Wantlist) Remove(c cid.Cid) bool { - _, ok := w.set[c] +// Remove removes the given cid from the wantlist, respecting the type: +// Remove with want-have will not remove an existing want-block. 
+func (w *Wantlist) RemoveType(c cid.Cid, wantType pb.Message_Wantlist_WantType) bool { + e, ok := w.set[c] if !ok { return false } + // Removing want-have should not remove want-block + if e.WantType == pb.Message_Wantlist_Block && wantType == pb.Message_Wantlist_Have { + return false + } + delete(w.set, c) return true } @@ -214,3 +117,10 @@ func (w *Wantlist) SortedEntries() []Entry { sort.Sort(entrySlice(es)) return es } + +// Absorb all the entries in other into this want list +func (w *Wantlist) Absorb(other *Wantlist) { + for _, e := range other.Entries() { + w.Add(e.Cid, e.Priority, e.WantType) + } +} diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/wantlist/wantlist_test.go index 8616efb0e..1139e87ae 100644 --- a/bitswap/wantlist/wantlist_test.go +++ b/bitswap/wantlist/wantlist_test.go @@ -3,6 +3,7 @@ package wantlist import ( "testing" + pb "github.com/ipfs/go-bitswap/message/pb" cid "github.com/ipfs/go-cid" ) @@ -38,21 +39,14 @@ func assertHasCid(t *testing.T, w wli, c cid.Cid) { } } -func assertNotHasCid(t *testing.T, w wli, c cid.Cid) { - _, ok := w.Contains(c) - if ok { - t.Fatal("expected not to have ", c) - } -} - func TestBasicWantlist(t *testing.T) { wl := New() - if !wl.Add(testcids[0], 5) { + if !wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) { t.Fatal("expected true") } assertHasCid(t, wl, testcids[0]) - if !wl.Add(testcids[1], 4) { + if !wl.Add(testcids[1], 4, pb.Message_Wantlist_Block) { t.Fatal("expected true") } assertHasCid(t, wl, testcids[0]) @@ -62,7 +56,7 @@ func TestBasicWantlist(t *testing.T) { t.Fatal("should have had two items") } - if wl.Add(testcids[1], 4) { + if wl.Add(testcids[1], 4, pb.Message_Wantlist_Block) { t.Fatal("add shouldnt report success on second add") } assertHasCid(t, wl, testcids[0]) @@ -72,7 +66,7 @@ func TestBasicWantlist(t *testing.T) { t.Fatal("should have had two items") } - if !wl.Remove(testcids[0]) { + if !wl.RemoveType(testcids[0], pb.Message_Wantlist_Block) { t.Fatal("should have gotten true") 
} @@ -82,23 +76,144 @@ func TestBasicWantlist(t *testing.T) { } } -func TestSessionTrackedWantlist(t *testing.T) { - wl := NewSessionTrackedWantlist() +func TestAddHaveThenBlock(t *testing.T) { + wl := New() - if !wl.Add(testcids[0], 5, 1) { - t.Fatal("should have added") + wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + + e, ok := wl.Contains(testcids[0]) + if !ok { + t.Fatal("expected to have ", testcids[0]) } - assertHasCid(t, wl, testcids[0]) - if wl.Remove(testcids[0], 2) { - t.Fatal("shouldnt have removed") + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected to be ", pb.Message_Wantlist_Block) } - assertHasCid(t, wl, testcids[0]) - if wl.Add(testcids[0], 5, 1) { - t.Fatal("shouldnt have added") +} + +func TestAddBlockThenHave(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) + + e, ok := wl.Contains(testcids[0]) + if !ok { + t.Fatal("expected to have ", testcids[0]) } - assertHasCid(t, wl, testcids[0]) - if !wl.Remove(testcids[0], 1) { - t.Fatal("should have removed") + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected to be ", pb.Message_Wantlist_Block) + } +} + +func TestAddHaveThenRemoveBlock(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) + wl.RemoveType(testcids[0], pb.Message_Wantlist_Block) + + _, ok := wl.Contains(testcids[0]) + if ok { + t.Fatal("expected not to have ", testcids[0]) + } +} + +func TestAddBlockThenRemoveHave(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + wl.RemoveType(testcids[0], pb.Message_Wantlist_Have) + + e, ok := wl.Contains(testcids[0]) + if !ok { + t.Fatal("expected to have ", testcids[0]) + } + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected to be ", pb.Message_Wantlist_Block) + } +} + +func TestAddHaveThenRemoveAny(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 
5, pb.Message_Wantlist_Have) + wl.Remove(testcids[0]) + + _, ok := wl.Contains(testcids[0]) + if ok { + t.Fatal("expected not to have ", testcids[0]) + } +} + +func TestAddBlockThenRemoveAny(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + wl.Remove(testcids[0]) + + _, ok := wl.Contains(testcids[0]) + if ok { + t.Fatal("expected not to have ", testcids[0]) + } +} + +func TestAbsort(t *testing.T) { + wl := New() + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + wl.Add(testcids[1], 4, pb.Message_Wantlist_Have) + wl.Add(testcids[2], 3, pb.Message_Wantlist_Have) + + wl2 := New() + wl2.Add(testcids[0], 2, pb.Message_Wantlist_Have) + wl2.Add(testcids[1], 1, pb.Message_Wantlist_Block) + + wl.Absorb(wl2) + + e, ok := wl.Contains(testcids[0]) + if !ok { + t.Fatal("expected to have ", testcids[0]) + } + if e.Priority != 5 { + t.Fatal("expected priority 5") + } + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected type ", pb.Message_Wantlist_Block) + } + + e, ok = wl.Contains(testcids[1]) + if !ok { + t.Fatal("expected to have ", testcids[1]) + } + if e.Priority != 1 { + t.Fatal("expected priority 1") + } + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected type ", pb.Message_Wantlist_Block) + } + + e, ok = wl.Contains(testcids[2]) + if !ok { + t.Fatal("expected to have ", testcids[2]) + } + if e.Priority != 3 { + t.Fatal("expected priority 3") + } + if e.WantType != pb.Message_Wantlist_Have { + t.Fatal("expected type ", pb.Message_Wantlist_Have) + } +} + +func TestSortedEntries(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 3, pb.Message_Wantlist_Block) + wl.Add(testcids[1], 5, pb.Message_Wantlist_Have) + wl.Add(testcids[2], 4, pb.Message_Wantlist_Have) + + entries := wl.SortedEntries() + if !entries[0].Cid.Equals(testcids[1]) || + !entries[1].Cid.Equals(testcids[2]) || + !entries[2].Cid.Equals(testcids[0]) { + t.Fatal("wrong order") } - assertNotHasCid(t, wl, testcids[0]) } diff --git 
a/bitswap/wantmanager/wantmanager.go b/bitswap/wantmanager/wantmanager.go index f726d6843..009359935 100644 --- a/bitswap/wantmanager/wantmanager.go +++ b/bitswap/wantmanager/wantmanager.go @@ -2,256 +2,112 @@ package wantmanager import ( "context" - "math" - bsmsg "github.com/ipfs/go-bitswap/message" - wantlist "github.com/ipfs/go-bitswap/wantlist" - logging "github.com/ipfs/go-log" + bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" + bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" + "github.com/ipfs/go-bitswap/sessionmanager" + bsswl "github.com/ipfs/go-bitswap/sessionwantlist" cid "github.com/ipfs/go-cid" - metrics "github.com/ipfs/go-metrics-interface" peer "github.com/libp2p/go-libp2p-core/peer" ) -var log = logging.Logger("bitswap") - -const ( - // maxPriority is the max priority as defined by the bitswap protocol - maxPriority = math.MaxInt32 -) - -// PeerHandler sends changes out to the network as they get added to the wantlist -// managed by the WantManager. +// PeerHandler sends wants / cancels to other peers type PeerHandler interface { + // Connected is called when a peer connects, with any initial want-haves + // that have been broadcast to all peers (as part of session discovery) + Connected(p peer.ID, initialWants []cid.Cid) + // Disconnected is called when a peer disconnects Disconnected(p peer.ID) - Connected(p peer.ID, initialWants *wantlist.SessionTrackedWantlist) - SendMessage(entries []bsmsg.Entry, targets []peer.ID, from uint64) + // BroadcastWantHaves sends want-haves to all connected peers + BroadcastWantHaves(ctx context.Context, wantHaves []cid.Cid) + // SendCancels sends cancels to all peers that had previously been sent + // a want-block or want-have for the given key + SendCancels(context.Context, []cid.Cid) } -type wantMessage interface { - handle(wm *WantManager) +// SessionManager receives incoming messages and distributes them to sessions +type SessionManager interface { + ReceiveFrom(p peer.ID, blks []cid.Cid, 
haves []cid.Cid, dontHaves []cid.Cid) []sessionmanager.Session } -// WantManager manages a global want list. It tracks two seperate want lists - -// one for all wants, and one for wants that are specifically broadcast to the -// internet. +// WantManager +// - informs the SessionManager and BlockPresenceManager of incoming information +// and cancelled sessions +// - informs the PeerManager of connects and disconnects +// - manages the list of want-haves that are broadcast to the internet +// (as opposed to being sent to specific peers) type WantManager struct { - // channel requests to the run loop - // to get predictable behavior while running this in a go routine - // having only one channel is neccesary, so requests are processed serially - wantMessages chan wantMessage - - // synchronized by Run loop, only touch inside there - wl *wantlist.SessionTrackedWantlist - bcwl *wantlist.SessionTrackedWantlist + bcwl *bsswl.SessionWantlist - ctx context.Context - cancel func() - - peerHandler PeerHandler - wantlistGauge metrics.Gauge + peerHandler PeerHandler + sim *bssim.SessionInterestManager + bpm *bsbpm.BlockPresenceManager + sm SessionManager } // New initializes a new WantManager for a given context. -func New(ctx context.Context, peerHandler PeerHandler) *WantManager { - ctx, cancel := context.WithCancel(ctx) - wantlistGauge := metrics.NewCtx(ctx, "wantlist_total", - "Number of items in wantlist.").Gauge() +func New(ctx context.Context, peerHandler PeerHandler, sim *bssim.SessionInterestManager, bpm *bsbpm.BlockPresenceManager) *WantManager { return &WantManager{ - wantMessages: make(chan wantMessage, 10), - wl: wantlist.NewSessionTrackedWantlist(), - bcwl: wantlist.NewSessionTrackedWantlist(), - ctx: ctx, - cancel: cancel, - peerHandler: peerHandler, - wantlistGauge: wantlistGauge, + bcwl: bsswl.NewSessionWantlist(), + peerHandler: peerHandler, + sim: sim, + bpm: bpm, } } -// WantBlocks adds the given cids to the wantlist, tracked by the given session. 
-func (wm *WantManager) WantBlocks(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) { - log.Debugf("[wantlist] want blocks; cids=%s, peers=%s, ses=%d", ks, peers, ses) - wm.addEntries(ctx, ks, peers, false, ses) +func (wm *WantManager) SetSessionManager(sm SessionManager) { + wm.sm = sm } -// CancelWants removes the given cids from the wantlist, tracked by the given session. -func (wm *WantManager) CancelWants(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) { - log.Debugf("[wantlist] unwant blocks; cids=%s, peers=%s, ses=%d", ks, peers, ses) - wm.addEntries(context.Background(), ks, peers, true, ses) +// ReceiveFrom is called when a new message is received +func (wm *WantManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { + // Record block presence for HAVE / DONT_HAVE + wm.bpm.ReceiveFrom(p, haves, dontHaves) + // Inform interested sessions + wm.sm.ReceiveFrom(p, blks, haves, dontHaves) + // Remove received blocks from broadcast wantlist + wm.bcwl.RemoveKeys(blks) + // Send CANCEL to all peers with want-have / want-block + wm.peerHandler.SendCancels(ctx, blks) } -// CurrentWants returns the list of current wants. -func (wm *WantManager) CurrentWants() []wantlist.Entry { - resp := make(chan []wantlist.Entry, 1) - select { - case wm.wantMessages <- ¤tWantsMessage{resp}: - case <-wm.ctx.Done(): - return nil - } - select { - case wantlist := <-resp: - return wantlist - case <-wm.ctx.Done(): - return nil - } -} +// BroadcastWantHaves is called when want-haves should be broadcast to all +// connected peers (as part of session discovery) +func (wm *WantManager) BroadcastWantHaves(ctx context.Context, ses uint64, wantHaves []cid.Cid) { + // log.Warningf("BroadcastWantHaves session%d: %s", ses, wantHaves) -// CurrentBroadcastWants returns the current list of wants that are broadcasts. 
-func (wm *WantManager) CurrentBroadcastWants() []wantlist.Entry { - resp := make(chan []wantlist.Entry, 1) - select { - case wm.wantMessages <- ¤tBroadcastWantsMessage{resp}: - case <-wm.ctx.Done(): - return nil - } - select { - case wl := <-resp: - return wl - case <-wm.ctx.Done(): - return nil - } -} + // Record broadcast wants + wm.bcwl.Add(wantHaves, ses) -// WantCount returns the total count of wants. -func (wm *WantManager) WantCount() int { - resp := make(chan int, 1) - select { - case wm.wantMessages <- &wantCountMessage{resp}: - case <-wm.ctx.Done(): - return 0 - } - select { - case count := <-resp: - return count - case <-wm.ctx.Done(): - return 0 - } + // Send want-haves to all peers + wm.peerHandler.BroadcastWantHaves(ctx, wantHaves) } -// Connected is called when a new peer is connected -func (wm *WantManager) Connected(p peer.ID) { - select { - case wm.wantMessages <- &connectedMessage{p}: - case <-wm.ctx.Done(): - } -} +// RemoveSession is called when the session is shut down +func (wm *WantManager) RemoveSession(ctx context.Context, ses uint64) { + // Remove session's interest in the given blocks + cancelKs := wm.sim.RemoveSessionInterest(ses) -// Disconnected is called when a peer is disconnected -func (wm *WantManager) Disconnected(p peer.ID) { - select { - case wm.wantMessages <- &disconnectedMessage{p}: - case <-wm.ctx.Done(): - } -} + // Remove broadcast want-haves for session + wm.bcwl.RemoveSession(ses) -// Startup starts processing for the WantManager. -func (wm *WantManager) Startup() { - go wm.run() -} + // Free up block presence tracking for keys that no session is interested + // in anymore + wm.bpm.RemoveKeys(cancelKs) -// Shutdown ends processing for the want manager. -func (wm *WantManager) Shutdown() { - wm.cancel() -} - -func (wm *WantManager) run() { - // NOTE: Do not open any streams or connections from anywhere in this - // event loop. Really, just don't do anything likely to block. 
- for { - select { - case message := <-wm.wantMessages: - message.handle(wm) - case <-wm.ctx.Done(): - return - } - } -} - -func (wm *WantManager) addEntries(ctx context.Context, ks []cid.Cid, targets []peer.ID, cancel bool, ses uint64) { - entries := make([]bsmsg.Entry, 0, len(ks)) - for i, k := range ks { - entries = append(entries, bsmsg.Entry{ - Cancel: cancel, - Entry: wantlist.NewRefEntry(k, maxPriority-i), - }) - } - select { - case wm.wantMessages <- &wantSet{entries: entries, targets: targets, from: ses}: - case <-wm.ctx.Done(): - case <-ctx.Done(): - } + // Send CANCEL to all peers for blocks that no session is interested in anymore + wm.peerHandler.SendCancels(ctx, cancelKs) } -type wantSet struct { - entries []bsmsg.Entry - targets []peer.ID - from uint64 -} - -func (ws *wantSet) handle(wm *WantManager) { - // is this a broadcast or not? - brdc := len(ws.targets) == 0 - - // add changes to our wantlist - for _, e := range ws.entries { - if e.Cancel { - if brdc { - wm.bcwl.Remove(e.Cid, ws.from) - } - - if wm.wl.Remove(e.Cid, ws.from) { - wm.wantlistGauge.Dec() - } - } else { - if brdc { - wm.bcwl.AddEntry(e.Entry, ws.from) - } - if wm.wl.AddEntry(e.Entry, ws.from) { - wm.wantlistGauge.Inc() - } - } - } - - // broadcast those wantlist changes - wm.peerHandler.SendMessage(ws.entries, ws.targets, ws.from) -} - -type currentWantsMessage struct { - resp chan<- []wantlist.Entry -} - -func (cwm *currentWantsMessage) handle(wm *WantManager) { - cwm.resp <- wm.wl.Entries() -} - -type currentBroadcastWantsMessage struct { - resp chan<- []wantlist.Entry -} - -func (cbcwm *currentBroadcastWantsMessage) handle(wm *WantManager) { - cbcwm.resp <- wm.bcwl.Entries() -} - -type wantCountMessage struct { - resp chan<- int -} - -func (wcm *wantCountMessage) handle(wm *WantManager) { - wcm.resp <- wm.wl.Len() -} - -type connectedMessage struct { - p peer.ID -} - -func (cm *connectedMessage) handle(wm *WantManager) { - wm.peerHandler.Connected(cm.p, wm.bcwl) -} - -type 
disconnectedMessage struct { - p peer.ID +// Connected is called when a new peer connects +func (wm *WantManager) Connected(p peer.ID) { + // Tell the peer handler that there is a new connection and give it the + // list of outstanding broadcast wants + wm.peerHandler.Connected(p, wm.bcwl.Keys()) } -func (dm *disconnectedMessage) handle(wm *WantManager) { - wm.peerHandler.Disconnected(dm.p) +// Disconnected is called when a peer disconnects +func (wm *WantManager) Disconnected(p peer.ID) { + wm.peerHandler.Disconnected(p) } diff --git a/bitswap/wantmanager/wantmanager_test.go b/bitswap/wantmanager/wantmanager_test.go index a721e24ab..b4e7cd585 100644 --- a/bitswap/wantmanager/wantmanager_test.go +++ b/bitswap/wantmanager/wantmanager_test.go @@ -2,217 +2,236 @@ package wantmanager import ( "context" - "reflect" - "sync" "testing" + bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" + bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" + "github.com/ipfs/go-bitswap/sessionmanager" "github.com/ipfs/go-bitswap/testutil" - wantlist "github.com/ipfs/go-bitswap/wantlist" - bsmsg "github.com/ipfs/go-bitswap/message" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" ) type fakePeerHandler struct { - lk sync.RWMutex - lastWantSet wantSet + lastInitialWants []cid.Cid + lastBcstWants []cid.Cid + lastCancels []cid.Cid } -func (fph *fakePeerHandler) SendMessage(entries []bsmsg.Entry, targets []peer.ID, from uint64) { - fph.lk.Lock() - fph.lastWantSet = wantSet{entries, targets, from} - fph.lk.Unlock() +func (fph *fakePeerHandler) Connected(p peer.ID, initialWants []cid.Cid) { + fph.lastInitialWants = initialWants } +func (fph *fakePeerHandler) Disconnected(p peer.ID) { -func (fph *fakePeerHandler) Connected(p peer.ID, initialWants *wantlist.SessionTrackedWantlist) {} -func (fph *fakePeerHandler) Disconnected(p peer.ID) {} - -func (fph *fakePeerHandler) getLastWantSet() wantSet { - fph.lk.Lock() - defer fph.lk.Unlock() - return fph.lastWantSet } - -func 
setupTestFixturesAndInitialWantList() ( - context.Context, *fakePeerHandler, *WantManager, []cid.Cid, []cid.Cid, []peer.ID, uint64, uint64) { - ctx := context.Background() - - // setup fixtures - wantSender := &fakePeerHandler{} - wantManager := New(ctx, wantSender) - keys := testutil.GenerateCids(10) - otherKeys := testutil.GenerateCids(5) - peers := testutil.GeneratePeers(10) - session := testutil.GenerateSessionID() - otherSession := testutil.GenerateSessionID() - - // startup wantManager - wantManager.Startup() - - // add initial wants - wantManager.WantBlocks( - ctx, - keys, - peers, - session) - - return ctx, wantSender, wantManager, keys, otherKeys, peers, session, otherSession +func (fph *fakePeerHandler) BroadcastWantHaves(ctx context.Context, wantHaves []cid.Cid) { + fph.lastBcstWants = wantHaves +} +func (fph *fakePeerHandler) SendCancels(ctx context.Context, cancels []cid.Cid) { + fph.lastCancels = cancels } -func TestInitialWantsAddedCorrectly(t *testing.T) { +type fakeSessionManager struct { +} - _, wantSender, wantManager, keys, _, peers, session, _ := - setupTestFixturesAndInitialWantList() +func (*fakeSessionManager) ReceiveFrom(p peer.ID, blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) []sessionmanager.Session { + return nil +} - bcwl := wantManager.CurrentBroadcastWants() - wl := wantManager.CurrentWants() +func TestInitialBroadcastWantsAddedCorrectly(t *testing.T) { + ctx := context.Background() + ph := &fakePeerHandler{} + sim := bssim.New() + bpm := bsbpm.New() + wm := New(context.Background(), ph, sim, bpm) + sm := &fakeSessionManager{} + wm.SetSessionManager(sm) - if len(bcwl) > 0 { - t.Fatal("should not create broadcast wants when peers are specified") - } + peers := testutil.GeneratePeers(3) - if len(wl) != len(keys) { - t.Fatal("did not add correct number of wants to want lsit") + // Connect peer 0. Should not receive anything yet. 
+ wm.Connected(peers[0]) + if len(ph.lastInitialWants) != 0 { + t.Fatal("expected no initial wants") } - generatedWantSet := wantSender.getLastWantSet() - - if len(generatedWantSet.entries) != len(keys) { - t.Fatal("incorrect wants sent") + // Broadcast 2 wants + wantHaves := testutil.GenerateCids(2) + wm.BroadcastWantHaves(ctx, 1, wantHaves) + if len(ph.lastBcstWants) != 2 { + t.Fatal("expected broadcast wants") } - for _, entry := range generatedWantSet.entries { - if entry.Cancel { - t.Fatal("did not send only non-cancel messages") - } + // Connect peer 1. Should receive all wants broadcast so far. + wm.Connected(peers[1]) + if len(ph.lastInitialWants) != 2 { + t.Fatal("expected broadcast wants") } - if generatedWantSet.from != session { - t.Fatal("incorrect session used in sending") + // Broadcast 3 more wants + wantHaves2 := testutil.GenerateCids(3) + wm.BroadcastWantHaves(ctx, 2, wantHaves2) + if len(ph.lastBcstWants) != 3 { + t.Fatal("expected broadcast wants") } - if !reflect.DeepEqual(generatedWantSet.targets, peers) { - t.Fatal("did not setup peers correctly") + // Connect peer 2. Should receive all wants broadcast so far. 
+ wm.Connected(peers[2]) + if len(ph.lastInitialWants) != 5 { + t.Fatal("expected all wants to be broadcast") } - - wantManager.Shutdown() } -func TestCancellingWants(t *testing.T) { - ctx, wantSender, wantManager, keys, _, peers, session, _ := - setupTestFixturesAndInitialWantList() - - wantManager.CancelWants(ctx, keys, peers, session) - - wl := wantManager.CurrentWants() - - if len(wl) != 0 { - t.Fatal("did not remove blocks from want list") - } - - generatedWantSet := wantSender.getLastWantSet() - - if len(generatedWantSet.entries) != len(keys) { - t.Fatal("incorrect wants sent") - } +func TestReceiveFromRemovesBroadcastWants(t *testing.T) { + ctx := context.Background() + ph := &fakePeerHandler{} + sim := bssim.New() + bpm := bsbpm.New() + wm := New(context.Background(), ph, sim, bpm) + sm := &fakeSessionManager{} + wm.SetSessionManager(sm) - for _, entry := range generatedWantSet.entries { - if !entry.Cancel { - t.Fatal("did not send only cancel messages") - } - } + peers := testutil.GeneratePeers(3) - if generatedWantSet.from != session { - t.Fatal("incorrect session used in sending") + // Broadcast 2 wants + cids := testutil.GenerateCids(2) + wm.BroadcastWantHaves(ctx, 1, cids) + if len(ph.lastBcstWants) != 2 { + t.Fatal("expected broadcast wants") } - if !reflect.DeepEqual(generatedWantSet.targets, peers) { - t.Fatal("did not setup peers correctly") + // Connect peer 0. Should receive all wants. 
+ wm.Connected(peers[0]) + if len(ph.lastInitialWants) != 2 { + t.Fatal("expected broadcast wants") } - wantManager.Shutdown() - -} - -func TestCancellingWantsFromAnotherSessionHasNoEffect(t *testing.T) { - ctx, _, wantManager, keys, _, peers, _, otherSession := - setupTestFixturesAndInitialWantList() - - // cancelling wants from another session has no effect - wantManager.CancelWants(ctx, keys, peers, otherSession) - - wl := wantManager.CurrentWants() + // Receive block for first want + ks := cids[0:1] + haves := []cid.Cid{} + dontHaves := []cid.Cid{} + wm.ReceiveFrom(ctx, peers[1], ks, haves, dontHaves) - if len(wl) != len(keys) { - t.Fatal("should not cancel wants unless they match session that made them") + // Connect peer 2. Should get remaining want (the one that the block has + // not yet been received for). + wm.Connected(peers[2]) + if len(ph.lastInitialWants) != 1 { + t.Fatal("expected remaining wants") } - - wantManager.Shutdown() } -func TestAddingWantsWithNoPeersAddsToBroadcastAndRegularWantList(t *testing.T) { - ctx, _, wantManager, keys, otherKeys, _, session, _ := - setupTestFixturesAndInitialWantList() - - wantManager.WantBlocks(ctx, otherKeys, nil, session) - - bcwl := wantManager.CurrentBroadcastWants() - wl := wantManager.CurrentWants() - - if len(bcwl) != len(otherKeys) { - t.Fatal("want requests with no peers should get added to broadcast list") - } - - if len(wl) != len(otherKeys)+len(keys) { - t.Fatal("want requests with no peers should get added to regular want list") +func TestRemoveSessionRemovesBroadcastWants(t *testing.T) { + ctx := context.Background() + ph := &fakePeerHandler{} + sim := bssim.New() + bpm := bsbpm.New() + wm := New(context.Background(), ph, sim, bpm) + sm := &fakeSessionManager{} + wm.SetSessionManager(sm) + + peers := testutil.GeneratePeers(2) + + // Broadcast 2 wants for session 0 and 2 wants for session 1 + ses0 := uint64(0) + ses1 := uint64(1) + ses0wants := testutil.GenerateCids(2) + ses1wants := 
testutil.GenerateCids(2) + wm.BroadcastWantHaves(ctx, ses0, ses0wants) + wm.BroadcastWantHaves(ctx, ses1, ses1wants) + + // Connect peer 0. Should receive all wants. + wm.Connected(peers[0]) + if len(ph.lastInitialWants) != 4 { + t.Fatal("expected broadcast wants") + } + + // Remove session 0 + wm.RemoveSession(ctx, ses0) + + // Connect peer 1. Should receive all wants from session that has not been + // removed. + wm.Connected(peers[1]) + if len(ph.lastInitialWants) != 2 { + t.Fatal("expected broadcast wants") } - - wantManager.Shutdown() } -func TestAddingRequestFromSecondSessionPreventsCancel(t *testing.T) { - ctx, wantSender, wantManager, keys, _, peers, session, otherSession := - setupTestFixturesAndInitialWantList() - - // add a second session requesting the first key - firstKeys := append([]cid.Cid(nil), keys[0]) - wantManager.WantBlocks(ctx, firstKeys, peers, otherSession) +func TestReceiveFrom(t *testing.T) { + ctx := context.Background() + ph := &fakePeerHandler{} + sim := bssim.New() + bpm := bsbpm.New() + wm := New(context.Background(), ph, sim, bpm) + sm := &fakeSessionManager{} + wm.SetSessionManager(sm) - wl := wantManager.CurrentWants() + p := testutil.GeneratePeers(1)[0] + ks := testutil.GenerateCids(2) + haves := testutil.GenerateCids(2) + dontHaves := testutil.GenerateCids(2) + wm.ReceiveFrom(ctx, p, ks, haves, dontHaves) - if len(wl) != len(keys) { - t.Fatal("wants from other sessions should not get added seperately") + if !bpm.PeerHasBlock(p, haves[0]) { + t.Fatal("expected block presence manager to be invoked") } - - generatedWantSet := wantSender.getLastWantSet() - if len(generatedWantSet.entries) != len(firstKeys) && - generatedWantSet.from != otherSession && - generatedWantSet.entries[0].Cid != firstKeys[0] && - generatedWantSet.entries[0].Cancel != false { - t.Fatal("should send additional message requesting want for new session") + if !bpm.PeerDoesNotHaveBlock(p, dontHaves[0]) { + t.Fatal("expected block presence manager to be invoked") } 
- - // cancel block from first session - wantManager.CancelWants(ctx, firstKeys, peers, session) - - wl = wantManager.CurrentWants() - - // want should still be on want list - if len(wl) != len(keys) { - t.Fatal("wants should not be removed until all sessions cancel wants") + if len(ph.lastCancels) != len(ks) { + t.Fatal("expected received blocks to be cancelled") } +} - // cancel other block from first session - secondKeys := append([]cid.Cid(nil), keys[1]) - wantManager.CancelWants(ctx, secondKeys, peers, session) - - wl = wantManager.CurrentWants() - - // want should not be on want list, cause it was only tracked by one session - if len(wl) != len(keys)-1 { - t.Fatal("wants should be removed if all sessions have cancelled") +func TestRemoveSession(t *testing.T) { + ctx := context.Background() + ph := &fakePeerHandler{} + sim := bssim.New() + bpm := bsbpm.New() + wm := New(context.Background(), ph, sim, bpm) + sm := &fakeSessionManager{} + wm.SetSessionManager(sm) + + // Record session interest in 2 keys for session 0 and 2 keys for session 1 + // with 1 overlapping key + cids := testutil.GenerateCids(3) + ses0 := uint64(0) + ses1 := uint64(1) + ses0ks := cids[:2] + ses1ks := cids[1:] + sim.RecordSessionInterest(ses0, ses0ks) + sim.RecordSessionInterest(ses1, ses1ks) + + // Receive HAVE for all keys + p := testutil.GeneratePeers(1)[0] + ks := []cid.Cid{} + haves := append(ses0ks, ses1ks...) 
+ dontHaves := []cid.Cid{} + wm.ReceiveFrom(ctx, p, ks, haves, dontHaves) + + // Remove session 0 + wm.RemoveSession(ctx, ses0) + + // Expect session 0 interest to be removed and session 1 interest to be + // unchanged + if len(sim.FilterSessionInterested(ses0, ses0ks)[0]) != 0 { + t.Fatal("expected session 0 interest to be removed") + } + if len(sim.FilterSessionInterested(ses1, ses1ks)[0]) != len(ses1ks) { + t.Fatal("expected session 1 interest to be unchanged") + } + + // Should clear block presence for key that was in session 0 and not + // in session 1 + if bpm.PeerHasBlock(p, ses0ks[0]) { + t.Fatal("expected block presence manager to be cleared") + } + if !bpm.PeerHasBlock(p, ses0ks[1]) { + t.Fatal("expected block presence manager to be unchanged for overlapping key") + } + + // Should cancel key that was in session 0 and not session 1 + if len(ph.lastCancels) != 1 || !ph.lastCancels[0].Equals(cids[0]) { + t.Fatal("expected removed want-have to be cancelled") } - - wantManager.Shutdown() } diff --git a/bitswap/workers.go b/bitswap/workers.go index fb3dc019f..2028c4dfc 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -2,9 +2,11 @@ package bitswap import ( "context" + "fmt" engine "github.com/ipfs/go-bitswap/decision" bsmsg "github.com/ipfs/go-bitswap/message" + pb "github.com/ipfs/go-bitswap/message/pb" cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" process "github.com/jbenet/goprocess" @@ -50,6 +52,7 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { if !ok { continue } + // update the BS ledger to reflect sent message // TODO: Should only track *useful* messages in ledger outgoing := bsmsg.New(false) @@ -63,6 +66,10 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { })) outgoing.AddBlock(block) } + for _, blockPresence := range envelope.Message.BlockPresences() { + outgoing.AddBlockPresence(blockPresence.Cid, blockPresence.Type) + } + // TODO: Only record message as sent if there was no error? 
bs.engine.MessageSent(envelope.Peer, outgoing) bs.sendBlocks(ctx, envelope) @@ -88,6 +95,21 @@ func (bs *Bitswap) sendBlocks(ctx context.Context, env *engine.Envelope) { msgSize := 0 msg := bsmsg.New(false) + + for _, blockPresence := range env.Message.BlockPresences() { + c := blockPresence.Cid + switch blockPresence.Type { + case pb.Message_Have: + log.Infof("Sending HAVE %s to %s", c.String()[2:8], env.Peer) + case pb.Message_DontHave: + log.Infof("Sending DONT_HAVE %s to %s", c.String()[2:8], env.Peer) + default: + panic(fmt.Sprintf("unrecognized BlockPresence type %v", blockPresence.Type)) + } + + msgSize += bsmsg.BlockPresenceSize(c) + msg.AddBlockPresence(c, blockPresence.Type) + } for _, block := range env.Message.Blocks() { msgSize += len(block.RawData()) msg.AddBlock(block) @@ -97,8 +119,10 @@ func (bs *Bitswap) sendBlocks(ctx context.Context, env *engine.Envelope) { bs.sentHistogram.Observe(float64(msgSize)) err := bs.network.SendMessage(ctx, env.Peer, msg) if err != nil { - log.Infof("sendblock error: %s", err) + // log.Infof("sendblock error: %s", err) + log.Errorf("SendMessage error: %s. size: %d. block-presence length: %d", err, msg.Size(), len(env.Message.BlockPresences())) } + log.Infof("Sent message to %s", env.Peer) } func (bs *Bitswap) provideWorker(px process.Process) { From e86e2d2229f6c4ad0e54b1362f43da580fdebdb2 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 30 Jan 2020 15:57:45 -0800 Subject: [PATCH 0830/1035] feat: move internals to an internal package This makes reading the docs much easier as it's clear what's "private" and what's not. 
fixes #238 This commit was moved from ipfs/go-bitswap@bbf65296b1a3a5bc76ee812ee6c5438a6c3dbb24 --- bitswap/benchmarks_test.go | 8 +++---- bitswap/bitswap.go | 24 +++++++++---------- bitswap/bitswap_test.go | 8 +++---- bitswap/bitswap_with_sessions_test.go | 4 ++-- .../blockpresencemanager.go | 0 .../blockpresencemanager_test.go | 2 +- .../decision/blockstoremanager.go | 0 .../decision/blockstoremanager_test.go | 2 +- bitswap/{ => internal}/decision/engine.go | 0 .../{ => internal}/decision/engine_test.go | 4 ++-- bitswap/{ => internal}/decision/ewma.go | 0 bitswap/{ => internal}/decision/ledger.go | 0 bitswap/{ => internal}/decision/taskmerger.go | 0 .../decision/taskmerger_test.go | 2 +- bitswap/{ => internal}/getter/getter.go | 2 +- bitswap/{ => internal}/logutil/logutil.go | 0 .../messagequeue/messagequeue.go | 0 .../messagequeue/messagequeue_test.go | 2 +- .../notifications/notifications.go | 0 .../notifications/notifications_test.go | 0 .../{ => internal}/peermanager/peermanager.go | 0 .../peermanager/peermanager_test.go | 2 +- .../peermanager/peerwantmanager.go | 2 +- .../peermanager/peerwantmanager_test.go | 2 +- .../providerquerymanager.go | 0 .../providerquerymanager_test.go | 2 +- bitswap/{ => internal}/session/cidqueue.go | 0 .../session/peeravailabilitymanager.go | 0 .../session/peeravailabilitymanager_test.go | 2 +- .../session/peerresponsetracker.go | 0 .../session/peerresponsetracker_test.go | 2 +- .../session/sentwantblockstracker.go | 0 .../session/sentwantblockstracker_test.go | 2 +- bitswap/{ => internal}/session/session.go | 12 +++++----- .../{ => internal}/session/session_test.go | 10 ++++---- .../{ => internal}/session/sessionwants.go | 0 .../session/sessionwants_test.go | 2 +- .../session/sessionwantsender.go | 2 +- .../session/sessionwantsender_test.go | 6 ++--- .../{ => internal}/session/wantinfo_test.go | 2 +- .../{ => internal}/sessiondata/sessiondata.go | 0 .../sessioninterestmanager.go | 2 +- .../sessioninterestmanager_test.go | 2 +- 
.../sessionmanager/sessionmanager.go | 8 +++---- .../sessionmanager/sessionmanager_test.go | 10 ++++---- .../sessionpeermanager/latencytracker.go | 0 .../sessionpeermanager/peerdata.go | 0 .../sessionpeermanager/sessionpeermanager.go | 2 +- .../sessionpeermanager_test.go | 2 +- .../sessionrequestsplitter.go | 2 +- .../sessionrequestsplitter_test.go | 2 +- .../sessionwantlist/sessionwantlist.go | 0 .../sessionwantlist/sessionwantlist_test.go | 2 +- .../testinstance/testinstance.go | 2 +- bitswap/{ => internal}/testnet/interface.go | 0 .../internet_latency_delay_generator.go | 0 .../internet_latency_delay_generator_test.go | 0 .../{ => internal}/testnet/network_test.go | 0 bitswap/{ => internal}/testnet/peernet.go | 0 .../testnet/rate_limit_generators.go | 0 bitswap/{ => internal}/testnet/virtual.go | 0 bitswap/{ => internal}/testutil/testutil.go | 2 +- .../{ => internal}/testutil/testutil_test.go | 0 .../{ => internal}/wantmanager/wantmanager.go | 8 +++---- .../wantmanager/wantmanager_test.go | 8 +++---- bitswap/network/ipfs_impl_test.go | 2 +- bitswap/workers.go | 2 +- 67 files changed, 81 insertions(+), 81 deletions(-) rename bitswap/{ => internal}/blockpresencemanager/blockpresencemanager.go (100%) rename bitswap/{ => internal}/blockpresencemanager/blockpresencemanager_test.go (99%) rename bitswap/{ => internal}/decision/blockstoremanager.go (100%) rename bitswap/{ => internal}/decision/blockstoremanager_test.go (99%) rename bitswap/{ => internal}/decision/engine.go (100%) rename bitswap/{ => internal}/decision/engine_test.go (99%) rename bitswap/{ => internal}/decision/ewma.go (100%) rename bitswap/{ => internal}/decision/ledger.go (100%) rename bitswap/{ => internal}/decision/taskmerger.go (100%) rename bitswap/{ => internal}/decision/taskmerger_test.go (99%) rename bitswap/{ => internal}/getter/getter.go (98%) rename bitswap/{ => internal}/logutil/logutil.go (100%) rename bitswap/{ => internal}/messagequeue/messagequeue.go (100%) rename bitswap/{ => 
internal}/messagequeue/messagequeue_test.go (99%) rename bitswap/{ => internal}/notifications/notifications.go (100%) rename bitswap/{ => internal}/notifications/notifications_test.go (100%) rename bitswap/{ => internal}/peermanager/peermanager.go (100%) rename bitswap/{ => internal}/peermanager/peermanager_test.go (99%) rename bitswap/{ => internal}/peermanager/peerwantmanager.go (99%) rename bitswap/{ => internal}/peermanager/peerwantmanager_test.go (99%) rename bitswap/{ => internal}/providerquerymanager/providerquerymanager.go (100%) rename bitswap/{ => internal}/providerquerymanager/providerquerymanager_test.go (99%) rename bitswap/{ => internal}/session/cidqueue.go (100%) rename bitswap/{ => internal}/session/peeravailabilitymanager.go (100%) rename bitswap/{ => internal}/session/peeravailabilitymanager_test.go (97%) rename bitswap/{ => internal}/session/peerresponsetracker.go (100%) rename bitswap/{ => internal}/session/peerresponsetracker_test.go (98%) rename bitswap/{ => internal}/session/sentwantblockstracker.go (100%) rename bitswap/{ => internal}/session/sentwantblockstracker_test.go (93%) rename bitswap/{ => internal}/session/session.go (96%) rename bitswap/{ => internal}/session/session_test.go (97%) rename bitswap/{ => internal}/session/sessionwants.go (100%) rename bitswap/{ => internal}/session/sessionwants_test.go (98%) rename bitswap/{ => internal}/session/sessionwantsender.go (99%) rename bitswap/{ => internal}/session/sessionwantsender_test.go (98%) rename bitswap/{ => internal}/session/wantinfo_test.go (97%) rename bitswap/{ => internal}/sessiondata/sessiondata.go (100%) rename bitswap/{ => internal}/sessioninterestmanager/sessioninterestmanager.go (97%) rename bitswap/{ => internal}/sessioninterestmanager/sessioninterestmanager_test.go (98%) rename bitswap/{ => internal}/sessionmanager/sessionmanager.go (93%) rename bitswap/{ => internal}/sessionmanager/sessionmanager_test.go (95%) rename bitswap/{ => 
internal}/sessionpeermanager/latencytracker.go (100%) rename bitswap/{ => internal}/sessionpeermanager/peerdata.go (100%) rename bitswap/{ => internal}/sessionpeermanager/sessionpeermanager.go (99%) rename bitswap/{ => internal}/sessionpeermanager/sessionpeermanager_test.go (99%) rename bitswap/{ => internal}/sessionrequestsplitter/sessionrequestsplitter.go (98%) rename bitswap/{ => internal}/sessionrequestsplitter/sessionrequestsplitter_test.go (98%) rename bitswap/{ => internal}/sessionwantlist/sessionwantlist.go (100%) rename bitswap/{ => internal}/sessionwantlist/sessionwantlist_test.go (99%) rename bitswap/{ => internal}/testinstance/testinstance.go (98%) rename bitswap/{ => internal}/testnet/interface.go (100%) rename bitswap/{ => internal}/testnet/internet_latency_delay_generator.go (100%) rename bitswap/{ => internal}/testnet/internet_latency_delay_generator_test.go (100%) rename bitswap/{ => internal}/testnet/network_test.go (100%) rename bitswap/{ => internal}/testnet/peernet.go (100%) rename bitswap/{ => internal}/testnet/rate_limit_generators.go (100%) rename bitswap/{ => internal}/testnet/virtual.go (100%) rename bitswap/{ => internal}/testutil/testutil.go (98%) rename bitswap/{ => internal}/testutil/testutil_test.go (100%) rename bitswap/{ => internal}/wantmanager/wantmanager.go (93%) rename bitswap/{ => internal}/wantmanager/wantmanager_test.go (96%) diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index 501488ded..e56214d96 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -13,13 +13,13 @@ import ( "testing" "time" - "github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-bitswap/internal/testutil" blocks "github.com/ipfs/go-block-format" bitswap "github.com/ipfs/go-bitswap" - bssession "github.com/ipfs/go-bitswap/session" - testinstance "github.com/ipfs/go-bitswap/testinstance" - tn "github.com/ipfs/go-bitswap/testnet" + bssession "github.com/ipfs/go-bitswap/internal/session" + testinstance 
"github.com/ipfs/go-bitswap/internal/testinstance" + tn "github.com/ipfs/go-bitswap/internal/testnet" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d607274df..2bc7a189c 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -11,20 +11,20 @@ import ( delay "github.com/ipfs/go-ipfs-delay" - bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" - decision "github.com/ipfs/go-bitswap/decision" - bsgetter "github.com/ipfs/go-bitswap/getter" + bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" + decision "github.com/ipfs/go-bitswap/internal/decision" + bsgetter "github.com/ipfs/go-bitswap/internal/getter" bsmsg "github.com/ipfs/go-bitswap/message" - bsmq "github.com/ipfs/go-bitswap/messagequeue" + bsmq "github.com/ipfs/go-bitswap/internal/messagequeue" bsnet "github.com/ipfs/go-bitswap/network" - notifications "github.com/ipfs/go-bitswap/notifications" - bspm "github.com/ipfs/go-bitswap/peermanager" - bspqm "github.com/ipfs/go-bitswap/providerquerymanager" - bssession "github.com/ipfs/go-bitswap/session" - bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" - bssm "github.com/ipfs/go-bitswap/sessionmanager" - bsspm "github.com/ipfs/go-bitswap/sessionpeermanager" - bswm "github.com/ipfs/go-bitswap/wantmanager" + notifications "github.com/ipfs/go-bitswap/internal/notifications" + bspm "github.com/ipfs/go-bitswap/internal/peermanager" + bspqm "github.com/ipfs/go-bitswap/internal/providerquerymanager" + bssession "github.com/ipfs/go-bitswap/internal/session" + bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" + bssm "github.com/ipfs/go-bitswap/internal/sessionmanager" + bsspm "github.com/ipfs/go-bitswap/internal/sessionpeermanager" + bswm "github.com/ipfs/go-bitswap/internal/wantmanager" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blockstore "github.com/ipfs/go-ipfs-blockstore" 
diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 965c94ed6..723b25d63 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -9,11 +9,11 @@ import ( "time" bitswap "github.com/ipfs/go-bitswap" - decision "github.com/ipfs/go-bitswap/decision" + decision "github.com/ipfs/go-bitswap/internal/decision" "github.com/ipfs/go-bitswap/message" - bssession "github.com/ipfs/go-bitswap/session" - testinstance "github.com/ipfs/go-bitswap/testinstance" - tn "github.com/ipfs/go-bitswap/testnet" + bssession "github.com/ipfs/go-bitswap/internal/session" + testinstance "github.com/ipfs/go-bitswap/internal/testinstance" + tn "github.com/ipfs/go-bitswap/internal/testnet" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" detectrace "github.com/ipfs/go-detect-race" diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/bitswap_with_sessions_test.go index 77ad03b2e..49e6d273c 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -7,8 +7,8 @@ import ( "time" bitswap "github.com/ipfs/go-bitswap" - bssession "github.com/ipfs/go-bitswap/session" - testinstance "github.com/ipfs/go-bitswap/testinstance" + bssession "github.com/ipfs/go-bitswap/internal/session" + testinstance "github.com/ipfs/go-bitswap/internal/testinstance" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" diff --git a/bitswap/blockpresencemanager/blockpresencemanager.go b/bitswap/internal/blockpresencemanager/blockpresencemanager.go similarity index 100% rename from bitswap/blockpresencemanager/blockpresencemanager.go rename to bitswap/internal/blockpresencemanager/blockpresencemanager.go diff --git a/bitswap/blockpresencemanager/blockpresencemanager_test.go b/bitswap/internal/blockpresencemanager/blockpresencemanager_test.go similarity index 99% rename from bitswap/blockpresencemanager/blockpresencemanager_test.go rename to 
bitswap/internal/blockpresencemanager/blockpresencemanager_test.go index 6154f4dff..579dbfcda 100644 --- a/bitswap/blockpresencemanager/blockpresencemanager_test.go +++ b/bitswap/internal/blockpresencemanager/blockpresencemanager_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-bitswap/internal/testutil" peer "github.com/libp2p/go-libp2p-core/peer" cid "github.com/ipfs/go-cid" diff --git a/bitswap/decision/blockstoremanager.go b/bitswap/internal/decision/blockstoremanager.go similarity index 100% rename from bitswap/decision/blockstoremanager.go rename to bitswap/internal/decision/blockstoremanager.go diff --git a/bitswap/decision/blockstoremanager_test.go b/bitswap/internal/decision/blockstoremanager_test.go similarity index 99% rename from bitswap/decision/blockstoremanager_test.go rename to bitswap/internal/decision/blockstoremanager_test.go index c57c48929..cac0a5b0e 100644 --- a/bitswap/decision/blockstoremanager_test.go +++ b/bitswap/internal/decision/blockstoremanager_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" blocks "github.com/ipfs/go-block-format" diff --git a/bitswap/decision/engine.go b/bitswap/internal/decision/engine.go similarity index 100% rename from bitswap/decision/engine.go rename to bitswap/internal/decision/engine.go diff --git a/bitswap/decision/engine_test.go b/bitswap/internal/decision/engine_test.go similarity index 99% rename from bitswap/decision/engine_test.go rename to bitswap/internal/decision/engine_test.go index 12e7eca21..d465fde20 100644 --- a/bitswap/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -10,10 +10,10 @@ import ( "testing" "time" - lu "github.com/ipfs/go-bitswap/logutil" + lu "github.com/ipfs/go-bitswap/internal/logutil" message "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" - 
"github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-bitswap/internal/testutil" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" diff --git a/bitswap/decision/ewma.go b/bitswap/internal/decision/ewma.go similarity index 100% rename from bitswap/decision/ewma.go rename to bitswap/internal/decision/ewma.go diff --git a/bitswap/decision/ledger.go b/bitswap/internal/decision/ledger.go similarity index 100% rename from bitswap/decision/ledger.go rename to bitswap/internal/decision/ledger.go diff --git a/bitswap/decision/taskmerger.go b/bitswap/internal/decision/taskmerger.go similarity index 100% rename from bitswap/decision/taskmerger.go rename to bitswap/internal/decision/taskmerger.go diff --git a/bitswap/decision/taskmerger_test.go b/bitswap/internal/decision/taskmerger_test.go similarity index 99% rename from bitswap/decision/taskmerger_test.go rename to bitswap/internal/decision/taskmerger_test.go index 7d4d61c8c..eb79f1569 100644 --- a/bitswap/decision/taskmerger_test.go +++ b/bitswap/internal/decision/taskmerger_test.go @@ -3,7 +3,7 @@ package decision import ( "testing" - "github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-bitswap/internal/testutil" "github.com/ipfs/go-peertaskqueue" "github.com/ipfs/go-peertaskqueue/peertask" ) diff --git a/bitswap/getter/getter.go b/bitswap/internal/getter/getter.go similarity index 98% rename from bitswap/getter/getter.go rename to bitswap/internal/getter/getter.go index 018bf87a4..d8c73d4d3 100644 --- a/bitswap/getter/getter.go +++ b/bitswap/internal/getter/getter.go @@ -4,7 +4,7 @@ import ( "context" "errors" - notifications "github.com/ipfs/go-bitswap/notifications" + notifications "github.com/ipfs/go-bitswap/internal/notifications" logging "github.com/ipfs/go-log" blocks "github.com/ipfs/go-block-format" diff --git a/bitswap/logutil/logutil.go b/bitswap/internal/logutil/logutil.go similarity index 100% rename from bitswap/logutil/logutil.go rename to bitswap/internal/logutil/logutil.go 
diff --git a/bitswap/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go similarity index 100% rename from bitswap/messagequeue/messagequeue.go rename to bitswap/internal/messagequeue/messagequeue.go diff --git a/bitswap/messagequeue/messagequeue_test.go b/bitswap/internal/messagequeue/messagequeue_test.go similarity index 99% rename from bitswap/messagequeue/messagequeue_test.go rename to bitswap/internal/messagequeue/messagequeue_test.go index 6ce146f94..ad66c944a 100644 --- a/bitswap/messagequeue/messagequeue_test.go +++ b/bitswap/internal/messagequeue/messagequeue_test.go @@ -7,7 +7,7 @@ import ( "time" "github.com/ipfs/go-bitswap/message" - "github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" bsmsg "github.com/ipfs/go-bitswap/message" diff --git a/bitswap/notifications/notifications.go b/bitswap/internal/notifications/notifications.go similarity index 100% rename from bitswap/notifications/notifications.go rename to bitswap/internal/notifications/notifications.go diff --git a/bitswap/notifications/notifications_test.go b/bitswap/internal/notifications/notifications_test.go similarity index 100% rename from bitswap/notifications/notifications_test.go rename to bitswap/internal/notifications/notifications_test.go diff --git a/bitswap/peermanager/peermanager.go b/bitswap/internal/peermanager/peermanager.go similarity index 100% rename from bitswap/peermanager/peermanager.go rename to bitswap/internal/peermanager/peermanager.go diff --git a/bitswap/peermanager/peermanager_test.go b/bitswap/internal/peermanager/peermanager_test.go similarity index 99% rename from bitswap/peermanager/peermanager_test.go rename to bitswap/internal/peermanager/peermanager_test.go index c62cb3aa5..afa79a9d4 100644 --- a/bitswap/peermanager/peermanager_test.go +++ b/bitswap/internal/peermanager/peermanager_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/ipfs/go-bitswap/testutil" + 
"github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" diff --git a/bitswap/peermanager/peerwantmanager.go b/bitswap/internal/peermanager/peerwantmanager.go similarity index 99% rename from bitswap/peermanager/peerwantmanager.go rename to bitswap/internal/peermanager/peerwantmanager.go index 31bcf795f..9833b3e8b 100644 --- a/bitswap/peermanager/peerwantmanager.go +++ b/bitswap/internal/peermanager/peerwantmanager.go @@ -4,7 +4,7 @@ import ( "bytes" "fmt" - lu "github.com/ipfs/go-bitswap/logutil" + lu "github.com/ipfs/go-bitswap/internal/logutil" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" diff --git a/bitswap/peermanager/peerwantmanager_test.go b/bitswap/internal/peermanager/peerwantmanager_test.go similarity index 99% rename from bitswap/peermanager/peerwantmanager_test.go rename to bitswap/internal/peermanager/peerwantmanager_test.go index dc9e181ce..0172a6816 100644 --- a/bitswap/peermanager/peerwantmanager_test.go +++ b/bitswap/internal/peermanager/peerwantmanager_test.go @@ -3,7 +3,7 @@ package peermanager import ( "testing" - "github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" ) diff --git a/bitswap/providerquerymanager/providerquerymanager.go b/bitswap/internal/providerquerymanager/providerquerymanager.go similarity index 100% rename from bitswap/providerquerymanager/providerquerymanager.go rename to bitswap/internal/providerquerymanager/providerquerymanager.go diff --git a/bitswap/providerquerymanager/providerquerymanager_test.go b/bitswap/internal/providerquerymanager/providerquerymanager_test.go similarity index 99% rename from bitswap/providerquerymanager/providerquerymanager_test.go rename to bitswap/internal/providerquerymanager/providerquerymanager_test.go index 689c5ec2d..8f560536b 100644 --- a/bitswap/providerquerymanager/providerquerymanager_test.go +++ 
b/bitswap/internal/providerquerymanager/providerquerymanager_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" diff --git a/bitswap/session/cidqueue.go b/bitswap/internal/session/cidqueue.go similarity index 100% rename from bitswap/session/cidqueue.go rename to bitswap/internal/session/cidqueue.go diff --git a/bitswap/session/peeravailabilitymanager.go b/bitswap/internal/session/peeravailabilitymanager.go similarity index 100% rename from bitswap/session/peeravailabilitymanager.go rename to bitswap/internal/session/peeravailabilitymanager.go diff --git a/bitswap/session/peeravailabilitymanager_test.go b/bitswap/internal/session/peeravailabilitymanager_test.go similarity index 97% rename from bitswap/session/peeravailabilitymanager_test.go rename to bitswap/internal/session/peeravailabilitymanager_test.go index 4c4b4b1e0..1d5b8f234 100644 --- a/bitswap/session/peeravailabilitymanager_test.go +++ b/bitswap/internal/session/peeravailabilitymanager_test.go @@ -3,7 +3,7 @@ package session import ( "testing" - "github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-bitswap/internal/testutil" ) func TestPeerAvailabilityManager(t *testing.T) { diff --git a/bitswap/session/peerresponsetracker.go b/bitswap/internal/session/peerresponsetracker.go similarity index 100% rename from bitswap/session/peerresponsetracker.go rename to bitswap/internal/session/peerresponsetracker.go diff --git a/bitswap/session/peerresponsetracker_test.go b/bitswap/internal/session/peerresponsetracker_test.go similarity index 98% rename from bitswap/session/peerresponsetracker_test.go rename to bitswap/internal/session/peerresponsetracker_test.go index bbe6bd756..aafd2ced9 100644 --- a/bitswap/session/peerresponsetracker_test.go +++ b/bitswap/internal/session/peerresponsetracker_test.go @@ -4,7 +4,7 @@ import ( "math" "testing" - 
"github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-bitswap/internal/testutil" peer "github.com/libp2p/go-libp2p-core/peer" ) diff --git a/bitswap/session/sentwantblockstracker.go b/bitswap/internal/session/sentwantblockstracker.go similarity index 100% rename from bitswap/session/sentwantblockstracker.go rename to bitswap/internal/session/sentwantblockstracker.go diff --git a/bitswap/session/sentwantblockstracker_test.go b/bitswap/internal/session/sentwantblockstracker_test.go similarity index 93% rename from bitswap/session/sentwantblockstracker_test.go rename to bitswap/internal/session/sentwantblockstracker_test.go index 097cac6b4..2449840c9 100644 --- a/bitswap/session/sentwantblockstracker_test.go +++ b/bitswap/internal/session/sentwantblockstracker_test.go @@ -3,7 +3,7 @@ package session import ( "testing" - "github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-bitswap/internal/testutil" ) func TestSendWantBlocksTracker(t *testing.T) { diff --git a/bitswap/session/session.go b/bitswap/internal/session/session.go similarity index 96% rename from bitswap/session/session.go rename to bitswap/internal/session/session.go index d9fb24437..77a76ce62 100644 --- a/bitswap/session/session.go +++ b/bitswap/internal/session/session.go @@ -5,12 +5,12 @@ import ( "sync" "time" - // lu "github.com/ipfs/go-bitswap/logutil" - bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" - bsgetter "github.com/ipfs/go-bitswap/getter" - notifications "github.com/ipfs/go-bitswap/notifications" - bspm "github.com/ipfs/go-bitswap/peermanager" - bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" + // lu "github.com/ipfs/go-bitswap/internal/logutil" + bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" + bsgetter "github.com/ipfs/go-bitswap/internal/getter" + notifications "github.com/ipfs/go-bitswap/internal/notifications" + bspm "github.com/ipfs/go-bitswap/internal/peermanager" + bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" blocks 
"github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" diff --git a/bitswap/session/session_test.go b/bitswap/internal/session/session_test.go similarity index 97% rename from bitswap/session/session_test.go rename to bitswap/internal/session/session_test.go index 688f7883c..21e196f7f 100644 --- a/bitswap/session/session_test.go +++ b/bitswap/internal/session/session_test.go @@ -5,11 +5,11 @@ import ( "testing" "time" - bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" - notifications "github.com/ipfs/go-bitswap/notifications" - bspm "github.com/ipfs/go-bitswap/peermanager" - bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" - "github.com/ipfs/go-bitswap/testutil" + bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" + notifications "github.com/ipfs/go-bitswap/internal/notifications" + bspm "github.com/ipfs/go-bitswap/internal/peermanager" + bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" + "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" delay "github.com/ipfs/go-ipfs-delay" diff --git a/bitswap/session/sessionwants.go b/bitswap/internal/session/sessionwants.go similarity index 100% rename from bitswap/session/sessionwants.go rename to bitswap/internal/session/sessionwants.go diff --git a/bitswap/session/sessionwants_test.go b/bitswap/internal/session/sessionwants_test.go similarity index 98% rename from bitswap/session/sessionwants_test.go rename to bitswap/internal/session/sessionwants_test.go index 953ecce9a..8389faa06 100644 --- a/bitswap/session/sessionwants_test.go +++ b/bitswap/internal/session/sessionwants_test.go @@ -3,7 +3,7 @@ package session import ( "testing" - "github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" ) diff --git a/bitswap/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go similarity 
index 99% rename from bitswap/session/sessionwantsender.go rename to bitswap/internal/session/sessionwantsender.go index ddd24ee01..defb3578b 100644 --- a/bitswap/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -3,7 +3,7 @@ package session import ( "context" - bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" + bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" diff --git a/bitswap/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go similarity index 98% rename from bitswap/session/sessionwantsender_test.go rename to bitswap/internal/session/sessionwantsender_test.go index e37744096..f49bce9de 100644 --- a/bitswap/session/sessionwantsender_test.go +++ b/bitswap/internal/session/sessionwantsender_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" - bspm "github.com/ipfs/go-bitswap/peermanager" - "github.com/ipfs/go-bitswap/testutil" + bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" + bspm "github.com/ipfs/go-bitswap/internal/peermanager" + "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" ) diff --git a/bitswap/session/wantinfo_test.go b/bitswap/internal/session/wantinfo_test.go similarity index 97% rename from bitswap/session/wantinfo_test.go rename to bitswap/internal/session/wantinfo_test.go index 618b231a5..8397d81fe 100644 --- a/bitswap/session/wantinfo_test.go +++ b/bitswap/internal/session/wantinfo_test.go @@ -3,7 +3,7 @@ package session import ( "testing" - "github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-bitswap/internal/testutil" ) func TestEmptyWantInfo(t *testing.T) { diff --git a/bitswap/sessiondata/sessiondata.go b/bitswap/internal/sessiondata/sessiondata.go similarity index 100% rename from bitswap/sessiondata/sessiondata.go rename to 
bitswap/internal/sessiondata/sessiondata.go diff --git a/bitswap/sessioninterestmanager/sessioninterestmanager.go b/bitswap/internal/sessioninterestmanager/sessioninterestmanager.go similarity index 97% rename from bitswap/sessioninterestmanager/sessioninterestmanager.go rename to bitswap/internal/sessioninterestmanager/sessioninterestmanager.go index 9deb37954..e85a645b9 100644 --- a/bitswap/sessioninterestmanager/sessioninterestmanager.go +++ b/bitswap/internal/sessioninterestmanager/sessioninterestmanager.go @@ -1,7 +1,7 @@ package sessioninterestmanager import ( - bsswl "github.com/ipfs/go-bitswap/sessionwantlist" + bsswl "github.com/ipfs/go-bitswap/internal/sessionwantlist" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" diff --git a/bitswap/sessioninterestmanager/sessioninterestmanager_test.go b/bitswap/internal/sessioninterestmanager/sessioninterestmanager_test.go similarity index 98% rename from bitswap/sessioninterestmanager/sessioninterestmanager_test.go rename to bitswap/internal/sessioninterestmanager/sessioninterestmanager_test.go index d882cabc3..ead920230 100644 --- a/bitswap/sessioninterestmanager/sessioninterestmanager_test.go +++ b/bitswap/internal/sessioninterestmanager/sessioninterestmanager_test.go @@ -3,7 +3,7 @@ package sessioninterestmanager import ( "testing" - "github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" ) diff --git a/bitswap/sessionmanager/sessionmanager.go b/bitswap/internal/sessionmanager/sessionmanager.go similarity index 93% rename from bitswap/sessionmanager/sessionmanager.go rename to bitswap/internal/sessionmanager/sessionmanager.go index 3090e8291..f7382fad3 100644 --- a/bitswap/sessionmanager/sessionmanager.go +++ b/bitswap/internal/sessionmanager/sessionmanager.go @@ -8,10 +8,10 @@ import ( cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" - bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" - notifications 
"github.com/ipfs/go-bitswap/notifications" - bssession "github.com/ipfs/go-bitswap/session" - bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" + bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" + notifications "github.com/ipfs/go-bitswap/internal/notifications" + bssession "github.com/ipfs/go-bitswap/internal/session" + bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" exchange "github.com/ipfs/go-ipfs-exchange-interface" peer "github.com/libp2p/go-libp2p-core/peer" ) diff --git a/bitswap/sessionmanager/sessionmanager_test.go b/bitswap/internal/sessionmanager/sessionmanager_test.go similarity index 95% rename from bitswap/sessionmanager/sessionmanager_test.go rename to bitswap/internal/sessionmanager/sessionmanager_test.go index 8f25a952b..e89ea4644 100644 --- a/bitswap/sessionmanager/sessionmanager_test.go +++ b/bitswap/internal/sessionmanager/sessionmanager_test.go @@ -7,11 +7,11 @@ import ( delay "github.com/ipfs/go-ipfs-delay" - bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" - notifications "github.com/ipfs/go-bitswap/notifications" - bspm "github.com/ipfs/go-bitswap/peermanager" - bssession "github.com/ipfs/go-bitswap/session" - bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" + bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" + notifications "github.com/ipfs/go-bitswap/internal/notifications" + bspm "github.com/ipfs/go-bitswap/internal/peermanager" + bssession "github.com/ipfs/go-bitswap/internal/session" + bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" diff --git a/bitswap/sessionpeermanager/latencytracker.go b/bitswap/internal/sessionpeermanager/latencytracker.go similarity index 100% rename from bitswap/sessionpeermanager/latencytracker.go rename to bitswap/internal/sessionpeermanager/latencytracker.go diff --git a/bitswap/sessionpeermanager/peerdata.go 
b/bitswap/internal/sessionpeermanager/peerdata.go similarity index 100% rename from bitswap/sessionpeermanager/peerdata.go rename to bitswap/internal/sessionpeermanager/peerdata.go diff --git a/bitswap/sessionpeermanager/sessionpeermanager.go b/bitswap/internal/sessionpeermanager/sessionpeermanager.go similarity index 99% rename from bitswap/sessionpeermanager/sessionpeermanager.go rename to bitswap/internal/sessionpeermanager/sessionpeermanager.go index 060df0915..7957638d3 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/internal/sessionpeermanager/sessionpeermanager.go @@ -7,7 +7,7 @@ import ( "sort" "time" - bssd "github.com/ipfs/go-bitswap/sessiondata" + bssd "github.com/ipfs/go-bitswap/internal/sessiondata" logging "github.com/ipfs/go-log" cid "github.com/ipfs/go-cid" diff --git a/bitswap/sessionpeermanager/sessionpeermanager_test.go b/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go similarity index 99% rename from bitswap/sessionpeermanager/sessionpeermanager_test.go rename to bitswap/internal/sessionpeermanager/sessionpeermanager_test.go index 87262b69d..9a771b188 100644 --- a/bitswap/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" diff --git a/bitswap/sessionrequestsplitter/sessionrequestsplitter.go b/bitswap/internal/sessionrequestsplitter/sessionrequestsplitter.go similarity index 98% rename from bitswap/sessionrequestsplitter/sessionrequestsplitter.go rename to bitswap/internal/sessionrequestsplitter/sessionrequestsplitter.go index 94535e174..b96985ec9 100644 --- a/bitswap/sessionrequestsplitter/sessionrequestsplitter.go +++ b/bitswap/internal/sessionrequestsplitter/sessionrequestsplitter.go @@ -3,7 +3,7 @@ package sessionrequestsplitter import 
( "context" - bssd "github.com/ipfs/go-bitswap/sessiondata" + bssd "github.com/ipfs/go-bitswap/internal/sessiondata" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" diff --git a/bitswap/sessionrequestsplitter/sessionrequestsplitter_test.go b/bitswap/internal/sessionrequestsplitter/sessionrequestsplitter_test.go similarity index 98% rename from bitswap/sessionrequestsplitter/sessionrequestsplitter_test.go rename to bitswap/internal/sessionrequestsplitter/sessionrequestsplitter_test.go index 10ed64ead..b0e7a0f30 100644 --- a/bitswap/sessionrequestsplitter/sessionrequestsplitter_test.go +++ b/bitswap/internal/sessionrequestsplitter/sessionrequestsplitter_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - "github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-bitswap/internal/testutil" ) func quadEaseOut(t float64) float64 { return t * t } diff --git a/bitswap/sessionwantlist/sessionwantlist.go b/bitswap/internal/sessionwantlist/sessionwantlist.go similarity index 100% rename from bitswap/sessionwantlist/sessionwantlist.go rename to bitswap/internal/sessionwantlist/sessionwantlist.go diff --git a/bitswap/sessionwantlist/sessionwantlist_test.go b/bitswap/internal/sessionwantlist/sessionwantlist_test.go similarity index 99% rename from bitswap/sessionwantlist/sessionwantlist_test.go rename to bitswap/internal/sessionwantlist/sessionwantlist_test.go index 0b89b8ae8..d57f93959 100644 --- a/bitswap/sessionwantlist/sessionwantlist_test.go +++ b/bitswap/internal/sessionwantlist/sessionwantlist_test.go @@ -4,7 +4,7 @@ import ( "os" "testing" - "github.com/ipfs/go-bitswap/testutil" + "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" ) diff --git a/bitswap/testinstance/testinstance.go b/bitswap/internal/testinstance/testinstance.go similarity index 98% rename from bitswap/testinstance/testinstance.go rename to bitswap/internal/testinstance/testinstance.go index f0c855149..2068928d6 100644 --- 
a/bitswap/testinstance/testinstance.go +++ b/bitswap/internal/testinstance/testinstance.go @@ -6,7 +6,7 @@ import ( bitswap "github.com/ipfs/go-bitswap" bsnet "github.com/ipfs/go-bitswap/network" - tn "github.com/ipfs/go-bitswap/testnet" + tn "github.com/ipfs/go-bitswap/internal/testnet" ds "github.com/ipfs/go-datastore" delayed "github.com/ipfs/go-datastore/delayed" ds_sync "github.com/ipfs/go-datastore/sync" diff --git a/bitswap/testnet/interface.go b/bitswap/internal/testnet/interface.go similarity index 100% rename from bitswap/testnet/interface.go rename to bitswap/internal/testnet/interface.go diff --git a/bitswap/testnet/internet_latency_delay_generator.go b/bitswap/internal/testnet/internet_latency_delay_generator.go similarity index 100% rename from bitswap/testnet/internet_latency_delay_generator.go rename to bitswap/internal/testnet/internet_latency_delay_generator.go diff --git a/bitswap/testnet/internet_latency_delay_generator_test.go b/bitswap/internal/testnet/internet_latency_delay_generator_test.go similarity index 100% rename from bitswap/testnet/internet_latency_delay_generator_test.go rename to bitswap/internal/testnet/internet_latency_delay_generator_test.go diff --git a/bitswap/testnet/network_test.go b/bitswap/internal/testnet/network_test.go similarity index 100% rename from bitswap/testnet/network_test.go rename to bitswap/internal/testnet/network_test.go diff --git a/bitswap/testnet/peernet.go b/bitswap/internal/testnet/peernet.go similarity index 100% rename from bitswap/testnet/peernet.go rename to bitswap/internal/testnet/peernet.go diff --git a/bitswap/testnet/rate_limit_generators.go b/bitswap/internal/testnet/rate_limit_generators.go similarity index 100% rename from bitswap/testnet/rate_limit_generators.go rename to bitswap/internal/testnet/rate_limit_generators.go diff --git a/bitswap/testnet/virtual.go b/bitswap/internal/testnet/virtual.go similarity index 100% rename from bitswap/testnet/virtual.go rename to 
bitswap/internal/testnet/virtual.go diff --git a/bitswap/testutil/testutil.go b/bitswap/internal/testutil/testutil.go similarity index 98% rename from bitswap/testutil/testutil.go rename to bitswap/internal/testutil/testutil.go index 9f0c5817e..48c306ab0 100644 --- a/bitswap/testutil/testutil.go +++ b/bitswap/internal/testutil/testutil.go @@ -4,7 +4,7 @@ import ( "math/rand" bsmsg "github.com/ipfs/go-bitswap/message" - bssd "github.com/ipfs/go-bitswap/sessiondata" + bssd "github.com/ipfs/go-bitswap/internal/sessiondata" "github.com/ipfs/go-bitswap/wantlist" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" diff --git a/bitswap/testutil/testutil_test.go b/bitswap/internal/testutil/testutil_test.go similarity index 100% rename from bitswap/testutil/testutil_test.go rename to bitswap/internal/testutil/testutil_test.go diff --git a/bitswap/wantmanager/wantmanager.go b/bitswap/internal/wantmanager/wantmanager.go similarity index 93% rename from bitswap/wantmanager/wantmanager.go rename to bitswap/internal/wantmanager/wantmanager.go index 009359935..4ddda4b79 100644 --- a/bitswap/wantmanager/wantmanager.go +++ b/bitswap/internal/wantmanager/wantmanager.go @@ -3,10 +3,10 @@ package wantmanager import ( "context" - bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" - bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" - "github.com/ipfs/go-bitswap/sessionmanager" - bsswl "github.com/ipfs/go-bitswap/sessionwantlist" + bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" + bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" + "github.com/ipfs/go-bitswap/internal/sessionmanager" + bsswl "github.com/ipfs/go-bitswap/internal/sessionwantlist" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" diff --git a/bitswap/wantmanager/wantmanager_test.go b/bitswap/internal/wantmanager/wantmanager_test.go similarity index 96% rename from bitswap/wantmanager/wantmanager_test.go rename to 
bitswap/internal/wantmanager/wantmanager_test.go index b4e7cd585..38d41d9f1 100644 --- a/bitswap/wantmanager/wantmanager_test.go +++ b/bitswap/internal/wantmanager/wantmanager_test.go @@ -4,10 +4,10 @@ import ( "context" "testing" - bsbpm "github.com/ipfs/go-bitswap/blockpresencemanager" - bssim "github.com/ipfs/go-bitswap/sessioninterestmanager" - "github.com/ipfs/go-bitswap/sessionmanager" - "github.com/ipfs/go-bitswap/testutil" + bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" + bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" + "github.com/ipfs/go-bitswap/internal/sessionmanager" + "github.com/ipfs/go-bitswap/internal/testutil" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index beecf09c7..6b8059fa5 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -8,7 +8,7 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" bsnet "github.com/ipfs/go-bitswap/network" - tn "github.com/ipfs/go-bitswap/testnet" + tn "github.com/ipfs/go-bitswap/internal/testnet" blocksutil "github.com/ipfs/go-ipfs-blocksutil" mockrouting "github.com/ipfs/go-ipfs-routing/mock" diff --git a/bitswap/workers.go b/bitswap/workers.go index 2028c4dfc..4b07008d4 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - engine "github.com/ipfs/go-bitswap/decision" + engine "github.com/ipfs/go-bitswap/internal/decision" bsmsg "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" cid "github.com/ipfs/go-cid" From 73a062d73e702b7bc2b4878b11b6ff8c4e4772e3 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 31 Jan 2020 14:58:48 -0500 Subject: [PATCH 0831/1035] fix: bug with signaling peer availability to sessions This commit was moved from ipfs/go-bitswap@717c564e01dcda46e7b45462784dc549dd766dd1 --- 
bitswap/internal/peermanager/peermanager.go | 28 +++++++++++++++---- .../internal/peermanager/peermanager_test.go | 15 ++++++++-- 2 files changed, 36 insertions(+), 7 deletions(-) diff --git a/bitswap/internal/peermanager/peermanager.go b/bitswap/internal/peermanager/peermanager.go index ddd59399f..ab73fd965 100644 --- a/bitswap/internal/peermanager/peermanager.go +++ b/bitswap/internal/peermanager/peermanager.go @@ -129,6 +129,10 @@ func (pm *PeerManager) Disconnected(p peer.ID) { pm.pwm.RemovePeer(p) } +// BroadcastWantHaves broadcasts want-haves to all peers (used by the session +// to discover seeds). +// For each peer it filters out want-haves that have previously been sent to +// the peer. func (pm *PeerManager) BroadcastWantHaves(ctx context.Context, wantHaves []cid.Cid) { pm.pqLk.Lock() defer pm.pqLk.Unlock() @@ -140,6 +144,8 @@ func (pm *PeerManager) BroadcastWantHaves(ctx context.Context, wantHaves []cid.C } } +// SendWants sends the given want-blocks and want-haves to the given peer. +// It filters out wants that have previously been sent to the peer. func (pm *PeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { pm.pqLk.Lock() defer pm.pqLk.Unlock() @@ -150,6 +156,8 @@ func (pm *PeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []ci } } +// SendCancels sends cancels for the given keys to all peers who had previously +// received a want for those keys. 
func (pm *PeerManager) SendCancels(ctx context.Context, cancelKs []cid.Cid) { pm.pqLk.Lock() defer pm.pqLk.Unlock() @@ -162,6 +170,7 @@ func (pm *PeerManager) SendCancels(ctx context.Context, cancelKs []cid.Cid) { } } +// CurrentWants returns the list of pending want-blocks func (pm *PeerManager) CurrentWants() []cid.Cid { pm.pqLk.RLock() defer pm.pqLk.RUnlock() @@ -169,6 +178,7 @@ func (pm *PeerManager) CurrentWants() []cid.Cid { return pm.pwm.GetWantBlocks() } +// CurrentWantHaves returns the list of pending want-haves func (pm *PeerManager) CurrentWantHaves() []cid.Cid { pm.pqLk.RLock() defer pm.pqLk.RUnlock() @@ -187,6 +197,8 @@ func (pm *PeerManager) getOrCreate(p peer.ID) *peerQueueInstance { return pqi } +// RegisterSession tells the PeerManager that the given session is interested +// in events about the given peer. func (pm *PeerManager) RegisterSession(p peer.ID, s Session) bool { pm.psLk.Lock() defer pm.psLk.Unlock() @@ -204,6 +216,8 @@ func (pm *PeerManager) RegisterSession(p peer.ID, s Session) bool { return ok } +// UnregisterSession tells the PeerManager that the given session is no longer +// interested in PeerManager events. func (pm *PeerManager) UnregisterSession(ses uint64) { pm.psLk.Lock() defer pm.psLk.Unlock() @@ -218,12 +232,16 @@ func (pm *PeerManager) UnregisterSession(ses uint64) { delete(pm.sessions, ses) } +// signalAvailability is called when a peer's connectivity changes. +// It informs interested sessions. 
func (pm *PeerManager) signalAvailability(p peer.ID, isConnected bool) { - for p, sesIds := range pm.peerSessions { - for sesId := range sesIds { - if s, ok := pm.sessions[sesId]; ok { - s.SignalAvailability(p, isConnected) - } + sesIds, ok := pm.peerSessions[p] + if !ok { + return + } + for sesId := range sesIds { + if s, ok := pm.sessions[sesId]; ok { + s.SignalAvailability(p, isConnected) } } } diff --git a/bitswap/internal/peermanager/peermanager_test.go b/bitswap/internal/peermanager/peermanager_test.go index afa79a9d4..0305b9f90 100644 --- a/bitswap/internal/peermanager/peermanager_test.go +++ b/bitswap/internal/peermanager/peermanager_test.go @@ -272,8 +272,8 @@ func TestSessionRegistration(t *testing.T) { msgs := make(chan msg, 16) peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(2) - self, p1 := tp[0], tp[1] + tp := testutil.GeneratePeers(3) + self, p1, p2 := tp[0], tp[1], tp[2] peerManager := New(ctx, peerQueueFactory, self) id := uint64(1) @@ -282,16 +282,27 @@ func TestSessionRegistration(t *testing.T) { if s.available[p1] { t.Fatal("Expected peer not be available till connected") } + peerManager.RegisterSession(p2, s) + if s.available[p2] { + t.Fatal("Expected peer not be available till connected") + } peerManager.Connected(p1, nil) if !s.available[p1] { t.Fatal("Expected signal callback") } + peerManager.Connected(p2, nil) + if !s.available[p2] { + t.Fatal("Expected signal callback") + } peerManager.Disconnected(p1) if s.available[p1] { t.Fatal("Expected signal callback") } + if !s.available[p2] { + t.Fatal("Expected signal callback only for disconnected peer") + } peerManager.UnregisterSession(id) From 36dbc322c9139bb002d55fe70ed665d593cfcad4 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Wed, 12 Feb 2020 16:26:42 -0500 Subject: [PATCH 0832/1035] Simulate DONT_HAVE for older peers (#248) This commit was moved from ipfs/go-bitswap@20be084856f61d3cce0c671a776b697619aa8f5f --- bitswap/benchmarks_test.go | 85 ++++- 
bitswap/bitswap.go | 27 +- bitswap/bitswap_test.go | 31 +- bitswap/bitswap_with_sessions_test.go | 18 +- bitswap/internal/decision/engine.go | 15 +- .../messagequeue/donthavetimeoutmgr.go | 304 +++++++++++++++++ .../messagequeue/donthavetimeoutmgr_test.go | 314 ++++++++++++++++++ bitswap/internal/messagequeue/messagequeue.go | 107 +++++- .../messagequeue/messagequeue_test.go | 94 +++++- bitswap/internal/testinstance/testinstance.go | 41 ++- bitswap/internal/testnet/virtual.go | 52 ++- bitswap/network/interface.go | 13 + bitswap/network/ipfs_impl.go | 12 + 13 files changed, 1032 insertions(+), 81 deletions(-) create mode 100644 bitswap/internal/messagequeue/donthavetimeoutmgr.go create mode 100644 bitswap/internal/messagequeue/donthavetimeoutmgr_test.go diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index e56214d96..71e046298 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -15,11 +15,13 @@ import ( "github.com/ipfs/go-bitswap/internal/testutil" blocks "github.com/ipfs/go-block-format" + protocol "github.com/libp2p/go-libp2p-core/protocol" bitswap "github.com/ipfs/go-bitswap" bssession "github.com/ipfs/go-bitswap/internal/session" testinstance "github.com/ipfs/go-bitswap/internal/testinstance" tn "github.com/ipfs/go-bitswap/internal/testnet" + bsnet "github.com/ipfs/go-bitswap/network" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" @@ -118,6 +120,74 @@ func BenchmarkFixedDelay(b *testing.B) { printResults(benchmarkLog) } +type mixedBench struct { + bench + fetcherCount int // number of nodes that fetch data + oldSeedCount int // number of seed nodes running old version of Bitswap +} + +var mixedBenches = []mixedBench{ + mixedBench{bench{"3Nodes-Overlap3-OneAtATime", 3, 10, overlap2, oneAtATime}, 1, 2}, + mixedBench{bench{"3Nodes-AllToAll-OneAtATime", 3, 10, allToAll, oneAtATime}, 1, 2}, + mixedBench{bench{"3Nodes-Overlap3-AllConcurrent", 3, 10, 
overlap2, fetchAllConcurrent}, 1, 2}, + mixedBench{bench{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch}, 1, 2}, +} + +func BenchmarkFetchFromOldBitswap(b *testing.B) { + benchmarkLog = nil + fixedDelay := delay.Fixed(10 * time.Millisecond) + bstoreLatency := time.Duration(0) + + for _, bch := range mixedBenches { + b.Run(bch.name, func(b *testing.B) { + fetcherCount := bch.fetcherCount + oldSeedCount := bch.oldSeedCount + newSeedCount := bch.nodeCount - (fetcherCount + oldSeedCount) + + net := tn.VirtualNetwork(mockrouting.NewServer(), fixedDelay) + + // Simulate an older Bitswap node (old protocol ID) that doesn't + // send DONT_HAVE responses + oldProtocol := []protocol.ID{bsnet.ProtocolBitswapOneOne} + oldNetOpts := []bsnet.NetOpt{bsnet.SupportedProtocols(oldProtocol)} + oldBsOpts := []bitswap.Option{bitswap.SetSendDontHaves(false)} + oldNodeGenerator := testinstance.NewTestInstanceGenerator(net, oldNetOpts, oldBsOpts) + + // Regular new Bitswap node + newNodeGenerator := testinstance.NewTestInstanceGenerator(net, nil, nil) + var instances []testinstance.Instance + + // Create new nodes (fetchers + seeds) + for i := 0; i < fetcherCount+newSeedCount; i++ { + inst := newNodeGenerator.Next() + instances = append(instances, inst) + } + // Create old nodes (just seeds) + for i := 0; i < oldSeedCount; i++ { + inst := oldNodeGenerator.Next() + instances = append(instances, inst) + } + // Connect all the nodes together + testinstance.ConnectInstances(instances) + + // Generate blocks, with a smaller root block + rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) + blocks := testutil.GenerateBlocksOfSize(bch.blockCount, stdBlockSize) + blocks[0] = rootBlock[0] + + // Run the distribution + runDistributionMulti(b, instances[:fetcherCount], instances[fetcherCount:], blocks, bstoreLatency, bch.distFn, bch.fetchFn) + + newNodeGenerator.Close() + oldNodeGenerator.Close() + }) + } + + out, _ := json.MarshalIndent(benchmarkLog, "", " ") + _ = 
ioutil.WriteFile("tmp/benchmark.json", out, 0666) + printResults(benchmarkLog) +} + const datacenterSpeed = 5 * time.Millisecond const fastSpeed = 60 * time.Millisecond const mediumSpeed = 200 * time.Millisecond @@ -226,12 +296,12 @@ func BenchmarkDatacenterMultiLeechMultiSeed(b *testing.B) { for i := 0; i < b.N; i++ { net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) - ig := testinstance.NewTestInstanceGenerator(net) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) defer ig.Close() instances := ig.Instances(numnodes) blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) - runDistributionMulti(b, instances, 3, blocks, bstoreLatency, df, ff) + runDistributionMulti(b, instances[:3], instances[3:], blocks, bstoreLatency, df, ff) } }) @@ -244,7 +314,7 @@ func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, b for i := 0; i < b.N; i++ { net := tn.VirtualNetwork(mockrouting.NewServer(), d) - ig := testinstance.NewTestInstanceGenerator(net) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) instances := ig.Instances(numnodes) rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) @@ -252,7 +322,6 @@ func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, b blocks[0] = rootBlock[0] runDistribution(b, instances, blocks, bstoreLatency, df, ff) ig.Close() - // panic("done") } } @@ -260,7 +329,7 @@ func subtestDistributeAndFetchRateLimited(b *testing.B, numnodes, numblks int, d for i := 0; i < b.N; i++ { net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) - ig := testinstance.NewTestInstanceGenerator(net) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) defer ig.Close() instances := ig.Instances(numnodes) @@ -271,12 +340,8 @@ func subtestDistributeAndFetchRateLimited(b *testing.B, numnodes, numblks int, d } } -func runDistributionMulti(b *testing.B, instances []testinstance.Instance, numFetchers int, blocks 
[]blocks.Block, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { - numnodes := len(instances) - fetchers := instances[numnodes-numFetchers:] - +func runDistributionMulti(b *testing.B, fetchers []testinstance.Instance, seeds []testinstance.Instance, blocks []blocks.Block, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { // Distribute blocks to seed nodes - seeds := instances[:numnodes-numFetchers] df(b, seeds, blocks) // Set the blockstore latency on seed nodes diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 2bc7a189c..e5e0ef148 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -14,9 +14,7 @@ import ( bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" decision "github.com/ipfs/go-bitswap/internal/decision" bsgetter "github.com/ipfs/go-bitswap/internal/getter" - bsmsg "github.com/ipfs/go-bitswap/message" bsmq "github.com/ipfs/go-bitswap/internal/messagequeue" - bsnet "github.com/ipfs/go-bitswap/network" notifications "github.com/ipfs/go-bitswap/internal/notifications" bspm "github.com/ipfs/go-bitswap/internal/peermanager" bspqm "github.com/ipfs/go-bitswap/internal/providerquerymanager" @@ -25,6 +23,8 @@ import ( bssm "github.com/ipfs/go-bitswap/internal/sessionmanager" bsspm "github.com/ipfs/go-bitswap/internal/sessionpeermanager" bswm "github.com/ipfs/go-bitswap/internal/wantmanager" + bsmsg "github.com/ipfs/go-bitswap/message" + bsnet "github.com/ipfs/go-bitswap/network" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blockstore "github.com/ipfs/go-ipfs-blockstore" @@ -84,6 +84,17 @@ func RebroadcastDelay(newRebroadcastDelay delay.D) Option { } } +// SetSendDontHaves indicates what to do when the engine receives a want-block +// for a block that is not in the blockstore. Either +// - Send a DONT_HAVE message +// - Simply don't respond +// This option is only used for testing. 
+func SetSendDontHaves(send bool) Option { + return func(bs *Bitswap) { + bs.engine.SetSendDontHaves(send) + } +} + // New initializes a BitSwap instance that communicates over the provided // BitSwapNetwork. This function registers the returned instance as the network // delegate. Runs until context is cancelled or bitswap.Close is called. @@ -111,14 +122,22 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, return nil }) + var wm *bswm.WantManager + // onDontHaveTimeout is called when a want-block is sent to a peer that + // has an old version of Bitswap that doesn't support DONT_HAVE messages, + // and no response is received within a timeout. + onDontHaveTimeout := func(p peer.ID, dontHaves []cid.Cid) { + // Simulate a DONT_HAVE message arriving to the WantManager + wm.ReceiveFrom(ctx, p, nil, nil, dontHaves) + } peerQueueFactory := func(ctx context.Context, p peer.ID) bspm.PeerQueue { - return bsmq.New(ctx, p, network) + return bsmq.New(ctx, p, network, onDontHaveTimeout) } sim := bssim.New() bpm := bsbpm.New() pm := bspm.New(ctx, peerQueueFactory, network.Self()) - wm := bswm.New(ctx, pm, sim, bpm) + wm = bswm.New(ctx, pm, sim, bpm) pqm := bspqm.New(ctx, network) sessionFactory := func(ctx context.Context, id uint64, spm bssession.SessionPeerManager, diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 723b25d63..0a0bcc98b 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -10,10 +10,10 @@ import ( bitswap "github.com/ipfs/go-bitswap" decision "github.com/ipfs/go-bitswap/internal/decision" - "github.com/ipfs/go-bitswap/message" bssession "github.com/ipfs/go-bitswap/internal/session" testinstance "github.com/ipfs/go-bitswap/internal/testinstance" tn "github.com/ipfs/go-bitswap/internal/testnet" + "github.com/ipfs/go-bitswap/message" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" detectrace "github.com/ipfs/go-detect-race" @@ -37,7 +37,7 @@ func getVirtualNetwork() tn.Network { func 
TestClose(t *testing.T) { vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet) + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) defer ig.Close() bgen := blocksutil.NewBlockGenerator() @@ -55,7 +55,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this rs := mockrouting.NewServer() net := tn.VirtualNetwork(rs, delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) defer ig.Close() block := blocks.NewBlock([]byte("block")) @@ -81,7 +81,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) block := blocks.NewBlock([]byte("block")) - ig := testinstance.NewTestInstanceGenerator(net) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) defer ig.Close() peers := ig.Instances(2) @@ -111,7 +111,8 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) block := blocks.NewBlock([]byte("block")) - ig := testinstance.NewTestInstanceGenerator(net, bitswap.ProvideEnabled(false), bitswap.ProviderSearchDelay(50*time.Millisecond)) + bsOpts := []bitswap.Option{bitswap.ProvideEnabled(false), bitswap.ProviderSearchDelay(50 * time.Millisecond)} + ig := testinstance.NewTestInstanceGenerator(net, nil, bsOpts) defer ig.Close() hasBlock := ig.Next() @@ -148,7 +149,7 @@ func TestUnwantedBlockNotAdded(t *testing.T) { bsMessage := message.New(true) bsMessage.AddBlock(block) - ig := testinstance.NewTestInstanceGenerator(net) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) defer ig.Close() peers := ig.Instances(2) @@ -184,7 +185,7 @@ func TestPendingBlockAdded(t *testing.T) { bg := blocksutil.NewBlockGenerator() sessionBroadcastWantCapacity := 4 - ig := 
testinstance.NewTestInstanceGenerator(net) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) defer ig.Close() instance := ig.Instances(1)[0] @@ -282,7 +283,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { t.SkipNow() } net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) defer ig.Close() bg := blocksutil.NewBlockGenerator() @@ -348,7 +349,7 @@ func TestSendToWantingPeer(t *testing.T) { } net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) defer ig.Close() bg := blocksutil.NewBlockGenerator() @@ -390,7 +391,7 @@ func TestSendToWantingPeer(t *testing.T) { func TestEmptyKey(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) defer ig.Close() bs := ig.Instances(1)[0].Exchange @@ -423,7 +424,7 @@ func assertStat(t *testing.T, st *bitswap.Stat, sblks, rblks, sdata, rdata uint6 func TestBasicBitswap(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) defer ig.Close() bg := blocksutil.NewBlockGenerator() @@ -499,7 +500,7 @@ func TestBasicBitswap(t *testing.T) { func TestDoubleGet(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) defer ig.Close() bg := blocksutil.NewBlockGenerator() @@ -567,7 +568,7 @@ func TestDoubleGet(t *testing.T) { func TestWantlistCleanup(t *testing.T) { net := 
tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) defer ig.Close() bg := blocksutil.NewBlockGenerator() @@ -689,7 +690,7 @@ func newReceipt(sent, recv, exchanged uint64) *decision.Receipt { func TestBitswapLedgerOneWay(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) defer ig.Close() bg := blocksutil.NewBlockGenerator() @@ -741,7 +742,7 @@ func TestBitswapLedgerOneWay(t *testing.T) { func TestBitswapLedgerTwoWay(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) defer ig.Close() bg := blocksutil.NewBlockGenerator() diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/bitswap_with_sessions_test.go index 49e6d273c..28d3a3255 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -20,7 +20,7 @@ func TestBasicSessions(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet) + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) defer ig.Close() bgen := blocksutil.NewBlockGenerator() @@ -71,7 +71,7 @@ func TestSessionBetweenPeers(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet) + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) defer ig.Close() bgen := blocksutil.NewBlockGenerator() @@ -127,7 +127,7 @@ func TestSessionSplitFetch(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet) + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) defer ig.Close() bgen := 
blocksutil.NewBlockGenerator() @@ -171,7 +171,7 @@ func TestFetchNotConnected(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, bitswap.ProviderSearchDelay(10*time.Millisecond)) + ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{bitswap.ProviderSearchDelay(10 * time.Millisecond)}) defer ig.Close() bgen := blocksutil.NewBlockGenerator() @@ -216,7 +216,7 @@ func TestFetchAfterDisconnect(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, bitswap.ProviderSearchDelay(10*time.Millisecond)) + ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{bitswap.ProviderSearchDelay(10 * time.Millisecond)}) defer ig.Close() bgen := blocksutil.NewBlockGenerator() @@ -290,7 +290,7 @@ func TestInterestCacheOverflow(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet) + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) defer ig.Close() bgen := blocksutil.NewBlockGenerator() @@ -342,7 +342,7 @@ func TestPutAfterSessionCacheEvict(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet) + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) defer ig.Close() bgen := blocksutil.NewBlockGenerator() @@ -382,7 +382,7 @@ func TestMultipleSessions(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet) + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) defer ig.Close() bgen := blocksutil.NewBlockGenerator() @@ -425,7 +425,7 @@ func TestWantlistClearsOnCancel(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet) + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) defer ig.Close() bgen := blocksutil.NewBlockGenerator() diff --git a/bitswap/internal/decision/engine.go 
b/bitswap/internal/decision/engine.go index 2e183b067..bf51beaef 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -161,6 +161,8 @@ type Engine struct { // bytes up to which we will replace a want-have with a want-block maxBlockSizeReplaceHasWithBlock int + sendDontHaves bool + self peer.ID } @@ -180,6 +182,7 @@ func newEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, ticker: time.NewTicker(time.Millisecond * 100), maxBlockSizeReplaceHasWithBlock: maxReplaceSize, taskWorkerCount: taskWorkerCount, + sendDontHaves: true, self: self, } e.tagQueued = fmt.Sprintf(tagFormat, "queued", uuid.New().String()) @@ -193,6 +196,16 @@ func newEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, return e } +// SetSendDontHaves indicates what to do when the engine receives a want-block +// for a block that is not in the blockstore. Either +// - Send a DONT_HAVE message +// - Simply don't respond +// Older versions of Bitswap did not respond, so this allows us to simulate +// those older versions for testing. 
+func (e *Engine) SetSendDontHaves(send bool) { + e.sendDontHaves = send +} + // Start up workers to handle requests from other nodes for the data on this node func (e *Engine) StartWorkers(ctx context.Context, px process.Process) { // Start up blockstore manager @@ -563,7 +576,7 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap // If the block was not found if !found { // Only add the task to the queue if the requester wants a DONT_HAVE - if entry.SendDontHave { + if e.sendDontHaves && entry.SendDontHave { newWorkExists = true isWantBlock := false if entry.WantType == pb.Message_Wantlist_Block { diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr.go b/bitswap/internal/messagequeue/donthavetimeoutmgr.go new file mode 100644 index 000000000..ee7941b6d --- /dev/null +++ b/bitswap/internal/messagequeue/donthavetimeoutmgr.go @@ -0,0 +1,304 @@ +package messagequeue + +import ( + "context" + "sync" + "time" + + cid "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/p2p/protocol/ping" +) + +const ( + // dontHaveTimeout is used to simulate a DONT_HAVE when communicating with + // a peer whose Bitswap client doesn't support the DONT_HAVE response. + // If the peer doesn't respond to a want-block within the timeout, the + // local node assumes that the peer doesn't have the block. 
+ dontHaveTimeout = 5 * time.Second + + // maxExpectedWantProcessTime is the maximum amount of time we expect a + // peer takes to process a want and initiate sending a response to us + maxExpectedWantProcessTime = 200 * time.Millisecond + + // latencyMultiplier is multiplied by the average ping time to + // get an upper bound on how long we expect to wait for a peer's response + // to arrive + latencyMultiplier = 2 +) + +// PeerConnection is a connection to a peer that can be pinged, and the +// average latency measured +type PeerConnection interface { + // Ping the peer + Ping(context.Context) ping.Result + // The average latency of all pings + Latency() time.Duration +} + +// pendingWant keeps track of a want that has been sent and we're waiting +// for a response or for a timeout to expire +type pendingWant struct { + c cid.Cid + active bool + sent time.Time +} + +// dontHaveTimeoutMgr pings the peer to measure latency. It uses the latency to +// set a reasonable timeout for simulating a DONT_HAVE message for peers that +// don't support DONT_HAVE +type dontHaveTimeoutMgr struct { + ctx context.Context + shutdown func() + peerConn PeerConnection + onDontHaveTimeout func([]cid.Cid) + defaultTimeout time.Duration + latencyMultiplier int + maxExpectedWantProcessTime time.Duration + + // All variables below here must be protected by the lock + lk sync.RWMutex + // has the timeout manager started + started bool + // wants that are active (waiting for a response or timeout) + activeWants map[cid.Cid]*pendingWant + // queue of wants, from oldest to newest + wantQueue []*pendingWant + // time to wait for a response (depends on latency) + timeout time.Duration + // timer used to wait until want at front of queue expires + checkForTimeoutsTimer *time.Timer +} + +// newDontHaveTimeoutMgr creates a new dontHaveTimeoutMgr +// onDontHaveTimeout is called when pending keys expire (not cancelled before timeout) +func newDontHaveTimeoutMgr(ctx context.Context, pc 
PeerConnection, onDontHaveTimeout func([]cid.Cid)) *dontHaveTimeoutMgr { + return newDontHaveTimeoutMgrWithParams(ctx, pc, onDontHaveTimeout, dontHaveTimeout, + latencyMultiplier, maxExpectedWantProcessTime) +} + +// newDontHaveTimeoutMgrWithParams is used by the tests +func newDontHaveTimeoutMgrWithParams(ctx context.Context, pc PeerConnection, onDontHaveTimeout func([]cid.Cid), + defaultTimeout time.Duration, latencyMultiplier int, + maxExpectedWantProcessTime time.Duration) *dontHaveTimeoutMgr { + + ctx, shutdown := context.WithCancel(ctx) + mqp := &dontHaveTimeoutMgr{ + ctx: ctx, + shutdown: shutdown, + peerConn: pc, + activeWants: make(map[cid.Cid]*pendingWant), + timeout: defaultTimeout, + defaultTimeout: defaultTimeout, + latencyMultiplier: latencyMultiplier, + maxExpectedWantProcessTime: maxExpectedWantProcessTime, + onDontHaveTimeout: onDontHaveTimeout, + } + + return mqp +} + +// Shutdown the dontHaveTimeoutMgr. Any subsequent call to Start() will be ignored +func (dhtm *dontHaveTimeoutMgr) Shutdown() { + dhtm.shutdown() +} + +// onShutdown is called when the dontHaveTimeoutMgr shuts down +func (dhtm *dontHaveTimeoutMgr) onShutdown() { + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + // Clear any pending check for timeouts + if dhtm.checkForTimeoutsTimer != nil { + dhtm.checkForTimeoutsTimer.Stop() + } +} + +// closeAfterContext is called when the dontHaveTimeoutMgr starts. +// It monitors for the context being cancelled. +func (dhtm *dontHaveTimeoutMgr) closeAfterContext() { + <-dhtm.ctx.Done() + dhtm.onShutdown() +} + +// Start the dontHaveTimeoutMgr. 
This method is idempotent +func (dhtm *dontHaveTimeoutMgr) Start() { + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + // Make sure the dont have timeout manager hasn't already been started + if dhtm.started { + return + } + dhtm.started = true + + go dhtm.closeAfterContext() + + // If we already have a measure of latency to the peer, use it to + // calculate a reasonable timeout + latency := dhtm.peerConn.Latency() + if latency.Nanoseconds() > 0 { + dhtm.timeout = dhtm.calculateTimeoutFromLatency(latency) + return + } + + // Otherwise measure latency by pinging the peer + go dhtm.measureLatency() +} + +// measureLatency measures the latency to the peer by pinging it +func (dhtm *dontHaveTimeoutMgr) measureLatency() { + // Wait up to defaultTimeout for a response to the ping + ctx, cancel := context.WithTimeout(dhtm.ctx, dhtm.defaultTimeout) + defer cancel() + + // Ping the peer + res := dhtm.peerConn.Ping(ctx) + if res.Error != nil { + // If there was an error, we'll just leave the timeout as + // defaultTimeout + return + } + + // Get the average latency to the peer + latency := dhtm.peerConn.Latency() + + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + // Calculate a reasonable timeout based on latency + dhtm.timeout = dhtm.calculateTimeoutFromLatency(latency) + + // Check if after changing the timeout there are any pending wants that are + // now over the timeout + dhtm.checkForTimeouts() +} + +// checkForTimeouts checks pending wants to see if any are over the timeout. +// Note: this function should only be called within the lock. 
+func (dhtm *dontHaveTimeoutMgr) checkForTimeouts() { + if len(dhtm.wantQueue) == 0 { + return + } + + // Figure out which of the blocks that were wanted were not received + // within the timeout + expired := make([]cid.Cid, 0, len(dhtm.activeWants)) + for len(dhtm.wantQueue) > 0 { + pw := dhtm.wantQueue[0] + + // If the want is still active + if pw.active { + // The queue is in order from earliest to latest, so if we + // didn't find an expired entry we can stop iterating + if time.Since(pw.sent) < dhtm.timeout { + break + } + + // Add the want to the expired list + expired = append(expired, pw.c) + // Remove the want from the activeWants map + delete(dhtm.activeWants, pw.c) + } + + // Remove expired or cancelled wants from the want queue + dhtm.wantQueue = dhtm.wantQueue[1:] + } + + // Fire the timeout event for the expired wants + if len(expired) > 0 { + go dhtm.fireTimeout(expired) + } + + if len(dhtm.wantQueue) == 0 { + return + } + + // Make sure the timeout manager is still running + if dhtm.ctx.Err() != nil { + return + } + + // Schedule the next check for the moment when the oldest pending want will + // timeout + oldestStart := dhtm.wantQueue[0].sent + until := time.Until(oldestStart.Add(dhtm.timeout)) + if dhtm.checkForTimeoutsTimer == nil { + dhtm.checkForTimeoutsTimer = time.AfterFunc(until, func() { + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + dhtm.checkForTimeouts() + }) + } else { + dhtm.checkForTimeoutsTimer.Stop() + dhtm.checkForTimeoutsTimer.Reset(until) + } +} + +// AddPending adds the given keys that will expire if not cancelled before +// the timeout +func (dhtm *dontHaveTimeoutMgr) AddPending(ks []cid.Cid) { + if len(ks) == 0 { + return + } + + start := time.Now() + + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + queueWasEmpty := len(dhtm.activeWants) == 0 + + // Record the start time for each key + for _, c := range ks { + if _, ok := dhtm.activeWants[c]; !ok { + pw := pendingWant{ + c: c, + sent: start, + active: true, + } + 
dhtm.activeWants[c] = &pw + dhtm.wantQueue = append(dhtm.wantQueue, &pw) + } + } + + // If there was already an earlier pending item in the queue, then there + // must already be a timeout check scheduled. If there is nothing in the + // queue then we should make sure to schedule a check. + if queueWasEmpty { + dhtm.checkForTimeouts() + } +} + +// CancelPending is called when we receive a response for a key +func (dhtm *dontHaveTimeoutMgr) CancelPending(ks []cid.Cid) { + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + // Mark the wants as cancelled + for _, c := range ks { + if pw, ok := dhtm.activeWants[c]; ok { + pw.active = false + delete(dhtm.activeWants, c) + } + } +} + +// fireTimeout fires the onDontHaveTimeout method with the timed out keys +func (dhtm *dontHaveTimeoutMgr) fireTimeout(pending []cid.Cid) { + // Make sure the timeout manager has not been shut down + if dhtm.ctx.Err() != nil { + return + } + + // Fire the timeout + dhtm.onDontHaveTimeout(pending) +} + +// calculateTimeoutFromLatency calculates a reasonable timeout derived from latency +func (dhtm *dontHaveTimeoutMgr) calculateTimeoutFromLatency(latency time.Duration) time.Duration { + // The maximum expected time for a response is + // the expected time to process the want + (latency * multiplier) + // The multiplier is to provide some padding for variable latency. 
+ return dhtm.maxExpectedWantProcessTime + time.Duration(dhtm.latencyMultiplier)*latency +} diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go new file mode 100644 index 000000000..78e622a74 --- /dev/null +++ b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go @@ -0,0 +1,314 @@ +package messagequeue + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/ipfs/go-bitswap/internal/testutil" + cid "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/p2p/protocol/ping" +) + +type mockPeerConn struct { + err error + latency time.Duration + latencies []time.Duration +} + +func (pc *mockPeerConn) Ping(ctx context.Context) ping.Result { + timer := time.NewTimer(pc.latency) + select { + case <-timer.C: + if pc.err != nil { + return ping.Result{Error: pc.err} + } + pc.latencies = append(pc.latencies, pc.latency) + case <-ctx.Done(): + } + return ping.Result{RTT: pc.latency} +} + +func (pc *mockPeerConn) Latency() time.Duration { + sum := time.Duration(0) + if len(pc.latencies) == 0 { + return sum + } + for _, l := range pc.latencies { + sum += l + } + return sum / time.Duration(len(pc.latencies)) +} + +type timeoutRecorder struct { + timedOutKs []cid.Cid + lk sync.Mutex +} + +func (tr *timeoutRecorder) onTimeout(tks []cid.Cid) { + tr.lk.Lock() + defer tr.lk.Unlock() + tr.timedOutKs = append(tr.timedOutKs, tks...) +} + +func TestDontHaveTimeoutMgrTimeout(t *testing.T) { + firstks := testutil.GenerateCids(2) + secondks := append(firstks, testutil.GenerateCids(3)...) 
+ latency := time.Millisecond * 10 + latMultiplier := 2 + expProcessTime := 5 * time.Millisecond + expectedTimeout := expProcessTime + latency*time.Duration(latMultiplier) + ctx := context.Background() + pc := &mockPeerConn{latency: latency} + tr := timeoutRecorder{} + + dhtm := newDontHaveTimeoutMgrWithParams(ctx, pc, tr.onTimeout, + dontHaveTimeout, latMultiplier, expProcessTime) + dhtm.Start() + + // Add first set of keys + dhtm.AddPending(firstks) + + // Wait for less than the expected timeout + time.Sleep(expectedTimeout - 5*time.Millisecond) + + // At this stage no keys should have timed out + if len(tr.timedOutKs) > 0 { + t.Fatal("expected timeout not to have happened yet") + } + + // Add second set of keys + dhtm.AddPending(secondks) + + // Wait until after the expected timeout + time.Sleep(10 * time.Millisecond) + + // At this stage first set of keys should have timed out + if len(tr.timedOutKs) != len(firstks) { + t.Fatal("expected timeout") + } + + // Clear the recorded timed out keys + tr.timedOutKs = nil + + // Sleep until the second set of keys should have timed out + time.Sleep(expectedTimeout) + + // At this stage all keys should have timed out. The second set included + // the first set of keys, but they were added before the first set timed + // out, so only the remaining keys should have been added. 
+ if len(tr.timedOutKs) != len(secondks)-len(firstks) { + t.Fatal("expected second set of keys to timeout") + } +} + +func TestDontHaveTimeoutMgrCancel(t *testing.T) { + ks := testutil.GenerateCids(3) + latency := time.Millisecond * 10 + latMultiplier := 1 + expProcessTime := time.Duration(0) + expectedTimeout := latency + ctx := context.Background() + pc := &mockPeerConn{latency: latency} + tr := timeoutRecorder{} + + dhtm := newDontHaveTimeoutMgrWithParams(ctx, pc, tr.onTimeout, + dontHaveTimeout, latMultiplier, expProcessTime) + dhtm.Start() + + // Add keys + dhtm.AddPending(ks) + time.Sleep(5 * time.Millisecond) + + // Cancel keys + cancelCount := 1 + dhtm.CancelPending(ks[:cancelCount]) + + // Wait for the expected timeout + time.Sleep(expectedTimeout) + + // At this stage all non-cancelled keys should have timed out + if len(tr.timedOutKs) != len(ks)-cancelCount { + t.Fatal("expected timeout") + } +} + +func TestDontHaveTimeoutWantCancelWant(t *testing.T) { + ks := testutil.GenerateCids(3) + latency := time.Millisecond * 20 + latMultiplier := 1 + expProcessTime := time.Duration(0) + expectedTimeout := latency + ctx := context.Background() + pc := &mockPeerConn{latency: latency} + tr := timeoutRecorder{} + + dhtm := newDontHaveTimeoutMgrWithParams(ctx, pc, tr.onTimeout, + dontHaveTimeout, latMultiplier, expProcessTime) + dhtm.Start() + + // Add keys + dhtm.AddPending(ks) + + // Wait for a short time + time.Sleep(expectedTimeout - 10*time.Millisecond) + + // Cancel two keys + dhtm.CancelPending(ks[:2]) + + time.Sleep(5 * time.Millisecond) + + // Add back one cancelled key + dhtm.AddPending(ks[:1]) + + // Wait till after initial timeout + time.Sleep(10 * time.Millisecond) + + // At this stage only the key that was never cancelled should have timed out + if len(tr.timedOutKs) != 1 { + t.Fatal("expected one key to timeout") + } + + // Wait till after added back key should time out + time.Sleep(latency) + + // At this stage the key that was added back should also 
have timed out + if len(tr.timedOutKs) != 2 { + t.Fatal("expected added back key to timeout") + } +} + +func TestDontHaveTimeoutRepeatedAddPending(t *testing.T) { + ks := testutil.GenerateCids(10) + latency := time.Millisecond * 5 + latMultiplier := 1 + expProcessTime := time.Duration(0) + ctx := context.Background() + pc := &mockPeerConn{latency: latency} + tr := timeoutRecorder{} + + dhtm := newDontHaveTimeoutMgrWithParams(ctx, pc, tr.onTimeout, + dontHaveTimeout, latMultiplier, expProcessTime) + dhtm.Start() + + // Add keys repeatedly + for _, c := range ks { + dhtm.AddPending([]cid.Cid{c}) + } + + // Wait for the expected timeout + time.Sleep(latency + 5*time.Millisecond) + + // At this stage all keys should have timed out + if len(tr.timedOutKs) != len(ks) { + t.Fatal("expected timeout") + } +} + +func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { + ks := testutil.GenerateCids(2) + latency := time.Millisecond * 1 + latMultiplier := 2 + expProcessTime := 2 * time.Millisecond + defaultTimeout := 10 * time.Millisecond + expectedTimeout := expProcessTime + defaultTimeout + tr := timeoutRecorder{} + ctx := context.Background() + pc := &mockPeerConn{latency: latency, err: fmt.Errorf("ping error")} + + dhtm := newDontHaveTimeoutMgrWithParams(ctx, pc, tr.onTimeout, + defaultTimeout, latMultiplier, expProcessTime) + dhtm.Start() + + // Add keys + dhtm.AddPending(ks) + + // Sleep for less than the expected timeout + time.Sleep(expectedTimeout - 5*time.Millisecond) + + // At this stage no timeout should have happened yet + if len(tr.timedOutKs) > 0 { + t.Fatal("expected timeout not to have happened yet") + } + + // Sleep until after the expected timeout + time.Sleep(10 * time.Millisecond) + + // Now the keys should have timed out + if len(tr.timedOutKs) != len(ks) { + t.Fatal("expected timeout") + } +} + +func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { + ks := testutil.GenerateCids(2) + latency := time.Millisecond * 20 + 
latMultiplier := 1 + expProcessTime := time.Duration(0) + defaultTimeout := 10 * time.Millisecond + tr := timeoutRecorder{} + ctx := context.Background() + pc := &mockPeerConn{latency: latency} + + dhtm := newDontHaveTimeoutMgrWithParams(ctx, pc, tr.onTimeout, + defaultTimeout, latMultiplier, expProcessTime) + dhtm.Start() + + // Add keys + dhtm.AddPending(ks) + + // Sleep for less than the default timeout + time.Sleep(defaultTimeout - 5*time.Millisecond) + + // At this stage no timeout should have happened yet + if len(tr.timedOutKs) > 0 { + t.Fatal("expected timeout not to have happened yet") + } + + // Sleep until after the default timeout + time.Sleep(10 * time.Millisecond) + + // Now the keys should have timed out + if len(tr.timedOutKs) != len(ks) { + t.Fatal("expected timeout") + } +} + +func TestDontHaveTimeoutNoTimeoutAfterShutdown(t *testing.T) { + ks := testutil.GenerateCids(2) + latency := time.Millisecond * 10 + latMultiplier := 1 + expProcessTime := time.Duration(0) + ctx := context.Background() + pc := &mockPeerConn{latency: latency} + + var lk sync.Mutex + var timedOutKs []cid.Cid + onTimeout := func(tks []cid.Cid) { + lk.Lock() + defer lk.Unlock() + timedOutKs = append(timedOutKs, tks...) 
+ } + dhtm := newDontHaveTimeoutMgrWithParams(ctx, pc, onTimeout, + dontHaveTimeout, latMultiplier, expProcessTime) + dhtm.Start() + + // Add keys + dhtm.AddPending(ks) + + // Wait less than the timeout + time.Sleep(latency - 5*time.Millisecond) + + // Shutdown the manager + dhtm.Shutdown() + + // Wait for the expected timeout + time.Sleep(10 * time.Millisecond) + + // Manager was shut down so timeout should not have fired + if len(timedOutKs) != 0 { + t.Fatal("expected no timeout after shutdown") + } +} diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index b8caad57b..15f8100d2 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -15,6 +15,7 @@ import ( cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" peer "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p/p2p/protocol/ping" ) var log = logging.Logger("bitswap") @@ -40,7 +41,8 @@ const ( type MessageNetwork interface { ConnectTo(context.Context, peer.ID) error NewMessageSender(context.Context, peer.ID) (bsnet.MessageSender, error) - Self() peer.ID + Latency(peer.ID) time.Duration + Ping(context.Context, peer.ID) ping.Result } // MessageQueue implements queue of want messages to send to peers. @@ -48,6 +50,7 @@ type MessageQueue struct { ctx context.Context p peer.ID network MessageNetwork + dhTimeoutMgr DontHaveTimeoutManager maxMessageSize int sendErrorBackoff time.Duration @@ -104,17 +107,60 @@ func (r *recallWantlist) RemoveType(c cid.Cid, wtype pb.Message_Wantlist_WantTyp r.pending.RemoveType(c, wtype) } -// New creats a new MessageQueue. 
-func New(ctx context.Context, p peer.ID, network MessageNetwork) *MessageQueue { - return newMessageQueue(ctx, p, network, maxMessageSize, sendErrorBackoff) +type peerConn struct { + p peer.ID + network MessageNetwork +} + +func newPeerConnection(p peer.ID, network MessageNetwork) *peerConn { + return &peerConn{p, network} +} + +func (pc *peerConn) Ping(ctx context.Context) ping.Result { + return pc.network.Ping(ctx, pc.p) +} + +func (pc *peerConn) Latency() time.Duration { + return pc.network.Latency(pc.p) +} + +// Fires when a timeout occurs waiting for a response from a peer running an +// older version of Bitswap that doesn't support DONT_HAVE messages. +type OnDontHaveTimeout func(peer.ID, []cid.Cid) + +// DontHaveTimeoutManager pings a peer to estimate latency so it can set a reasonable +// upper bound on when to consider a DONT_HAVE request as timed out (when connected to +// a peer that doesn't support DONT_HAVE messages) +type DontHaveTimeoutManager interface { + // Start the manager (idempotent) + Start() + // Shutdown the manager (Shutdown is final, manager cannot be restarted) + Shutdown() + // AddPending adds the wants as pending a response. If the are not + // cancelled before the timeout, the OnDontHaveTimeout method will be called. + AddPending([]cid.Cid) + // CancelPending removes the wants + CancelPending([]cid.Cid) +} + +// New creates a new MessageQueue. 
+func New(ctx context.Context, p peer.ID, network MessageNetwork, onDontHaveTimeout OnDontHaveTimeout) *MessageQueue { + onTimeout := func(ks []cid.Cid) { + onDontHaveTimeout(p, ks) + } + dhTimeoutMgr := newDontHaveTimeoutMgr(ctx, newPeerConnection(p, network), onTimeout) + return newMessageQueue(ctx, p, network, maxMessageSize, sendErrorBackoff, dhTimeoutMgr) } // This constructor is used by the tests -func newMessageQueue(ctx context.Context, p peer.ID, network MessageNetwork, maxMsgSize int, sendErrorBackoff time.Duration) *MessageQueue { +func newMessageQueue(ctx context.Context, p peer.ID, network MessageNetwork, + maxMsgSize int, sendErrorBackoff time.Duration, dhTimeoutMgr DontHaveTimeoutManager) *MessageQueue { + mq := &MessageQueue{ ctx: ctx, p: p, network: network, + dhTimeoutMgr: dhTimeoutMgr, maxMessageSize: maxMsgSize, bcstWants: newRecallWantList(), peerWants: newRecallWantList(), @@ -191,9 +237,13 @@ func (mq *MessageQueue) AddCancels(cancelKs []cid.Cid) { return } + // Cancel any outstanding DONT_HAVE timers + mq.dhTimeoutMgr.CancelPending(cancelKs) + mq.wllock.Lock() defer mq.wllock.Unlock() + // Remove keys from broadcast and peer wants, and add to cancels for _, c := range cancelKs { mq.bcstWants.Remove(c) mq.peerWants.Remove(c) @@ -227,7 +277,14 @@ func (mq *MessageQueue) Shutdown() { close(mq.done) } +func (mq *MessageQueue) onShutdown() { + // Shut down the DONT_HAVE timeout manager + mq.dhTimeoutMgr.Shutdown() +} + func (mq *MessageQueue) runQueue() { + defer mq.onShutdown() + for { select { case <-mq.rebroadcastTimer.C: @@ -301,6 +358,12 @@ func (mq *MessageQueue) sendMessage() { return } + // Make sure the DONT_HAVE timeout manager has started + if !mq.sender.SupportsHave() { + // Note: Start is idempotent + mq.dhTimeoutMgr.Start() + } + // Convert want lists to a Bitswap Message message, onSent := mq.extractOutgoingMessage(mq.sender.SupportsHave()) if message == nil || message.Empty() { @@ -315,6 +378,8 @@ func (mq *MessageQueue) 
sendMessage() { // We were able to send successfully. onSent() + mq.simulateDontHaveWithTimeout(message) + // If the message was too big and only a subset of wants could be // sent, schedule sending the rest of the wants in the next // iteration of the event loop. @@ -327,6 +392,37 @@ func (mq *MessageQueue) sendMessage() { } } +// If the peer is running an older version of Bitswap that doesn't support the +// DONT_HAVE response, watch for timeouts on any want-blocks we sent the peer, +// and if there is a timeout simulate a DONT_HAVE response. +func (mq *MessageQueue) simulateDontHaveWithTimeout(msg bsmsg.BitSwapMessage) { + // If the peer supports DONT_HAVE responses, we don't need to simulate + if mq.sender.SupportsHave() { + return + } + + mq.wllock.Lock() + + // Get the CID of each want-block that expects a DONT_HAVE response + wantlist := msg.Wantlist() + wants := make([]cid.Cid, 0, len(wantlist)) + for _, entry := range wantlist { + if entry.WantType == pb.Message_Wantlist_Block && entry.SendDontHave { + // Unlikely, but just in case check that the block hasn't been + // received in the interim + c := entry.Cid + if _, ok := mq.peerWants.allWants.Contains(c); ok { + wants = append(wants, c) + } + } + } + + mq.wllock.Unlock() + + // Add wants to DONT_HAVE timeout manager + mq.dhTimeoutMgr.AddPending(wants) +} + // func (mq *MessageQueue) logOutgoingMessage(msg bsmsg.BitSwapMessage) { // entries := msg.Wantlist() // for _, e := range entries { @@ -420,6 +516,7 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap return msg, onSent } + func (mq *MessageQueue) initializeSender() error { if mq.sender != nil { return nil diff --git a/bitswap/internal/messagequeue/messagequeue_test.go b/bitswap/internal/messagequeue/messagequeue_test.go index ad66c944a..0ea93c43d 100644 --- a/bitswap/internal/messagequeue/messagequeue_test.go +++ b/bitswap/internal/messagequeue/messagequeue_test.go @@ -3,17 +3,19 @@ package messagequeue import ( 
"context" "errors" + "fmt" "testing" "time" - "github.com/ipfs/go-bitswap/message" "github.com/ipfs/go-bitswap/internal/testutil" + "github.com/ipfs/go-bitswap/message" cid "github.com/ipfs/go-cid" bsmsg "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" bsnet "github.com/ipfs/go-bitswap/network" peer "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p/p2p/protocol/ping" ) type fakeMessageNetwork struct { @@ -33,7 +35,35 @@ func (fmn *fakeMessageNetwork) NewMessageSender(context.Context, peer.ID) (bsnet return nil, fmn.messageSenderError } -func (fms *fakeMessageNetwork) Self() peer.ID { return "" } +func (fms *fakeMessageNetwork) Self() peer.ID { return "" } +func (fms *fakeMessageNetwork) Latency(peer.ID) time.Duration { return 0 } +func (fms *fakeMessageNetwork) Ping(context.Context, peer.ID) ping.Result { + return ping.Result{Error: fmt.Errorf("ping error")} +} + +type fakeDontHaveTimeoutMgr struct { + ks []cid.Cid +} + +func (fp *fakeDontHaveTimeoutMgr) Start() {} +func (fp *fakeDontHaveTimeoutMgr) Shutdown() {} +func (fp *fakeDontHaveTimeoutMgr) AddPending(ks []cid.Cid) { + s := cid.NewSet() + for _, c := range append(fp.ks, ks...) 
{ + s.Add(c) + } + fp.ks = s.Keys() +} +func (fp *fakeDontHaveTimeoutMgr) CancelPending(ks []cid.Cid) { + s := cid.NewSet() + for _, c := range fp.ks { + s.Add(c) + } + for _, c := range ks { + s.Remove(c) + } + fp.ks = s.Keys() +} type fakeMessageSender struct { sendError error @@ -56,6 +86,8 @@ func (fms *fakeMessageSender) Close() error { fms.fullClosed <- struct{}{} func (fms *fakeMessageSender) Reset() error { fms.reset <- struct{}{}; return nil } func (fms *fakeMessageSender) SupportsHave() bool { return fms.supportsHave } +func mockTimeoutCb(peer.ID, []cid.Cid) {} + func collectMessages(ctx context.Context, t *testing.T, messagesSent <-chan bsmsg.BitSwapMessage, @@ -90,7 +122,7 @@ func TestStartupAndShutdown(t *testing.T) { fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(ctx, peerID, fakenet) + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) bcstwh := testutil.GenerateCids(10) messageQueue.Startup() @@ -132,7 +164,7 @@ func TestSendingMessagesDeduped(t *testing.T) { fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(ctx, peerID, fakenet) + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) wantHaves := testutil.GenerateCids(10) wantBlocks := testutil.GenerateCids(10) @@ -155,7 +187,7 @@ func TestSendingMessagesPartialDupe(t *testing.T) { fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(ctx, peerID, fakenet) + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) wantHaves := testutil.GenerateCids(10) wantBlocks := testutil.GenerateCids(10) @@ -178,7 +210,7 @@ func 
TestSendingMessagesPriority(t *testing.T) { fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(ctx, peerID, fakenet) + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) wantHaves1 := testutil.GenerateCids(5) wantHaves2 := testutil.GenerateCids(5) wantHaves := append(wantHaves1, wantHaves2...) @@ -247,7 +279,7 @@ func TestCancelOverridesPendingWants(t *testing.T) { fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(ctx, peerID, fakenet) + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) wantHaves := testutil.GenerateCids(2) wantBlocks := testutil.GenerateCids(2) @@ -281,7 +313,7 @@ func TestWantOverridesPendingCancels(t *testing.T) { fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(ctx, peerID, fakenet) + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) cancels := testutil.GenerateCids(3) messageQueue.Startup() @@ -314,7 +346,7 @@ func TestWantlistRebroadcast(t *testing.T) { fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(ctx, peerID, fakenet) + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) bcstwh := testutil.GenerateCids(10) wantHaves := testutil.GenerateCids(10) wantBlocks := testutil.GenerateCids(10) @@ -410,12 +442,13 @@ func TestSendingLargeMessages(t *testing.T) { fullClosedChan := make(chan struct{}, 1) fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, 
true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + dhtm := &fakeDontHaveTimeoutMgr{} peerID := testutil.GeneratePeers(1)[0] wantBlocks := testutil.GenerateCids(10) entrySize := 44 maxMsgSize := entrySize * 3 // 3 wants - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMsgSize, sendErrorBackoff) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMsgSize, sendErrorBackoff, dhtm) messageQueue.Startup() messageQueue.AddWants(wantBlocks, []cid.Cid{}) @@ -442,7 +475,7 @@ func TestSendToPeerThatDoesntSupportHave(t *testing.T) { fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(ctx, peerID, fakenet) + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) messageQueue.Startup() // If the remote peer doesn't support HAVE / DONT_HAVE messages @@ -488,6 +521,39 @@ func TestSendToPeerThatDoesntSupportHave(t *testing.T) { } } +func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, false} + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + dhtm := &fakeDontHaveTimeoutMgr{} + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, dhtm) + messageQueue.Startup() + + wbs := testutil.GenerateCids(10) + messageQueue.AddWants(wbs, nil) + collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + // Check want-blocks are added to DontHaveTimeoutMgr + if len(dhtm.ks) != len(wbs) { + t.Fatal("want-blocks not added to DontHaveTimeoutMgr") + } + + cancelCount := 2 + messageQueue.AddCancels(wbs[:cancelCount]) + collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + // Check want-blocks are removed from 
DontHaveTimeoutMgr + if len(dhtm.ks) != len(wbs)-cancelCount { + t.Fatal("want-blocks not removed from DontHaveTimeoutMgr") + } +} + func TestResendAfterError(t *testing.T) { ctx := context.Background() messagesSent := make(chan bsmsg.BitSwapMessage) @@ -496,9 +562,10 @@ func TestResendAfterError(t *testing.T) { fullClosedChan := make(chan struct{}, 1) fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + dhtm := &fakeDontHaveTimeoutMgr{} peerID := testutil.GeneratePeers(1)[0] sendErrBackoff := 5 * time.Millisecond - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrBackoff) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrBackoff, dhtm) wantBlocks := testutil.GenerateCids(10) wantHaves := testutil.GenerateCids(10) @@ -534,9 +601,10 @@ func TestResendAfterMaxRetries(t *testing.T) { fullClosedChan := make(chan struct{}, 1) fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + dhtm := &fakeDontHaveTimeoutMgr{} peerID := testutil.GeneratePeers(1)[0] sendErrBackoff := 2 * time.Millisecond - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrBackoff) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrBackoff, dhtm) wantBlocks := testutil.GenerateCids(10) wantHaves := testutil.GenerateCids(10) wantBlocks2 := testutil.GenerateCids(10) diff --git a/bitswap/internal/testinstance/testinstance.go b/bitswap/internal/testinstance/testinstance.go index 2068928d6..b1651db11 100644 --- a/bitswap/internal/testinstance/testinstance.go +++ b/bitswap/internal/testinstance/testinstance.go @@ -5,8 +5,8 @@ import ( "time" bitswap "github.com/ipfs/go-bitswap" - bsnet "github.com/ipfs/go-bitswap/network" tn "github.com/ipfs/go-bitswap/internal/testnet" + bsnet 
"github.com/ipfs/go-bitswap/network" ds "github.com/ipfs/go-datastore" delayed "github.com/ipfs/go-datastore/delayed" ds_sync "github.com/ipfs/go-datastore/sync" @@ -19,24 +19,26 @@ import ( // NewTestInstanceGenerator generates a new InstanceGenerator for the given // testnet -func NewTestInstanceGenerator(net tn.Network, bsOptions ...bitswap.Option) InstanceGenerator { +func NewTestInstanceGenerator(net tn.Network, netOptions []bsnet.NetOpt, bsOptions []bitswap.Option) InstanceGenerator { ctx, cancel := context.WithCancel(context.Background()) return InstanceGenerator{ - net: net, - seq: 0, - ctx: ctx, // TODO take ctx as param to Next, Instances - cancel: cancel, - bsOptions: bsOptions, + net: net, + seq: 0, + ctx: ctx, // TODO take ctx as param to Next, Instances + cancel: cancel, + bsOptions: bsOptions, + netOptions: netOptions, } } // InstanceGenerator generates new test instances of bitswap+dependencies type InstanceGenerator struct { - seq int - net tn.Network - ctx context.Context - cancel context.CancelFunc - bsOptions []bitswap.Option + seq int + net tn.Network + ctx context.Context + cancel context.CancelFunc + bsOptions []bitswap.Option + netOptions []bsnet.NetOpt } // Close closes the clobal context, shutting down all test instances @@ -52,7 +54,7 @@ func (g *InstanceGenerator) Next() Instance { if err != nil { panic("FIXME") // TODO change signature } - return NewInstance(g.ctx, g.net, p, g.bsOptions...) 
+ return NewInstance(g.ctx, g.net, p, g.netOptions, g.bsOptions) } // Instances creates N test instances of bitswap + dependencies and connects @@ -63,6 +65,12 @@ func (g *InstanceGenerator) Instances(n int) []Instance { inst := g.Next() instances = append(instances, inst) } + ConnectInstances(instances) + return instances +} + +// ConnectInstances connects the given instances to each other +func ConnectInstances(instances []Instance) { for i, inst := range instances { for j := i + 1; j < len(instances); j++ { oinst := instances[j] @@ -72,7 +80,6 @@ func (g *InstanceGenerator) Instances(n int) []Instance { } } } - return instances } // Instance is a test instance of bitswap + dependencies for integration testing @@ -100,10 +107,10 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // NB: It's easy make mistakes by providing the same peer ID to two different // instances. To safeguard, use the InstanceGenerator to generate instances. It's // just a much better idea. -func NewInstance(ctx context.Context, net tn.Network, p tnet.Identity, options ...bitswap.Option) Instance { +func NewInstance(ctx context.Context, net tn.Network, p tnet.Identity, netOptions []bsnet.NetOpt, bsOptions []bitswap.Option) Instance { bsdelay := delay.Fixed(0) - adapter := net.Adapter(p) + adapter := net.Adapter(p, netOptions...) dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) bstore, err := blockstore.CachedBlockstore(ctx, @@ -113,7 +120,7 @@ func NewInstance(ctx context.Context, net tn.Network, p tnet.Identity, options . panic(err.Error()) // FIXME perhaps change signature and return error. 
} - bs := bitswap.New(ctx, adapter, bstore, options...).(*bitswap.Bitswap) + bs := bitswap.New(ctx, adapter, bstore, bsOptions...).(*bitswap.Bitswap) return Instance{ Adapter: adapter, diff --git a/bitswap/internal/testnet/virtual.go b/bitswap/internal/testnet/virtual.go index 9a92d1c75..1d1c7b796 100644 --- a/bitswap/internal/testnet/virtual.go +++ b/bitswap/internal/testnet/virtual.go @@ -17,9 +17,11 @@ import ( "github.com/libp2p/go-libp2p-core/connmgr" "github.com/libp2p/go-libp2p-core/peer" + protocol "github.com/libp2p/go-libp2p-core/protocol" "github.com/libp2p/go-libp2p-core/routing" tnet "github.com/libp2p/go-libp2p-testing/net" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" + "github.com/libp2p/go-libp2p/p2p/protocol/ping" ) // VirtualNetwork generates a new testnet instance - a fake network that @@ -88,10 +90,23 @@ func (n *network) Adapter(p tnet.Identity, opts ...bsnet.NetOpt) bsnet.BitSwapNe n.mu.Lock() defer n.mu.Unlock() + s := bsnet.Settings{ + SupportedProtocols: []protocol.ID{ + bsnet.ProtocolBitswap, + bsnet.ProtocolBitswapOneOne, + bsnet.ProtocolBitswapOneZero, + bsnet.ProtocolBitswapNoVers, + }, + } + for _, opt := range opts { + opt(&s) + } + client := &networkClient{ - local: p.ID(), - network: n, - routing: n.routingserver.Client(p), + local: p.ID(), + network: n, + routing: n.routingserver.Client(p), + supportedProtocols: s.SupportedProtocols, } n.clients[p.ID()] = &receiverQueue{receiver: client} return client @@ -169,15 +184,26 @@ func (n *network) SendMessage( type networkClient struct { local peer.ID bsnet.Receiver - network *network - routing routing.Routing - stats bsnet.Stats + network *network + routing routing.Routing + stats bsnet.Stats + supportedProtocols []protocol.ID } func (nc *networkClient) Self() peer.ID { return nc.local } +func (nc *networkClient) Ping(ctx context.Context, p peer.ID) ping.Result { + return ping.Result{RTT: nc.Latency(p)} +} + +func (nc *networkClient) Latency(p peer.ID) time.Duration { + 
nc.network.mu.Lock() + defer nc.network.mu.Unlock() + return nc.network.latencies[nc.local][p] +} + func (nc *networkClient) SendMessage( ctx context.Context, to peer.ID, @@ -240,8 +266,20 @@ func (mp *messagePasser) Reset() error { return nil } +var oldProtos = map[protocol.ID]struct{}{ + bsnet.ProtocolBitswapNoVers: struct{}{}, + bsnet.ProtocolBitswapOneZero: struct{}{}, + bsnet.ProtocolBitswapOneOne: struct{}{}, +} + func (mp *messagePasser) SupportsHave() bool { - return true + protos := mp.net.network.clients[mp.target].receiver.supportedProtocols + for _, proto := range protos { + if _, ok := oldProtos[proto]; !ok { + return true + } + } + return false } func (nc *networkClient) NewMessageSender(ctx context.Context, p peer.ID) (bsnet.MessageSender, error) { diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 704d851fb..6b2878e38 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -2,6 +2,7 @@ package network import ( "context" + "time" bsmsg "github.com/ipfs/go-bitswap/message" @@ -10,6 +11,7 @@ import ( "github.com/libp2p/go-libp2p-core/connmgr" "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/protocol" + "github.com/libp2p/go-libp2p/p2p/protocol/ping" ) var ( @@ -26,6 +28,7 @@ var ( // BitSwapNetwork provides network connectivity for BitSwap sessions. type BitSwapNetwork interface { Self() peer.ID + // SendMessage sends a BitSwap message to a peer. 
SendMessage( context.Context, @@ -46,6 +49,8 @@ type BitSwapNetwork interface { Stats() Stats Routing + + Pinger } // MessageSender is an interface for sending a series of messages over the bitswap @@ -82,6 +87,14 @@ type Routing interface { Provide(context.Context, cid.Cid) error } +// Pinger is an interface to ping a peer and get the average latency of all pings +type Pinger interface { + // Ping a peer + Ping(context.Context, peer.ID) ping.Result + // Get the average latency of all pings + Latency(peer.ID) time.Duration +} + // Stats is a container for statistics about the bitswap network // the numbers inside are specific to bitswap, and not any other protocols // using the same underlying network. diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 2a25b7a00..b73a25453 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -19,6 +19,7 @@ import ( peerstore "github.com/libp2p/go-libp2p-core/peerstore" "github.com/libp2p/go-libp2p-core/protocol" "github.com/libp2p/go-libp2p-core/routing" + "github.com/libp2p/go-libp2p/p2p/protocol/ping" msgio "github.com/libp2p/go-msgio" ma "github.com/multiformats/go-multiaddr" ) @@ -107,6 +108,17 @@ func (bsnet *impl) Self() peer.ID { return bsnet.host.ID() } +func (bsnet *impl) Ping(ctx context.Context, p peer.ID) ping.Result { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + res := <-ping.Ping(ctx, bsnet.host, p) + return res +} + +func (bsnet *impl) Latency(p peer.ID) time.Duration { + return bsnet.host.Peerstore().LatencyEWMA(p) +} + // Indicates whether the given protocol supports HAVE / DONT_HAVE messages func (bsnet *impl) SupportsHave(proto protocol.ID) bool { switch proto { From 2b0ef67ce575871e402fb60f143ad77790e7692d Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 30 Jan 2020 21:56:51 -0800 Subject: [PATCH 0833/1035] chore: remove deprecated logging This commit was moved from ipfs/go-bitswap@1d06b0e5e78d80c7c646f559f1a75c208663160c --- 
bitswap/bitswap.go | 4 ++-- bitswap/internal/decision/engine_test.go | 4 ++-- bitswap/internal/getter/getter.go | 2 +- .../internal/session/peerresponsetracker.go | 4 ++-- bitswap/internal/session/session.go | 12 +++++----- bitswap/internal/wantmanager/wantmanager.go | 2 +- bitswap/network/ipfs_impl.go | 4 ++-- bitswap/workers.go | 23 ++++++++----------- 8 files changed, 25 insertions(+), 30 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index e5e0ef148..5e1c5b05b 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -382,7 +382,7 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []b if from != "" { for _, b := range wanted { - log.Event(ctx, "Bitswap.GetBlockRequest.End", b.Cid()) + log.Debugw("Bitswap.GetBlockRequest.End", "cid", b.Cid()) } } @@ -417,7 +417,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg // Process blocks err := bs.receiveBlocksFrom(ctx, p, iblocks, haves, dontHaves) if err != nil { - log.Warningf("ReceiveMessage recvBlockFrom error: %s", err) + log.Warnf("ReceiveMessage recvBlockFrom error: %s", err) return } } diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index d465fde20..ebfbaacda 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -1092,12 +1092,12 @@ func getNextEnvelope(e *Engine, next envChan, t time.Duration) (envChan, *Envelo select { case env, ok := <-next: // blocks till next envelope ready if !ok { - log.Warningf("got closed channel") + log.Warnf("got closed channel") return nil, nil } return nil, env case <-ctx.Done(): - // log.Warningf("got timeout") + // log.Warnf("got timeout") } return next, nil } diff --git a/bitswap/internal/getter/getter.go b/bitswap/internal/getter/getter.go index d8c73d4d3..02e3b54b7 100644 --- a/bitswap/internal/getter/getter.go +++ b/bitswap/internal/getter/getter.go @@ -77,7 +77,7 @@ func AsyncGetBlocks(ctx 
context.Context, sessctx context.Context, keys []cid.Cid remaining := cid.NewSet() promise := notif.Subscribe(ctx, keys...) for _, k := range keys { - log.Event(ctx, "Bitswap.GetBlockRequest.Start", k) + log.Debugw("Bitswap.GetBlockRequest.Start", "cid", k) remaining.Add(k) } diff --git a/bitswap/internal/session/peerresponsetracker.go b/bitswap/internal/session/peerresponsetracker.go index 220398968..fb3c111bf 100644 --- a/bitswap/internal/session/peerresponsetracker.go +++ b/bitswap/internal/session/peerresponsetracker.go @@ -41,7 +41,7 @@ func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID { for _, p := range peers { counted += float64(prt.getPeerCount(p)) / float64(total) if counted > rnd { - // log.Warningf(" chose %s from %s (%d) / %s (%d) with pivot %.2f", + // log.Warnf(" chose %s from %s (%d) / %s (%d) with pivot %.2f", // lu.P(p), lu.P(peers[0]), prt.firstResponder[peers[0]], lu.P(peers[1]), prt.firstResponder[peers[1]], rnd) return p } @@ -51,7 +51,7 @@ func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID { // math that doesn't quite cover the whole range of peers in the for loop // so just choose the last peer. 
index := len(peers) - 1 - // log.Warningf(" chose last (indx %d) %s from %s (%d) / %s (%d) with pivot %.2f", + // log.Warnf(" chose last (indx %d) %s from %s (%d) / %s (%d) with pivot %.2f", // index, lu.P(peers[index]), lu.P(peers[0]), prt.firstResponder[peers[0]], lu.P(peers[1]), prt.firstResponder[peers[1]], rnd) return peers[index] } diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index 77a76ce62..b20db308c 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -210,13 +210,13 @@ func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontH // // log.Infof("Ses%d<-%s: %d blocks, %d haves, %d dont haves\n", // // s.id, from, len(interestedKs), len(wantedHaves), len(wantedDontHaves)) // for _, c := range interestedKs { -// log.Warningf("Ses%d %s<-%s: block %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c)) +// log.Warnf("Ses%d %s<-%s: block %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c)) // } // for _, c := range haves { -// log.Warningf("Ses%d %s<-%s: HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c)) +// log.Warnf("Ses%d %s<-%s: HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c)) // } // for _, c := range dontHaves { -// log.Warningf("Ses%d %s<-%s: DONT_HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c)) +// log.Warnf("Ses%d %s<-%s: DONT_HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c)) // } // } @@ -306,9 +306,9 @@ func (s *Session) run(ctx context.Context) { func (s *Session) handleIdleTick(ctx context.Context) { live := s.sw.PrepareBroadcast() - // log.Warningf("\n\n\n\n\nSes%d: broadcast %d keys\n\n\n\n\n", s.id, len(live)) + // log.Warnf("\n\n\n\n\nSes%d: broadcast %d keys\n\n\n\n\n", s.id, len(live)) // log.Infof("Ses%d: broadcast %d keys\n", s.id, len(live)) - log.Warningf("Ses%d: broadcast %d keys", s.id, len(live)) + log.Warnf("Ses%d: broadcast %d keys", s.id, len(live)) // Broadcast a want-have for the live wants to everyone we're connected to 
s.sprm.RecordPeerRequests(nil, live) @@ -387,7 +387,7 @@ func (s *Session) resetIdleTick() { tickDelay = s.initialSearchDelay } else { avLat := s.latencyTrkr.averageLatency() - // log.Warningf("averageLatency %s", avLat) + // log.Warnf("averageLatency %s", avLat) tickDelay = s.baseTickDelay + (3 * avLat) } tickDelay = tickDelay * time.Duration(1+s.consecutiveTicks) diff --git a/bitswap/internal/wantmanager/wantmanager.go b/bitswap/internal/wantmanager/wantmanager.go index 4ddda4b79..254ea9796 100644 --- a/bitswap/internal/wantmanager/wantmanager.go +++ b/bitswap/internal/wantmanager/wantmanager.go @@ -75,7 +75,7 @@ func (wm *WantManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid.Ci // BroadcastWantHaves is called when want-haves should be broadcast to all // connected peers (as part of session discovery) func (wm *WantManager) BroadcastWantHaves(ctx context.Context, ses uint64, wantHaves []cid.Cid) { - // log.Warningf("BroadcastWantHaves session%d: %s", ses, wantHaves) + // log.Warnf("BroadcastWantHaves session%d: %s", ses, wantHaves) // Record broadcast wants wm.bcwl.Add(wantHaves, ses) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index b73a25453..67159d53c 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -135,7 +135,7 @@ func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg. } if err := s.SetWriteDeadline(deadline); err != nil { - log.Warningf("error setting deadline: %s", err) + log.Warnf("error setting deadline: %s", err) } // Older Bitswap versions use a slightly different wire format so we need @@ -157,7 +157,7 @@ func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg. 
} if err := s.SetWriteDeadline(time.Time{}); err != nil { - log.Warningf("error resetting deadline: %s", err) + log.Warnf("error resetting deadline: %s", err) } return nil } diff --git a/bitswap/workers.go b/bitswap/workers.go index 4b07008d4..fe2430533 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -8,7 +8,6 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" cid "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" process "github.com/jbenet/goprocess" procctx "github.com/jbenet/goprocess/context" ) @@ -41,10 +40,10 @@ func (bs *Bitswap) startWorkers(ctx context.Context, px process.Process) { } func (bs *Bitswap) taskWorker(ctx context.Context, id int) { - idmap := logging.LoggableMap{"ID": id} defer log.Debug("bitswap task worker shutting down...") + log := log.With("ID", id) for { - log.Event(ctx, "Bitswap.TaskWorker.Loop", idmap) + log.Debug("Bitswap.TaskWorker.Loop") select { case nextEnvelope := <-bs.engine.Outbox(): select { @@ -57,13 +56,10 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { // TODO: Should only track *useful* messages in ledger outgoing := bsmsg.New(false) for _, block := range envelope.Message.Blocks() { - log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableF(func() map[string]interface{} { - return logging.LoggableMap{ - "ID": id, - "Target": envelope.Peer.Pretty(), - "Block": block.Cid().String(), - } - })) + log.Debugw("Bitswap.TaskWorker.Work", + "Target", envelope.Peer, + "Block", block.Cid(), + ) outgoing.AddBlock(block) } for _, blockPresence := range envelope.Message.BlockPresences() { @@ -143,9 +139,9 @@ func (bs *Bitswap) provideWorker(px process.Process) { // replace token when done <-limit }() - ev := logging.LoggableMap{"ID": wid} - defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, k).Done() + log.Debugw("Bitswap.ProvideWorker.Start", "ID", wid, "cid", k) + defer log.Debugw("Bitswap.ProvideWorker.End", "ID", wid, "cid", k) ctx, 
cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx defer cancel() @@ -158,8 +154,7 @@ func (bs *Bitswap) provideWorker(px process.Process) { // worker spawner, reads from bs.provideKeys until it closes, spawning a // _ratelimited_ number of workers to handle each key. for wid := 2; ; wid++ { - ev := logging.LoggableMap{"ID": 1} - log.Event(ctx, "Bitswap.ProvideWorker.Loop", ev) + log.Debug("Bitswap.ProvideWorker.Loop") select { case <-px.Closing(): From 60500671c9fc6dce45125fb701a51901769b81dd Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 30 Jan 2020 21:57:24 -0800 Subject: [PATCH 0834/1035] chore: go fmt This commit was moved from ipfs/go-bitswap@0f3036f84020d4af197930f1b5dd35e4255cabcf --- bitswap/internal/decision/engine_test.go | 2 +- bitswap/internal/testutil/testutil.go | 2 +- bitswap/network/ipfs_impl_test.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index ebfbaacda..f6175762d 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -11,9 +11,9 @@ import ( "time" lu "github.com/ipfs/go-bitswap/internal/logutil" + "github.com/ipfs/go-bitswap/internal/testutil" message "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" - "github.com/ipfs/go-bitswap/internal/testutil" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" diff --git a/bitswap/internal/testutil/testutil.go b/bitswap/internal/testutil/testutil.go index 48c306ab0..54706dca6 100644 --- a/bitswap/internal/testutil/testutil.go +++ b/bitswap/internal/testutil/testutil.go @@ -3,8 +3,8 @@ package testutil import ( "math/rand" - bsmsg "github.com/ipfs/go-bitswap/message" bssd "github.com/ipfs/go-bitswap/internal/sessiondata" + bsmsg "github.com/ipfs/go-bitswap/message" "github.com/ipfs/go-bitswap/wantlist" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" diff 
--git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index 6b8059fa5..e5b2475f6 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" + tn "github.com/ipfs/go-bitswap/internal/testnet" bsmsg "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" bsnet "github.com/ipfs/go-bitswap/network" - tn "github.com/ipfs/go-bitswap/internal/testnet" blocksutil "github.com/ipfs/go-ipfs-blocksutil" mockrouting "github.com/ipfs/go-ipfs-routing/mock" From ebd27cd9970774777f57cc650a626a2d56124a3b Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 12 Feb 2020 18:02:04 -0800 Subject: [PATCH 0835/1035] feat: debounce wants manually This: * Makes it easy to send immediately if we wait too long and/or if we have enough to send. * Is significantly more efficient than the debounce library as it doesn't spin off a bunch of "after" timers. fixes #245 This commit was moved from ipfs/go-bitswap@777c0d9ab790560b0813dd786e09d0d5b7299393 --- bitswap/internal/messagequeue/messagequeue.go | 61 ++++++++++++++----- 1 file changed, 47 insertions(+), 14 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 15f8100d2..4610a95b2 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -6,8 +6,6 @@ import ( "sync" "time" - debounce "github.com/bep/debounce" - bsmsg "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" bsnet "github.com/ipfs/go-bitswap/network" @@ -34,6 +32,11 @@ const ( maxPriority = math.MaxInt32 // sendMessageDebounce is the debounce duration when calling sendMessage() sendMessageDebounce = time.Millisecond + // when we reach sendMessaageCuttoff wants/cancels, we'll send the message immediately. 
+ sendMessageCuttoff = 100 + // when we debounce for more than sendMessageMaxDelay, we'll send the + // message immediately. + sendMessageMaxDelay = 100 * time.Millisecond ) // MessageNetwork is any network that can connect peers and generate a message @@ -54,9 +57,8 @@ type MessageQueue struct { maxMessageSize int sendErrorBackoff time.Duration - signalWorkReady func() - outgoingWork chan struct{} - done chan struct{} + outgoingWork chan time.Time + done chan struct{} // Take lock whenever any of these variables are modified wllock sync.Mutex @@ -165,17 +167,13 @@ func newMessageQueue(ctx context.Context, p peer.ID, network MessageNetwork, bcstWants: newRecallWantList(), peerWants: newRecallWantList(), cancels: cid.NewSet(), - outgoingWork: make(chan struct{}, 1), + outgoingWork: make(chan time.Time, 1), done: make(chan struct{}), rebroadcastInterval: defaultRebroadcastInterval, sendErrorBackoff: sendErrorBackoff, priority: maxPriority, } - // Apply debounce to the work ready signal (which triggers sending a message) - debounced := debounce.New(sendMessageDebounce) - mq.signalWorkReady = func() { debounced(mq.onWorkReady) } - return mq } @@ -285,11 +283,42 @@ func (mq *MessageQueue) onShutdown() { func (mq *MessageQueue) runQueue() { defer mq.onShutdown() + // Create a timer for debouncing scheduled work. + scheduleWork := time.NewTimer(0) + if !scheduleWork.Stop() { + <-scheduleWork.C + } + + var workScheduled time.Time for { select { case <-mq.rebroadcastTimer.C: mq.rebroadcastWantlist() - case <-mq.outgoingWork: + case when := <-mq.outgoingWork: + // If we have work scheduled, cancel the timer. If we + // don't, record when the work was scheduled. + // We send the time on the channel so we accurately + // track delay. + if workScheduled.IsZero() { + workScheduled = when + } else if !scheduleWork.Stop() { + <-scheduleWork.C + } + + // If we have too many updates and/or we've waited too + // long, send immediately. 
+ if mq.pendingWorkCount() > sendMessageCuttoff || + time.Since(workScheduled) >= sendMessageMaxDelay { + mq.sendIfReady() + workScheduled = time.Time{} + } else { + // Otherwise, extend the timer. + scheduleWork.Reset(sendMessageDebounce) + } + case <-scheduleWork.C: + // We have work scheduled and haven't seen any updates + // in sendMessageDebounce. Send immediately. + workScheduled = time.Time{} mq.sendIfReady() case <-mq.done: if mq.sender != nil { @@ -335,9 +364,9 @@ func (mq *MessageQueue) transferRebroadcastWants() bool { return true } -func (mq *MessageQueue) onWorkReady() { +func (mq *MessageQueue) signalWorkReady() { select { - case mq.outgoingWork <- struct{}{}: + case mq.outgoingWork <- time.Now(): default: } } @@ -443,10 +472,14 @@ func (mq *MessageQueue) simulateDontHaveWithTimeout(msg bsmsg.BitSwapMessage) { // } func (mq *MessageQueue) hasPendingWork() bool { + return mq.pendingWorkCount() > 0 +} + +func (mq *MessageQueue) pendingWorkCount() int { mq.wllock.Lock() defer mq.wllock.Unlock() - return mq.bcstWants.pending.Len() > 0 || mq.peerWants.pending.Len() > 0 || mq.cancels.Len() > 0 + return mq.bcstWants.pending.Len() + mq.peerWants.pending.Len() + mq.cancels.Len() } func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwapMessage, func()) { From 8a2bb032c504b3db0ee226d01e587fdf3ae07cbd Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 13 Feb 2020 12:01:52 -0500 Subject: [PATCH 0836/1035] refactor: adjust message queue debounce limits This commit was moved from ipfs/go-bitswap@7ccab36f6a6e3038d94ef11b60d645b1de442feb --- bitswap/internal/messagequeue/messagequeue.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 4610a95b2..e60d52c3d 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -32,11 +32,11 @@ const ( maxPriority = 
math.MaxInt32 // sendMessageDebounce is the debounce duration when calling sendMessage() sendMessageDebounce = time.Millisecond - // when we reach sendMessaageCuttoff wants/cancels, we'll send the message immediately. - sendMessageCuttoff = 100 + // when we reach sendMessageCutoff wants/cancels, we'll send the message immediately. + sendMessageCutoff = 256 // when we debounce for more than sendMessageMaxDelay, we'll send the // message immediately. - sendMessageMaxDelay = 100 * time.Millisecond + sendMessageMaxDelay = 20 * time.Millisecond ) // MessageNetwork is any network that can connect peers and generate a message @@ -286,6 +286,8 @@ func (mq *MessageQueue) runQueue() { // Create a timer for debouncing scheduled work. scheduleWork := time.NewTimer(0) if !scheduleWork.Stop() { + // Need to drain the timer if Stop() returns false + // See: https://golang.org/pkg/time/#Timer.Stop <-scheduleWork.C } @@ -302,12 +304,13 @@ func (mq *MessageQueue) runQueue() { if workScheduled.IsZero() { workScheduled = when } else if !scheduleWork.Stop() { + // Need to drain the timer if Stop() returns false <-scheduleWork.C } // If we have too many updates and/or we've waited too // long, send immediately. 
- if mq.pendingWorkCount() > sendMessageCuttoff || + if mq.pendingWorkCount() > sendMessageCutoff || time.Since(workScheduled) >= sendMessageMaxDelay { mq.sendIfReady() workScheduled = time.Time{} From 80a7211771866c3ebc88cb8e376d16690fd16dc2 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 14 Feb 2020 15:52:50 -0500 Subject: [PATCH 0837/1035] fix: prune peers that send too many consecutive DONT_HAVEs This commit was moved from ipfs/go-bitswap@4d2bdc274b4862e835d058646d8d828d3631150c --- bitswap/internal/session/sessionwantsender.go | 52 ++++- .../session/sessionwantsender_test.go | 195 ++++++++++++++++++ 2 files changed, 237 insertions(+), 10 deletions(-) diff --git a/bitswap/internal/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go index defb3578b..4448f8d52 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -9,8 +9,13 @@ import ( peer "github.com/libp2p/go-libp2p-core/peer" ) -// Maximum number of changes to accept before blocking -const changesBufferSize = 128 +const ( + // Maximum number of changes to accept before blocking + changesBufferSize = 128 + // If the session receives this many DONT_HAVEs in a row from a peer, + // it prunes the peer from the session + peerDontHaveLimit = 16 +) // BlockPresence indicates whether a peer has a block. 
// Note that the order is important, we decide which peer to send a want to @@ -76,13 +81,14 @@ type sessionWantSender struct { changes chan change // Information about each want indexed by CID wants map[cid.Cid]*wantInfo + // Keeps track of how many consecutive DONT_HAVEs a peer has sent + peerConsecutiveDontHaves map[peer.ID]int // Tracks which peers we have send want-block to swbt *sentWantBlocksTracker // Maintains a list of peers and whether they are connected peerAvlMgr *peerAvailabilityManager // Tracks the number of blocks each peer sent us peerRspTrkr *peerResponseTracker - // Sends wants to peers pm PeerManager // Keeps track of which peer has / doesn't have a block @@ -97,13 +103,14 @@ func newSessionWantSender(ctx context.Context, sid uint64, pm PeerManager, bpm * onSend onSendFn, onPeersExhausted onPeersExhaustedFn) sessionWantSender { spm := sessionWantSender{ - ctx: ctx, - sessionID: sid, - changes: make(chan change, changesBufferSize), - wants: make(map[cid.Cid]*wantInfo), - swbt: newSentWantBlocksTracker(), - peerAvlMgr: newPeerAvailabilityManager(), - peerRspTrkr: newPeerResponseTracker(), + ctx: ctx, + sessionID: sid, + changes: make(chan change, changesBufferSize), + wants: make(map[cid.Cid]*wantInfo), + peerConsecutiveDontHaves: make(map[peer.ID]int), + swbt: newSentWantBlocksTracker(), + peerAvlMgr: newPeerAvailabilityManager(), + peerRspTrkr: newPeerResponseTracker(), pm: pm, bpm: bpm, @@ -258,6 +265,9 @@ func (spm *sessionWantSender) processAvailability(availability map[peer.ID]bool) if isNowAvailable { newlyAvailable = append(newlyAvailable, p) } + // Reset the count of consecutive DONT_HAVEs received from the + // peer + delete(spm.peerConsecutiveDontHaves, p) } } } @@ -265,6 +275,12 @@ func (spm *sessionWantSender) processAvailability(availability map[peer.ID]bool) return newlyAvailable } +// isAvailable indicates whether the peer is available and whether +// it's been tracked by the Session (used by the tests) +func (spm 
*sessionWantSender) isAvailable(p peer.ID) (bool, bool) { + return spm.peerAvlMgr.isAvailable(p) +} + // trackWant creates a new entry in the map of CID -> want info func (spm *sessionWantSender) trackWant(c cid.Cid) { // fmt.Printf("trackWant %s\n", lu.C(c)) @@ -285,6 +301,7 @@ func (spm *sessionWantSender) trackWant(c cid.Cid) { // processUpdates processes incoming blocks and HAVE / DONT_HAVEs func (spm *sessionWantSender) processUpdates(updates []update) { + prunePeers := make(map[peer.ID]struct{}) dontHaves := cid.NewSet() for _, upd := range updates { // TODO: If there is a timeout for the want from the peer, remove want.sentTo @@ -308,12 +325,20 @@ func (spm *sessionWantSender) processUpdates(updates []update) { spm.setWantSentTo(c, "") } } + + // Track the number of consecutive DONT_HAVEs each peer receives + if spm.peerConsecutiveDontHaves[upd.from] == peerDontHaveLimit { + prunePeers[upd.from] = struct{}{} + } else { + spm.peerConsecutiveDontHaves[upd.from]++ + } } // For each HAVE for _, c := range upd.haves { // Update the block presence for the peer spm.updateWantBlockPresence(c, upd.from) + delete(spm.peerConsecutiveDontHaves, upd.from) } // For each received block @@ -325,6 +350,7 @@ func (spm *sessionWantSender) processUpdates(updates []update) { // us the block spm.peerRspTrkr.receivedBlockFrom(upd.from) } + delete(spm.peerConsecutiveDontHaves, upd.from) } } @@ -337,6 +363,12 @@ func (spm *sessionWantSender) processUpdates(updates []update) { spm.onPeersExhausted(newlyExhausted) } } + + // If any peers have sent us too many consecutive DONT_HAVEs, remove them + // from the session + for p := range prunePeers { + spm.SignalAvailability(p, false) + } } // convenience structs for passing around want-blocks and want-haves for a peer diff --git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go index f49bce9de..75c224d6b 100644 --- a/bitswap/internal/session/sessionwantsender_test.go +++ 
b/bitswap/internal/session/sessionwantsender_test.go @@ -346,3 +346,198 @@ func TestPeersExhausted(t *testing.T) { t.Fatal("Wrong keys") } } + +func TestConsecutiveDontHaveLimit(t *testing.T) { + cids := testutil.GenerateCids(peerDontHaveLimit + 10) + p := testutil.GeneratePeers(1)[0] + sid := uint64(1) + pm := newMockPeerManager() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + + go spm.Run() + + // Add all cids as wants + spm.Add(cids) + + // Receive a HAVE from peer (adds it to the session) + bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) + spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}, true) + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Peer should be available + if avail, ok := spm.isAvailable(p); !ok || !avail { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVEs from peer that do not exceed limit + for _, c := range cids[1:peerDontHaveLimit] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}, false) + } + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Peer should be available + if avail, ok := spm.isAvailable(p); !ok || !avail { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVEs from peer that exceed limit + for _, c := range cids[peerDontHaveLimit:] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}, false) + } + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Session should remove peer + if avail, _ := spm.isAvailable(p); avail { + t.Fatal("Expected peer not to be available") + } +} + +func TestConsecutiveDontHaveLimitInterrupted(t *testing.T) { + cids := testutil.GenerateCids(peerDontHaveLimit + 10) + p := testutil.GeneratePeers(1)[0] + sid := uint64(1) + 
pm := newMockPeerManager() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + + go spm.Run() + + // Add all cids as wants + spm.Add(cids) + + // Receive a HAVE from peer (adds it to the session) + bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) + spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}, true) + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Peer should be available + if avail, ok := spm.isAvailable(p); !ok || !avail { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVE then HAVE then DONT_HAVE from peer, + // where consecutive DONT_HAVEs would have exceeded limit + // (but they are not consecutive) + for _, c := range cids[1:peerDontHaveLimit] { + // DONT_HAVEs + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}, false) + } + for _, c := range cids[peerDontHaveLimit : peerDontHaveLimit+1] { + // HAVEs + bpm.ReceiveFrom(p, []cid.Cid{c}, []cid.Cid{}) + spm.Update(p, []cid.Cid{}, []cid.Cid{c}, []cid.Cid{}, false) + } + for _, c := range cids[peerDontHaveLimit+1:] { + // DONT_HAVEs + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}, false) + } + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Peer should be available + if avail, ok := spm.isAvailable(p); !ok || !avail { + t.Fatal("Expected peer to be available") + } +} + +func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { + cids := testutil.GenerateCids(peerDontHaveLimit + 10) + p := testutil.GeneratePeers(1)[0] + sid := uint64(1) + pm := newMockPeerManager() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + + 
go spm.Run() + + // Add all cids as wants + spm.Add(cids) + + // Receive a HAVE from peer (adds it to the session) + bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) + spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}, true) + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Peer should be available + if avail, ok := spm.isAvailable(p); !ok || !avail { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVEs from peer that exceed limit + for _, c := range cids[1 : peerDontHaveLimit+2] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}, false) + } + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Session should remove peer + if avail, _ := spm.isAvailable(p); avail { + t.Fatal("Expected peer not to be available") + } + + // Receive a HAVE from peer (adds it back into the session) + bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) + spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}, true) + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Peer should be available + if avail, ok := spm.isAvailable(p); !ok || !avail { + t.Fatal("Expected peer to be available") + } + + cids2 := testutil.GenerateCids(peerDontHaveLimit + 10) + + // Receive DONT_HAVEs from peer that don't exceed limit + for _, c := range cids2[1:peerDontHaveLimit] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}, false) + } + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Peer should be available + if avail, ok := spm.isAvailable(p); !ok || !avail { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVEs from peer that exceed limit + for _, c := range cids2[peerDontHaveLimit:] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}, false) + } + + // Wait for processing to complete + time.Sleep(5 * 
time.Millisecond) + + // Session should remove peer + if avail, _ := spm.isAvailable(p); avail { + t.Fatal("Expected peer not to be available") + } +} From 8f238d2325e8ab90dc3b2c46e8bedabc6aec1d02 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Tue, 18 Feb 2020 10:46:19 -0500 Subject: [PATCH 0838/1035] fix: expose decision.Receipt externally (#268) This commit was moved from ipfs/go-bitswap@d7c2ca39f6d1e6cafe5887bbf1182b0279f84c2a --- bitswap/decision/decision.go | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 bitswap/decision/decision.go diff --git a/bitswap/decision/decision.go b/bitswap/decision/decision.go new file mode 100644 index 000000000..8dd310f69 --- /dev/null +++ b/bitswap/decision/decision.go @@ -0,0 +1,6 @@ +package decision + +import intdec "github.com/ipfs/go-bitswap/internal/decision" + +// Expose type externally +type Receipt = intdec.Receipt From 32209aaf7141b9552a10c79bb117ff2e5e76c750 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Thu, 27 Feb 2020 19:07:18 -0500 Subject: [PATCH 0839/1035] fix: possible deadlock scenario in session want sender (#271) This commit was moved from ipfs/go-bitswap@a44198e38e20f5fdaaaaeff1c5e39451798e7e53 --- bitswap/internal/session/sessionwantsender.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/bitswap/internal/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go index 4448f8d52..702146a6b 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -366,8 +366,12 @@ func (spm *sessionWantSender) processUpdates(updates []update) { // If any peers have sent us too many consecutive DONT_HAVEs, remove them // from the session - for p := range prunePeers { - spm.SignalAvailability(p, false) + if len(prunePeers) > 0 { + go func() { + for p := range prunePeers { + spm.SignalAvailability(p, false) + } + }() } } From ef84085ab17e39a5ed1c2a9925ff71c5650682ae Mon Sep 17 00:00:00 2001 From: dirkmc Date: Mon, 2 Mar 
2020 09:09:39 -0500 Subject: [PATCH 0840/1035] Ensure broadcast when remaining peer becomes unavailable (#272) * fix: ensure broadcast when peer becomes unavailable and all other peers sent DONT_HAVE for CID * fix: lint warnings * refactor: simplify session want sender DONT_HAVE list * fix: flaky test * test: add session exhausted wants test * docs: improve sessionWantSender processAvailability docs This commit was moved from ipfs/go-bitswap@0ba089b4a7c3a5e6c1087e29cfceafca715d8dcd --- bitswap/bitswap_with_sessions_test.go | 15 ++- bitswap/internal/messagequeue/messagequeue.go | 2 +- bitswap/internal/session/session.go | 42 +++++--- bitswap/internal/session/session_test.go | 43 ++++++++ bitswap/internal/session/sessionwantsender.go | 81 +++++++++++++--- .../session/sessionwantsender_test.go | 97 +++++++++++++++++++ bitswap/workers.go | 2 +- 7 files changed, 250 insertions(+), 32 deletions(-) diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/bitswap_with_sessions_test.go index 28d3a3255..3b5b68e17 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -12,6 +12,7 @@ import ( blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" + delay "github.com/ipfs/go-ipfs-delay" tu "github.com/libp2p/go-libp2p-testing/etc" ) @@ -216,7 +217,10 @@ func TestFetchAfterDisconnect(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{bitswap.ProviderSearchDelay(10 * time.Millisecond)}) + ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{ + bitswap.ProviderSearchDelay(10 * time.Millisecond), + bitswap.RebroadcastDelay(delay.Fixed(15 * time.Millisecond)), + }) defer ig.Close() bgen := blocksutil.NewBlockGenerator() @@ -264,6 +268,8 @@ func TestFetchAfterDisconnect(t *testing.T) { t.Fatal(err) } + time.Sleep(20 * time.Millisecond) + // Provide remaining blocks lastBlks := 
blks[5:] for _, block := range lastBlks { @@ -276,8 +282,11 @@ func TestFetchAfterDisconnect(t *testing.T) { // Should get last 5 blocks for i := 0; i < 5; i++ { - b := <-ch - got = append(got, b) + select { + case b := <-ch: + got = append(got, b) + case <-ctx.Done(): + } } if err := assertBlockLists(got, blks); err != nil { diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index e60d52c3d..be0740000 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -582,7 +582,7 @@ func (mq *MessageQueue) attemptSendAndRecovery(message bsmsg.BitSwapMessage) boo return true case <-time.After(mq.sendErrorBackoff): // wait 100ms in case disconnect notifications are still propagating - log.Warning("SendMsg errored but neither 'done' nor context.Done() were set") + log.Warn("SendMsg errored but neither 'done' nor context.Done() were set") } err = mq.initializeSender() diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index b20db308c..c41a65d4a 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -227,9 +227,18 @@ func (s *Session) onWantsSent(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.C } func (s *Session) onPeersExhausted(ks []cid.Cid) { + // We don't want to block the sessionWantSender if the incoming channel + // is full. So if we can't immediately send on the incoming channel spin + // it off into a go-routine. 
select { case s.incoming <- op{op: opBroadcast, keys: ks}: - case <-s.ctx.Done(): + default: + go func() { + select { + case s.incoming <- op{op: opBroadcast, keys: ks}: + case <-s.ctx.Done(): + } + }() } } @@ -287,12 +296,12 @@ func (s *Session) run(ctx context.Context) { case opCancel: s.sw.CancelPending(oper.keys) case opBroadcast: - s.handleIdleTick(ctx) + s.broadcastWantHaves(ctx, oper.keys) default: panic("unhandled operation") } case <-s.idleTick.C: - s.handleIdleTick(ctx) + s.broadcastWantHaves(ctx, nil) case <-s.periodicSearchTimer.C: s.handlePeriodicSearch(ctx) case baseTickDelay := <-s.tickDelayReqs: @@ -304,24 +313,35 @@ func (s *Session) run(ctx context.Context) { } } -func (s *Session) handleIdleTick(ctx context.Context) { - live := s.sw.PrepareBroadcast() +// Called when the session hasn't received any blocks for some time, or when +// all peers in the session have sent DONT_HAVE for a particular set of CIDs. +// Send want-haves to all connected peers, and search for new peers with the CID. 
+func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { + // If this broadcast is because of an idle timeout (we haven't received + // any blocks for a while) then broadcast all pending wants + if wants == nil { + wants = s.sw.PrepareBroadcast() + } + // log.Warnf("\n\n\n\n\nSes%d: broadcast %d keys\n\n\n\n\n", s.id, len(live)) // log.Infof("Ses%d: broadcast %d keys\n", s.id, len(live)) - log.Warnf("Ses%d: broadcast %d keys", s.id, len(live)) // Broadcast a want-have for the live wants to everyone we're connected to - s.sprm.RecordPeerRequests(nil, live) - s.wm.BroadcastWantHaves(ctx, s.id, live) + s.sprm.RecordPeerRequests(nil, wants) + s.wm.BroadcastWantHaves(ctx, s.id, wants) // do not find providers on consecutive ticks // -- just rely on periodic search widening - if len(live) > 0 && (s.consecutiveTicks == 0) { - s.sprm.FindMorePeers(ctx, live[0]) + if len(wants) > 0 && (s.consecutiveTicks == 0) { + // Search for providers who have the first want in the list. + // Typically if the provider has the first block they will have + // the rest of the blocks also. 
+ log.Warnf("Ses%d: FindMorePeers with want 0 of %d wants", s.id, len(wants)) + s.sprm.FindMorePeers(ctx, wants[0]) } s.resetIdleTick() - // If we have live wants + // If we have live wants record a consecutive tick if s.sw.HasLiveWants() { s.consecutiveTicks++ } diff --git a/bitswap/internal/session/session_test.go b/bitswap/internal/session/session_test.go index 21e196f7f..b3ae26b22 100644 --- a/bitswap/internal/session/session_test.go +++ b/bitswap/internal/session/session_test.go @@ -229,6 +229,49 @@ func TestSessionFindMorePeers(t *testing.T) { } } +func TestSessionOnPeersExhausted(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer cancel() + fwm := newFakeWantManager() + fpm := newFakeSessionPeerManager() + sim := bssim.New() + bpm := bsbpm.New() + notif := notifications.New() + defer notif.Shutdown() + id := testutil.GenerateSessionID() + session := New(ctx, id, fwm, fpm, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") + blockGenerator := blocksutil.NewBlockGenerator() + blks := blockGenerator.Blocks(broadcastLiveWantsLimit + 5) + var cids []cid.Cid + for _, block := range blks { + cids = append(cids, block.Cid()) + } + _, err := session.GetBlocks(ctx, cids) + + if err != nil { + t.Fatal("error getting blocks") + } + + // Wait for initial want request + receivedWantReq := <-fwm.wantReqs + + // Should have sent out broadcast request for wants + if len(receivedWantReq.cids) != broadcastLiveWantsLimit { + t.Fatal("did not enqueue correct initial number of wants") + } + + // Signal that all peers have send DONT_HAVE for two of the wants + session.onPeersExhausted(cids[len(cids)-2:]) + + // Wait for want request + receivedWantReq = <-fwm.wantReqs + + // Should have sent out broadcast request for wants + if len(receivedWantReq.cids) != 2 { + t.Fatal("did not enqueue correct initial number of wants") + } +} + func TestSessionFailingToGetFirstBlock(t *testing.T) { ctx, cancel := 
context.WithTimeout(context.Background(), 2*time.Second) defer cancel() diff --git a/bitswap/internal/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go index 702146a6b..38c62352c 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -236,10 +236,14 @@ func (spm *sessionWantSender) onChange(changes []change) { } // Update peer availability - newlyAvailable := spm.processAvailability(availability) + newlyAvailable, newlyUnavailable := spm.processAvailability(availability) // Update wants - spm.processUpdates(updates) + dontHaves := spm.processUpdates(updates) + + // Check if there are any wants for which all peers have indicated they + // don't have the want + spm.checkForExhaustedWants(dontHaves, newlyUnavailable) // If there are some connected peers, send any pending wants if spm.peerAvlMgr.haveAvailablePeers() { @@ -251,8 +255,12 @@ func (spm *sessionWantSender) onChange(changes []change) { // processAvailability updates the want queue with any changes in // peer availability -func (spm *sessionWantSender) processAvailability(availability map[peer.ID]bool) []peer.ID { +// It returns the peers that have become +// - newly available +// - newly unavailable +func (spm *sessionWantSender) processAvailability(availability map[peer.ID]bool) (avail []peer.ID, unavail []peer.ID) { var newlyAvailable []peer.ID + var newlyUnavailable []peer.ID for p, isNowAvailable := range availability { // Make sure this is a peer that the session is actually interested in if wasAvailable, ok := spm.peerAvlMgr.isAvailable(p); ok { @@ -264,6 +272,8 @@ func (spm *sessionWantSender) processAvailability(availability map[peer.ID]bool) spm.updateWantsPeerAvailability(p, isNowAvailable) if isNowAvailable { newlyAvailable = append(newlyAvailable, p) + } else { + newlyUnavailable = append(newlyUnavailable, p) } // Reset the count of consecutive DONT_HAVEs received from the // peer @@ -272,7 +282,7 @@ func (spm 
*sessionWantSender) processAvailability(availability map[peer.ID]bool) } } - return newlyAvailable + return newlyAvailable, newlyUnavailable } // isAvailable indicates whether the peer is available and whether @@ -299,8 +309,9 @@ func (spm *sessionWantSender) trackWant(c cid.Cid) { } } -// processUpdates processes incoming blocks and HAVE / DONT_HAVEs -func (spm *sessionWantSender) processUpdates(updates []update) { +// processUpdates processes incoming blocks and HAVE / DONT_HAVEs. +// It returns all DONT_HAVEs. +func (spm *sessionWantSender) processUpdates(updates []update) []cid.Cid { prunePeers := make(map[peer.ID]struct{}) dontHaves := cid.NewSet() for _, upd := range updates { @@ -354,16 +365,6 @@ func (spm *sessionWantSender) processUpdates(updates []update) { } } - // If all available peers for a cid sent a DONT_HAVE, signal to the session - // that we've exhausted available peers - if dontHaves.Len() > 0 { - exhausted := spm.bpm.AllPeersDoNotHaveBlock(spm.peerAvlMgr.availablePeers(), dontHaves.Keys()) - newlyExhausted := spm.newlyExhausted(exhausted) - if len(newlyExhausted) > 0 { - spm.onPeersExhausted(newlyExhausted) - } - } - // If any peers have sent us too many consecutive DONT_HAVEs, remove them // from the session if len(prunePeers) > 0 { @@ -373,6 +374,54 @@ func (spm *sessionWantSender) processUpdates(updates []update) { } }() } + + return dontHaves.Keys() +} + +// checkForExhaustedWants checks if there are any wants for which all peers +// have sent a DONT_HAVE. We call these "exhausted" wants. 
+func (spm *sessionWantSender) checkForExhaustedWants(dontHaves []cid.Cid, newlyUnavailable []peer.ID) { + // If there are no new DONT_HAVEs, and no peers became unavailable, then + // we don't need to check for exhausted wants + if len(dontHaves) == 0 && len(newlyUnavailable) == 0 { + return + } + + // We need to check each want for which we just received a DONT_HAVE + wants := dontHaves + + // If a peer just became unavailable, then we need to check all wants + // (because it may be the last peer who hadn't sent a DONT_HAVE for a CID) + if len(newlyUnavailable) > 0 { + // Collect all pending wants + wants = make([]cid.Cid, len(spm.wants)) + for c := range spm.wants { + wants = append(wants, c) + } + + // If the last available peer in the session has become unavailable + // then we need to broadcast all pending wants + if len(spm.peerAvlMgr.availablePeers()) == 0 { + spm.processExhaustedWants(wants) + return + } + } + + // If all available peers for a cid sent a DONT_HAVE, signal to the session + // that we've exhausted available peers + if len(wants) > 0 { + exhausted := spm.bpm.AllPeersDoNotHaveBlock(spm.peerAvlMgr.availablePeers(), wants) + spm.processExhaustedWants(exhausted) + } +} + +// processExhaustedWants filters the list so that only those wants that haven't +// already been marked as exhausted are passed to onPeersExhausted() +func (spm *sessionWantSender) processExhaustedWants(exhausted []cid.Cid) { + newlyExhausted := spm.newlyExhausted(exhausted) + if len(newlyExhausted) > 0 { + spm.onPeersExhausted(newlyExhausted) + } } // convenience structs for passing around want-blocks and want-haves for a peer diff --git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go index 75c224d6b..ecea497bb 100644 --- a/bitswap/internal/session/sessionwantsender_test.go +++ b/bitswap/internal/session/sessionwantsender_test.go @@ -347,6 +347,103 @@ func TestPeersExhausted(t *testing.T) { } } +// Tests that when +// - 
all the peers except one have sent a DONT_HAVE for a CID +// - the remaining peer becomes unavailable +// onPeersExhausted should be sent for that CID +func TestPeersExhaustedLastWaitingPeerUnavailable(t *testing.T) { + cids := testutil.GenerateCids(2) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + + var exhausted []cid.Cid + onPeersExhausted := func(ks []cid.Cid) { + exhausted = append(exhausted, ks...) + } + spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + + go spm.Run() + + // add cid0, cid1 + spm.Add(cids) + + // peerA: HAVE cid0 + bpm.ReceiveFrom(peerA, []cid.Cid{cids[0]}, []cid.Cid{}) + // Note: this also registers peer A as being available + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true) + // peerB: HAVE cid0 + bpm.ReceiveFrom(peerB, []cid.Cid{cids[0]}, []cid.Cid{}) + // Note: this also registers peer B as being available + spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true) + + // peerA: DONT_HAVE cid1 + bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]}) + spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[0]}, false) + + time.Sleep(5 * time.Millisecond) + + // peerB: becomes unavailable + spm.SignalAvailability(peerB, false) + + time.Sleep(5 * time.Millisecond) + + // All remaining peers (peer A) have sent us a DONT_HAVE for cid1, + // so expect that onPeersExhausted() will be called with cid1 + if !testutil.MatchKeysIgnoreOrder(exhausted, []cid.Cid{cids[1]}) { + t.Fatal("Wrong keys") + } +} + +// Tests that when all the peers are removed from the session +// onPeersExhausted should be called with all outstanding CIDs +func TestPeersExhaustedAllPeersUnavailable(t *testing.T) { + cids := testutil.GenerateCids(3) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm 
:= newMockPeerManager() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + + var exhausted []cid.Cid + onPeersExhausted := func(ks []cid.Cid) { + exhausted = append(exhausted, ks...) + } + spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + + go spm.Run() + + // add cid0, cid1, cid2 + spm.Add(cids) + + // peerA: receive block for cid0 (and register peer A with sessionWantSender) + spm.Update(peerA, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{}, true) + // peerB: HAVE cid1 + bpm.ReceiveFrom(peerB, []cid.Cid{cids[0]}, []cid.Cid{}) + // Note: this also registers peer B as being available + spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true) + + time.Sleep(5 * time.Millisecond) + + // peerA and peerB: become unavailable + spm.SignalAvailability(peerA, false) + spm.SignalAvailability(peerB, false) + + time.Sleep(5 * time.Millisecond) + + // Expect that onPeersExhausted() will be called with all cids for blocks + // that have not been received + if !testutil.MatchKeysIgnoreOrder(exhausted, []cid.Cid{cids[1], cids[2]}) { + t.Fatal("Wrong keys") + } +} + func TestConsecutiveDontHaveLimit(t *testing.T) { cids := testutil.GenerateCids(peerDontHaveLimit + 10) p := testutil.GeneratePeers(1)[0] diff --git a/bitswap/workers.go b/bitswap/workers.go index fe2430533..04dc2757b 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -147,7 +147,7 @@ func (bs *Bitswap) provideWorker(px process.Process) { defer cancel() if err := bs.network.Provide(ctx, k); err != nil { - log.Warning(err) + log.Warn(err) } } From 1465ef2b9111d077cfed26955a983d4df492d7fa Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Tue, 3 Mar 2020 17:46:36 -0500 Subject: [PATCH 0841/1035] refactor: simplify session peer management This commit was moved from ipfs/go-bitswap@960f6971b1b853595a02107027d01405733d1e72 --- bitswap/bitswap.go | 4 +- .../session/peeravailabilitymanager.go | 57 --- 
.../session/peeravailabilitymanager_test.go | 74 ---- bitswap/internal/session/session.go | 205 +++++---- bitswap/internal/session/session_test.go | 91 ++-- bitswap/internal/session/sessionwants.go | 42 +- bitswap/internal/session/sessionwantsender.go | 268 ++++++------ .../sessionmanager/sessionmanager_test.go | 12 +- .../sessionpeermanager/latencytracker.go | 77 ---- .../internal/sessionpeermanager/peerdata.go | 41 -- .../sessionpeermanager/sessionpeermanager.go | 400 +++--------------- 11 files changed, 377 insertions(+), 894 deletions(-) delete mode 100644 bitswap/internal/session/peeravailabilitymanager.go delete mode 100644 bitswap/internal/session/peeravailabilitymanager_test.go delete mode 100644 bitswap/internal/sessionpeermanager/latencytracker.go delete mode 100644 bitswap/internal/sessionpeermanager/peerdata.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 5e1c5b05b..1b59dcd01 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -148,10 +148,10 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, provSearchDelay time.Duration, rebroadcastDelay delay.D, self peer.ID) bssm.Session { - return bssession.New(ctx, id, wm, spm, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) + return bssession.New(ctx, id, wm, spm, pqm, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) } sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.SessionPeerManager { - return bsspm.New(ctx, id, network.ConnectionManager(), pqm) + return bsspm.New(id, network.ConnectionManager()) } notif := notifications.New() sm := bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) diff --git a/bitswap/internal/session/peeravailabilitymanager.go b/bitswap/internal/session/peeravailabilitymanager.go deleted file mode 100644 index 31b887c62..000000000 --- a/bitswap/internal/session/peeravailabilitymanager.go +++ /dev/null @@ -1,57 +0,0 @@ -package session - -import ( - peer 
"github.com/libp2p/go-libp2p-core/peer" -) - -// peerAvailabilityManager keeps track of which peers have available space -// to receive want requests -type peerAvailabilityManager struct { - peerAvailable map[peer.ID]bool -} - -func newPeerAvailabilityManager() *peerAvailabilityManager { - return &peerAvailabilityManager{ - peerAvailable: make(map[peer.ID]bool), - } -} - -func (pam *peerAvailabilityManager) addPeer(p peer.ID) { - pam.peerAvailable[p] = false -} - -func (pam *peerAvailabilityManager) isAvailable(p peer.ID) (bool, bool) { - is, ok := pam.peerAvailable[p] - return is, ok -} - -func (pam *peerAvailabilityManager) setPeerAvailability(p peer.ID, isAvailable bool) { - pam.peerAvailable[p] = isAvailable -} - -func (pam *peerAvailabilityManager) haveAvailablePeers() bool { - for _, isAvailable := range pam.peerAvailable { - if isAvailable { - return true - } - } - return false -} - -func (pam *peerAvailabilityManager) availablePeers() []peer.ID { - var available []peer.ID - for p, isAvailable := range pam.peerAvailable { - if isAvailable { - available = append(available, p) - } - } - return available -} - -func (pam *peerAvailabilityManager) allPeers() []peer.ID { - var available []peer.ID - for p := range pam.peerAvailable { - available = append(available, p) - } - return available -} diff --git a/bitswap/internal/session/peeravailabilitymanager_test.go b/bitswap/internal/session/peeravailabilitymanager_test.go deleted file mode 100644 index 1d5b8f234..000000000 --- a/bitswap/internal/session/peeravailabilitymanager_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package session - -import ( - "testing" - - "github.com/ipfs/go-bitswap/internal/testutil" -) - -func TestPeerAvailabilityManager(t *testing.T) { - peers := testutil.GeneratePeers(2) - pam := newPeerAvailabilityManager() - - isAvailable, ok := pam.isAvailable(peers[0]) - if isAvailable || ok { - t.Fatal("expected not to have any availability yet") - } - - if pam.haveAvailablePeers() { - t.Fatal("expected 
not to have any availability yet") - } - - pam.addPeer(peers[0]) - isAvailable, ok = pam.isAvailable(peers[0]) - if !ok { - t.Fatal("expected to have a peer") - } - if isAvailable { - t.Fatal("expected not to have any availability yet") - } - if pam.haveAvailablePeers() { - t.Fatal("expected not to have any availability yet") - } - if len(pam.availablePeers()) != 0 { - t.Fatal("expected not to have any availability yet") - } - if len(pam.allPeers()) != 1 { - t.Fatal("expected one peer") - } - - pam.setPeerAvailability(peers[0], true) - isAvailable, ok = pam.isAvailable(peers[0]) - if !ok { - t.Fatal("expected to have a peer") - } - if !isAvailable { - t.Fatal("expected peer to be available") - } - if !pam.haveAvailablePeers() { - t.Fatal("expected peer to be available") - } - if len(pam.availablePeers()) != 1 { - t.Fatal("expected peer to be available") - } - if len(pam.allPeers()) != 1 { - t.Fatal("expected one peer") - } - - pam.addPeer(peers[1]) - if len(pam.availablePeers()) != 1 { - t.Fatal("expected one peer to be available") - } - if len(pam.allPeers()) != 2 { - t.Fatal("expected two peers") - } - - pam.setPeerAvailability(peers[0], false) - isAvailable, ok = pam.isAvailable(peers[0]) - if !ok { - t.Fatal("expected to have a peer") - } - if isAvailable { - t.Fatal("expected peer to not be available") - } -} diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index c41a65d4a..412484cc9 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -2,7 +2,6 @@ package session import ( "context" - "sync" "time" // lu "github.com/ipfs/go-bitswap/internal/logutil" @@ -49,23 +48,26 @@ type PeerManager interface { SendWants(ctx context.Context, peerId peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) } -// PeerManager provides an interface for tracking and optimize peers, and -// requesting more when neccesary. 
+// SessionPeerManager keeps track of peers in the session type SessionPeerManager interface { - // ReceiveFrom is called when blocks and HAVEs are received from a peer. - // It returns a boolean indicating if the peer is new to the session. - ReceiveFrom(peerId peer.ID, blks []cid.Cid, haves []cid.Cid) bool - // Peers returns the set of peers in the session. - Peers() *peer.Set - // FindMorePeers queries Content Routing to discover providers of the given cid - FindMorePeers(context.Context, cid.Cid) - // RecordPeerRequests records the time that a cid was requested from a peer - RecordPeerRequests([]peer.ID, []cid.Cid) - // RecordPeerResponse records the time that a response for a cid arrived - // from a peer - RecordPeerResponse(peer.ID, []cid.Cid) - // RecordCancels records that cancels were sent for the given cids - RecordCancels([]cid.Cid) + // PeersDiscovered indicates if any peers have been discovered yet + PeersDiscovered() bool + // Shutdown the SessionPeerManager + Shutdown() + // Adds a peer to the session, returning true if the peer is new + AddPeer(peer.ID) bool + // Removes a peer from the session, returning true if the peer existed + RemovePeer(peer.ID) bool + // All peers in the session + Peers() []peer.ID + // Whether there are any peers in the session + HasPeers() bool +} + +// ProviderFinder is used to find providers for a given key +type ProviderFinder interface { + // FindProvidersAsync searches for peers that provide the given CID + FindProvidersAsync(ctx context.Context, k cid.Cid) <-chan peer.ID } // opType is the kind of operation that is being processed by the event loop @@ -80,6 +82,8 @@ const ( opCancel // Broadcast want-haves opBroadcast + // Wants sent to peers + opWantsSent ) type op struct { @@ -92,10 +96,11 @@ type op struct { // info to, and who to request blocks from. 
type Session struct { // dependencies - ctx context.Context - wm WantManager - sprm SessionPeerManager - sim *bssim.SessionInterestManager + ctx context.Context + wm WantManager + sprm SessionPeerManager + providerFinder ProviderFinder + sim *bssim.SessionInterestManager sw sessionWants sws sessionWantSender @@ -127,6 +132,7 @@ func New(ctx context.Context, id uint64, wm WantManager, sprm SessionPeerManager, + providerFinder ProviderFinder, sim *bssim.SessionInterestManager, pm PeerManager, bpm *bsbpm.BlockPresenceManager, @@ -140,6 +146,7 @@ func New(ctx context.Context, ctx: ctx, wm: wm, sprm: sprm, + providerFinder: providerFinder, sim: sim, incoming: make(chan op, 128), latencyTrkr: latencyTracker{}, @@ -151,7 +158,7 @@ func New(ctx context.Context, periodicSearchDelay: periodicSearchDelay, self: self, } - s.sws = newSessionWantSender(ctx, id, pm, bpm, s.onWantsSent, s.onPeersExhausted) + s.sws = newSessionWantSender(ctx, id, pm, sprm, bpm, s.onWantsSent, s.onPeersExhausted) go s.run(ctx) @@ -164,44 +171,25 @@ func (s *Session) ID() uint64 { // ReceiveFrom receives incoming blocks from the given peer. func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { + // The SessionManager tells each Session about all keys that it may be + // interested in. Here the Session filters the keys to the ones that this + // particular Session is interested in. 
interestedRes := s.sim.FilterSessionInterested(s.id, ks, haves, dontHaves) ks = interestedRes[0] haves = interestedRes[1] dontHaves = interestedRes[2] // s.logReceiveFrom(from, ks, haves, dontHaves) - // Add any newly discovered peers that have blocks we're interested in to - // the peer set - isNewPeer := s.sprm.ReceiveFrom(from, ks, haves) - - // Record response timing only if the blocks came from the network - // (blocks can also be received from the local node) - if len(ks) > 0 && from != "" { - s.sprm.RecordPeerResponse(from, ks) - } - - // Update want potential - s.sws.Update(from, ks, haves, dontHaves, isNewPeer) + // Inform the session want sender that a message has been received + s.sws.Update(from, ks, haves, dontHaves) if len(ks) == 0 { return } - // Record which blocks have been received and figure out the total latency - // for fetching the blocks - wanted, totalLatency := s.sw.BlocksReceived(ks) - s.latencyTrkr.receiveUpdate(len(wanted), totalLatency) - - if len(wanted) == 0 { - return - } - - // Inform the SessionInterestManager that this session is no longer - // expecting to receive the wanted keys - s.sim.RemoveSessionWants(s.id, wanted) - + // Inform the session that blocks have been received select { - case s.incoming <- op{op: opReceive, keys: wanted}: + case s.incoming <- op{op: opReceive, keys: ks}: case <-s.ctx.Done(): } } @@ -220,28 +208,6 @@ func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontH // } // } -func (s *Session) onWantsSent(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { - allBlks := append(wantBlocks[:len(wantBlocks):len(wantBlocks)], wantHaves...) - s.sw.WantsSent(allBlks) - s.sprm.RecordPeerRequests([]peer.ID{p}, allBlks) -} - -func (s *Session) onPeersExhausted(ks []cid.Cid) { - // We don't want to block the sessionWantSender if the incoming channel - // is full. So if we can't immediately send on the incoming channel spin - // it off into a go-routine. 
- select { - case s.incoming <- op{op: opBroadcast, keys: ks}: - default: - go func() { - select { - case s.incoming <- op{op: opBroadcast, keys: ks}: - case <-s.ctx.Done(): - } - }() - } -} - // GetBlock fetches a single block. func (s *Session) GetBlock(parent context.Context, k cid.Cid) (blocks.Block, error) { return bsgetter.SyncGetBlock(parent, k, s.GetBlocks) @@ -278,6 +244,34 @@ func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { } } +// onWantsSent is called when wants are sent to a peer by the session wants sender +func (s *Session) onWantsSent(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { + allBlks := append(wantBlocks[:len(wantBlocks):len(wantBlocks)], wantHaves...) + s.nonBlockingEnqueue(op{op: opWantsSent, keys: allBlks}) +} + +// onPeersExhausted is called when all available peers have sent DONT_HAVE for +// a set of cids (or all peers become unavailable) +func (s *Session) onPeersExhausted(ks []cid.Cid) { + s.nonBlockingEnqueue(op{op: opBroadcast, keys: ks}) +} + +// We don't want to block the sessionWantSender if the incoming channel +// is full. So if we can't immediately send on the incoming channel spin +// it off into a go-routine. 
+func (s *Session) nonBlockingEnqueue(o op) { + select { + case s.incoming <- o: + default: + go func() { + select { + case s.incoming <- o: + case <-s.ctx.Done(): + } + }() + } +} + // Session run loop -- everything in this function should not be called // outside of this loop func (s *Session) run(ctx context.Context) { @@ -290,23 +284,34 @@ func (s *Session) run(ctx context.Context) { case oper := <-s.incoming: switch oper.op { case opReceive: + // Received blocks s.handleReceive(oper.keys) case opWant: + // Client wants blocks s.wantBlocks(ctx, oper.keys) case opCancel: + // Wants were cancelled s.sw.CancelPending(oper.keys) + case opWantsSent: + // Wants were sent to a peer + s.sw.WantsSent(oper.keys) case opBroadcast: + // Broadcast want-haves to all peers s.broadcastWantHaves(ctx, oper.keys) default: panic("unhandled operation") } case <-s.idleTick.C: + // The session hasn't received blocks for a while, broadcast s.broadcastWantHaves(ctx, nil) case <-s.periodicSearchTimer.C: + // Periodically search for a random live want s.handlePeriodicSearch(ctx) case baseTickDelay := <-s.tickDelayReqs: + // Set the base tick delay s.baseTickDelay = baseTickDelay case <-ctx.Done(): + // Shutdown s.handleShutdown() return } @@ -327,7 +332,6 @@ func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { // log.Infof("Ses%d: broadcast %d keys\n", s.id, len(live)) // Broadcast a want-have for the live wants to everyone we're connected to - s.sprm.RecordPeerRequests(nil, wants) s.wm.BroadcastWantHaves(ctx, s.id, wants) // do not find providers on consecutive ticks @@ -337,7 +341,7 @@ func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { // Typically if the provider has the first block they will have // the rest of the blocks also. 
log.Warnf("Ses%d: FindMorePeers with want 0 of %d wants", s.id, len(wants)) - s.sprm.FindMorePeers(ctx, wants[0]) + s.findMorePeers(ctx, wants[0]) } s.resetIdleTick() @@ -347,6 +351,8 @@ func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { } } +// handlePeriodicSearch is called periodically to search for providers of a +// randomly chosen CID in the sesssion. func (s *Session) handlePeriodicSearch(ctx context.Context) { randomWant := s.sw.RandomLiveWant() if !randomWant.Defined() { @@ -355,40 +361,74 @@ func (s *Session) handlePeriodicSearch(ctx context.Context) { // TODO: come up with a better strategy for determining when to search // for new providers for blocks. - s.sprm.FindMorePeers(ctx, randomWant) + s.findMorePeers(ctx, randomWant) s.wm.BroadcastWantHaves(ctx, s.id, []cid.Cid{randomWant}) s.periodicSearchTimer.Reset(s.periodicSearchDelay.NextWaitTime()) } +// findMorePeers attempts to find more peers for a session by searching for +// providers for the given Cid +func (s *Session) findMorePeers(ctx context.Context, c cid.Cid) { + go func(k cid.Cid) { + for p := range s.providerFinder.FindProvidersAsync(ctx, k) { + // When a provider indicates that it has a cid, it's equivalent to + // the providing peer sending a HAVE + s.sws.Update(p, nil, []cid.Cid{c}, nil) + } + }(c) +} + +// handleShutdown is called when the session shuts down func (s *Session) handleShutdown() { + // Stop the idle timer s.idleTick.Stop() + // Shut down the session peer manager + s.sprm.Shutdown() + // Remove the session from the want manager s.wm.RemoveSession(s.ctx, s.id) } +// handleReceive is called when the session receives blocks from a peer func (s *Session) handleReceive(ks []cid.Cid) { + // Record which blocks have been received and figure out the total latency + // for fetching the blocks + wanted, totalLatency := s.sw.BlocksReceived(ks) + if len(wanted) == 0 { + return + } + + // Record latency + s.latencyTrkr.receiveUpdate(len(wanted), totalLatency) + 
+ // Inform the SessionInterestManager that this session is no longer + // expecting to receive the wanted keys + s.sim.RemoveSessionWants(s.id, wanted) + s.idleTick.Stop() // We've received new wanted blocks, so reset the number of ticks // that have occurred since the last new block s.consecutiveTicks = 0 - s.sprm.RecordCancels(ks) - s.resetIdleTick() } +// wantBlocks is called when blocks are requested by the client func (s *Session) wantBlocks(ctx context.Context, newks []cid.Cid) { if len(newks) > 0 { + // Inform the SessionInterestManager that this session is interested in the keys s.sim.RecordSessionInterest(s.id, newks) + // Tell the sessionWants tracker that that the wants have been requested s.sw.BlocksRequested(newks) + // Tell the sessionWantSender that the blocks have been requested s.sws.Add(newks) } - // If we have discovered peers already, the SessionPotentialManager will + // If we have discovered peers already, the sessionWantSender will // send wants to them - if s.sprm.Peers().Size() > 0 { + if s.sprm.PeersDiscovered() { return } @@ -396,7 +436,6 @@ func (s *Session) wantBlocks(ctx context.Context, newks []cid.Cid) { ks := s.sw.GetNextWants(broadcastLiveWantsLimit) if len(ks) > 0 { log.Infof("Ses%d: No peers - broadcasting %d want HAVE requests\n", s.id, len(ks)) - s.sprm.RecordPeerRequests(nil, ks) s.wm.BroadcastWantHaves(ctx, s.id, ks) } } @@ -415,29 +454,19 @@ func (s *Session) resetIdleTick() { } type latencyTracker struct { - sync.RWMutex totalLatency time.Duration count int } func (lt *latencyTracker) hasLatency() bool { - lt.RLock() - defer lt.RUnlock() - return lt.totalLatency > 0 && lt.count > 0 } func (lt *latencyTracker) averageLatency() time.Duration { - lt.RLock() - defer lt.RUnlock() - return lt.totalLatency / time.Duration(lt.count) } func (lt *latencyTracker) receiveUpdate(count int, totalLatency time.Duration) { - lt.Lock() - defer lt.Unlock() - lt.totalLatency += totalLatency lt.count += count } diff --git 
a/bitswap/internal/session/session_test.go b/bitswap/internal/session/session_test.go index b3ae26b22..13f2b3021 100644 --- a/bitswap/internal/session/session_test.go +++ b/bitswap/internal/session/session_test.go @@ -9,6 +9,7 @@ import ( notifications "github.com/ipfs/go-bitswap/internal/notifications" bspm "github.com/ipfs/go-bitswap/internal/peermanager" bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" + bsspm "github.com/ipfs/go-bitswap/internal/sessionpeermanager" "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" @@ -38,40 +39,41 @@ func (fwm *fakeWantManager) BroadcastWantHaves(ctx context.Context, sesid uint64 } func (fwm *fakeWantManager) RemoveSession(context.Context, uint64) {} -type fakeSessionPeerManager struct { - peers *peer.Set - findMorePeersRequested chan cid.Cid +func newFakeSessionPeerManager() *bsspm.SessionPeerManager { + return bsspm.New(1, newFakePeerTagger()) } -func newFakeSessionPeerManager() *fakeSessionPeerManager { - return &fakeSessionPeerManager{ - peers: peer.NewSet(), - findMorePeersRequested: make(chan cid.Cid, 1), - } +type fakePeerTagger struct { } -func (fpm *fakeSessionPeerManager) FindMorePeers(ctx context.Context, k cid.Cid) { - select { - case fpm.findMorePeersRequested <- k: - case <-ctx.Done(): - } +func newFakePeerTagger() *fakePeerTagger { + return &fakePeerTagger{} } -func (fpm *fakeSessionPeerManager) Peers() *peer.Set { - return fpm.peers +func (fpt *fakePeerTagger) TagPeer(p peer.ID, tag string, val int) { +} +func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) { } -func (fpm *fakeSessionPeerManager) ReceiveFrom(p peer.ID, ks []cid.Cid, haves []cid.Cid) bool { - if !fpm.peers.Contains(p) { - fpm.peers.Add(p) - return true +type fakeProviderFinder struct { + findMorePeersRequested chan cid.Cid +} + +func newFakeProviderFinder() *fakeProviderFinder { + return &fakeProviderFinder{ + findMorePeersRequested: make(chan 
cid.Cid, 1), } - return false } -func (fpm *fakeSessionPeerManager) RecordCancels(c []cid.Cid) {} -func (fpm *fakeSessionPeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} -func (fpm *fakeSessionPeerManager) RecordPeerResponse(p peer.ID, c []cid.Cid) { - fpm.peers.Add(p) + +func (fpf *fakeProviderFinder) FindProvidersAsync(ctx context.Context, k cid.Cid) <-chan peer.ID { + go func() { + select { + case fpf.findMorePeersRequested <- k: + case <-ctx.Done(): + } + }() + + return make(chan peer.ID) } type fakePeerManager struct { @@ -88,22 +90,24 @@ func (pm *fakePeerManager) UnregisterSession(uint64) func (pm *fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {} func TestSessionGetBlocks(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() fwm := newFakeWantManager() fpm := newFakeSessionPeerManager() + fpf := newFakeProviderFinder() sim := bssim.New() bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session := New(ctx, id, fwm, fpm, fpf, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) var cids []cid.Cid for _, block := range blks { cids = append(cids, block.Cid()) } + _, err := session.GetBlocks(ctx, cids) if err != nil { @@ -125,14 +129,16 @@ func TestSessionGetBlocks(t *testing.T) { } // Simulate receiving HAVEs from several peers - peers := testutil.GeneratePeers(broadcastLiveWantsLimit) + peers := testutil.GeneratePeers(5) for i, p := range peers { blk := blks[testutil.IndexOf(blks, receivedWantReq.cids[i])] session.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{blk.Cid()}, []cid.Cid{}) } + 
time.Sleep(10 * time.Millisecond) + // Verify new peers were recorded - if !testutil.MatchPeersIgnoreOrder(fpm.Peers().Peers(), peers) { + if !testutil.MatchPeersIgnoreOrder(fpm.Peers(), peers) { t.Fatal("peers not recorded by the peer manager") } @@ -145,6 +151,8 @@ func TestSessionGetBlocks(t *testing.T) { // Simulate receiving DONT_HAVE for a CID session.ReceiveFrom(peers[0], []cid.Cid{}, []cid.Cid{}, []cid.Cid{blks[0].Cid()}) + time.Sleep(10 * time.Millisecond) + // Verify session still wants received blocks _, unwanted = sim.SplitWantedUnwanted(blks) if len(unwanted) > 0 { @@ -154,6 +162,8 @@ func TestSessionGetBlocks(t *testing.T) { // Simulate receiving block for a CID session.ReceiveFrom(peers[1], []cid.Cid{blks[0].Cid()}, []cid.Cid{}, []cid.Cid{}) + time.Sleep(100 * time.Millisecond) + // Verify session no longer wants received block wanted, unwanted := sim.SplitWantedUnwanted(blks) if len(unwanted) != 1 || !unwanted[0].Cid().Equals(blks[0].Cid()) { @@ -169,12 +179,13 @@ func TestSessionFindMorePeers(t *testing.T) { defer cancel() fwm := newFakeWantManager() fpm := newFakeSessionPeerManager() + fpf := newFakeProviderFinder() sim := bssim.New() bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session := New(ctx, id, fwm, fpm, fpf, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") session.SetBaseTickDelay(200 * time.Microsecond) blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) @@ -223,7 +234,7 @@ func TestSessionFindMorePeers(t *testing.T) { // The session should eventually try to find more peers select { - case <-fpm.findMorePeersRequested: + case <-fpf.findMorePeersRequested: case <-ctx.Done(): t.Fatal("Did not find more peers") } @@ -234,12 +245,14 @@ func TestSessionOnPeersExhausted(t 
*testing.T) { defer cancel() fwm := newFakeWantManager() fpm := newFakeSessionPeerManager() + fpf := newFakeProviderFinder() + sim := bssim.New() bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session := New(ctx, id, fwm, fpm, fpf, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit + 5) var cids []cid.Cid @@ -277,12 +290,13 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { defer cancel() fwm := newFakeWantManager() fpm := newFakeSessionPeerManager() + fpf := newFakeProviderFinder() sim := bssim.New() bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, sim, newFakePeerManager(), bpm, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond), "") + session := New(ctx, id, fwm, fpm, fpf, sim, newFakePeerManager(), bpm, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(4) var cids []cid.Cid @@ -314,7 +328,7 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { // Wait for a request to find more peers to occur select { - case k := <-fpm.findMorePeersRequested: + case k := <-fpf.findMorePeersRequested: if testutil.IndexOf(blks, k) == -1 { t.Fatal("did not rebroadcast an active want") } @@ -369,14 +383,14 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { // Should not have tried to find peers on consecutive ticks select { - case <-fpm.findMorePeersRequested: + case <-fpf.findMorePeersRequested: t.Fatal("Should not have tried to find peers on consecutive ticks") default: } // Wait for rebroadcast to occur select { - case k := <-fpm.findMorePeersRequested: + 
case k := <-fpf.findMorePeersRequested: if testutil.IndexOf(blks, k) == -1 { t.Fatal("did not rebroadcast an active want") } @@ -388,6 +402,7 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { fwm := newFakeWantManager() fpm := newFakeSessionPeerManager() + fpf := newFakeProviderFinder() sim := bssim.New() bpm := bsbpm.New() notif := notifications.New() @@ -396,7 +411,7 @@ func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { // Create a new session with its own context sessctx, sesscancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - session := New(sessctx, id, fwm, fpm, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session := New(sessctx, id, fwm, fpm, fpf, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") timerCtx, timerCancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer timerCancel() @@ -430,12 +445,14 @@ func TestSessionReceiveMessageAfterShutdown(t *testing.T) { ctx, cancelCtx := context.WithTimeout(context.Background(), 10*time.Millisecond) fwm := newFakeWantManager() fpm := newFakeSessionPeerManager() + fpf := newFakeProviderFinder() + sim := bssim.New() bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session := New(ctx, id, fwm, fpm, fpf, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(2) cids := []cid.Cid{blks[0].Cid(), blks[1].Cid()} diff --git a/bitswap/internal/session/sessionwants.go b/bitswap/internal/session/sessionwants.go index 9f896049f..ad8dcd1bc 100644 --- a/bitswap/internal/session/sessionwants.go +++ b/bitswap/internal/session/sessionwants.go @@ -3,7 +3,6 @@ 
package session import ( "fmt" "math/rand" - "sync" "time" cid "github.com/ipfs/go-cid" @@ -12,7 +11,6 @@ import ( // sessionWants keeps track of which cids are waiting to be sent out, and which // peers are "live" - ie, we've sent a request but haven't received a block yet type sessionWants struct { - sync.RWMutex toFetch *cidQueue liveWants map[cid.Cid]time.Time } @@ -30,9 +28,6 @@ func (sw *sessionWants) String() string { // BlocksRequested is called when the client makes a request for blocks func (sw *sessionWants) BlocksRequested(newWants []cid.Cid) { - sw.Lock() - defer sw.Unlock() - for _, k := range newWants { sw.toFetch.Push(k) } @@ -43,9 +38,6 @@ func (sw *sessionWants) BlocksRequested(newWants []cid.Cid) { func (sw *sessionWants) GetNextWants(limit int) []cid.Cid { now := time.Now() - sw.Lock() - defer sw.Unlock() - // Move CIDs from fetch queue to the live wants queue (up to the limit) currentLiveCount := len(sw.liveWants) toAdd := limit - currentLiveCount @@ -63,10 +55,6 @@ func (sw *sessionWants) GetNextWants(limit int) []cid.Cid { // WantsSent is called when wants are sent to a peer func (sw *sessionWants) WantsSent(ks []cid.Cid) { now := time.Now() - - sw.Lock() - defer sw.Unlock() - for _, c := range ks { if _, ok := sw.liveWants[c]; !ok { sw.toFetch.Remove(c) @@ -86,12 +74,8 @@ func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) } now := time.Now() - - sw.Lock() - defer sw.Unlock() - for _, c := range ks { - if sw.unlockedIsWanted(c) { + if sw.isWanted(c) { wanted = append(wanted, c) sentAt, ok := sw.liveWants[c] @@ -113,10 +97,6 @@ func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) // live want CIDs. 
func (sw *sessionWants) PrepareBroadcast() []cid.Cid { now := time.Now() - - sw.Lock() - defer sw.Unlock() - live := make([]cid.Cid, 0, len(sw.liveWants)) for c := range sw.liveWants { live = append(live, c) @@ -127,9 +107,6 @@ func (sw *sessionWants) PrepareBroadcast() []cid.Cid { // CancelPending removes the given CIDs from the fetch queue. func (sw *sessionWants) CancelPending(keys []cid.Cid) { - sw.Lock() - defer sw.Unlock() - for _, k := range keys { sw.toFetch.Remove(k) } @@ -137,9 +114,6 @@ func (sw *sessionWants) CancelPending(keys []cid.Cid) { // LiveWants returns a list of live wants func (sw *sessionWants) LiveWants() []cid.Cid { - sw.RLock() - defer sw.RUnlock() - live := make([]cid.Cid, 0, len(sw.liveWants)) for c := range sw.liveWants { live = append(live, c) @@ -148,16 +122,12 @@ func (sw *sessionWants) LiveWants() []cid.Cid { } func (sw *sessionWants) RandomLiveWant() cid.Cid { - i := rand.Uint64() - - sw.RLock() - defer sw.RUnlock() - if len(sw.liveWants) == 0 { return cid.Cid{} } - i %= uint64(len(sw.liveWants)) + // picking a random live want + i := rand.Intn(len(sw.liveWants)) for k := range sw.liveWants { if i == 0 { return k @@ -169,13 +139,11 @@ func (sw *sessionWants) RandomLiveWant() cid.Cid { // Has live wants indicates if there are any live wants func (sw *sessionWants) HasLiveWants() bool { - sw.RLock() - defer sw.RUnlock() - return len(sw.liveWants) > 0 } -func (sw *sessionWants) unlockedIsWanted(c cid.Cid) bool { +// Indicates whether the want is in either of the fetch or live queues +func (sw *sessionWants) isWanted(c cid.Cid) bool { _, ok := sw.liveWants[c] if !ok { ok = sw.toFetch.Has(c) diff --git a/bitswap/internal/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go index 38c62352c..cffb39bb9 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -48,11 +48,9 @@ type peerAvailability struct { available bool } -// change can be a new peer being 
discovered, a new message received by the -// session, or a change in the connect status of a peer +// change can be new wants, a new message received by the session, +// or a change in the connect status of a peer type change struct { - // the peer ID of a new peer - addPeer peer.ID // new wants requested add []cid.Cid // new message received by session (blocks / HAVEs / DONT_HAVEs) @@ -85,12 +83,12 @@ type sessionWantSender struct { peerConsecutiveDontHaves map[peer.ID]int // Tracks which peers we have send want-block to swbt *sentWantBlocksTracker - // Maintains a list of peers and whether they are connected - peerAvlMgr *peerAvailabilityManager // Tracks the number of blocks each peer sent us peerRspTrkr *peerResponseTracker // Sends wants to peers pm PeerManager + // Keeps track of peers in the session + spm SessionPeerManager // Keeps track of which peer has / doesn't have a block bpm *bsbpm.BlockPresenceManager // Called when wants are sent @@ -99,105 +97,94 @@ type sessionWantSender struct { onPeersExhausted onPeersExhaustedFn } -func newSessionWantSender(ctx context.Context, sid uint64, pm PeerManager, bpm *bsbpm.BlockPresenceManager, - onSend onSendFn, onPeersExhausted onPeersExhaustedFn) sessionWantSender { +func newSessionWantSender(ctx context.Context, sid uint64, pm PeerManager, spm SessionPeerManager, + bpm *bsbpm.BlockPresenceManager, onSend onSendFn, onPeersExhausted onPeersExhaustedFn) sessionWantSender { - spm := sessionWantSender{ + sws := sessionWantSender{ ctx: ctx, sessionID: sid, changes: make(chan change, changesBufferSize), wants: make(map[cid.Cid]*wantInfo), peerConsecutiveDontHaves: make(map[peer.ID]int), swbt: newSentWantBlocksTracker(), - peerAvlMgr: newPeerAvailabilityManager(), peerRspTrkr: newPeerResponseTracker(), pm: pm, + spm: spm, bpm: bpm, onSend: onSend, onPeersExhausted: onPeersExhausted, } - return spm + return sws } -func (spm *sessionWantSender) ID() uint64 { - return spm.sessionID +func (sws *sessionWantSender) ID() 
uint64 { + return sws.sessionID } // Add is called when new wants are added to the session -func (spm *sessionWantSender) Add(ks []cid.Cid) { +func (sws *sessionWantSender) Add(ks []cid.Cid) { if len(ks) == 0 { return } - spm.addChange(change{add: ks}) + sws.addChange(change{add: ks}) } // Update is called when the session receives a message with incoming blocks // or HAVE / DONT_HAVE -func (spm *sessionWantSender) Update(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid, isNewPeer bool) { - // fmt.Printf("Update(%s, %d, %d, %d, %t)\n", lu.P(from), len(ks), len(haves), len(dontHaves), isNewPeer) +func (sws *sessionWantSender) Update(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { + // fmt.Printf("Update(%s, %d, %d, %d, %t)\n", lu.P(from), len(ks), len(haves), len(dontHaves)) hasUpdate := len(ks) > 0 || len(haves) > 0 || len(dontHaves) > 0 - if !hasUpdate && !isNewPeer { + if !hasUpdate { return } - ch := change{} - - if hasUpdate { - ch.update = update{from, ks, haves, dontHaves} - } - - // If the message came from a new peer register with the peer manager - if isNewPeer { - available := spm.pm.RegisterSession(from, spm) - ch.addPeer = from - ch.availability = peerAvailability{from, available} - } - - spm.addChange(ch) + sws.addChange(change{ + update: update{from, ks, haves, dontHaves}, + }) } // SignalAvailability is called by the PeerManager to signal that a peer has // connected / disconnected -func (spm *sessionWantSender) SignalAvailability(p peer.ID, isAvailable bool) { +func (sws *sessionWantSender) SignalAvailability(p peer.ID, isAvailable bool) { // fmt.Printf("SignalAvailability(%s, %t)\n", lu.P(p), isAvailable) availability := peerAvailability{p, isAvailable} - spm.addChange(change{availability: availability}) + sws.addChange(change{availability: availability}) } // Run is the main loop for processing incoming changes -func (spm *sessionWantSender) Run() { +func (sws *sessionWantSender) Run() { for { select { - case ch 
:= <-spm.changes: - spm.onChange([]change{ch}) - case <-spm.ctx.Done(): - spm.shutdown() + case ch := <-sws.changes: + sws.onChange([]change{ch}) + case <-sws.ctx.Done(): + sws.shutdown() return } } } // addChange adds a new change to the queue -func (spm *sessionWantSender) addChange(c change) { +func (sws *sessionWantSender) addChange(c change) { select { - case spm.changes <- c: - case <-spm.ctx.Done(): + case sws.changes <- c: + case <-sws.ctx.Done(): } } // shutdown unregisters the session with the PeerManager -func (spm *sessionWantSender) shutdown() { - spm.pm.UnregisterSession(spm.sessionID) +func (sws *sessionWantSender) shutdown() { + sws.pm.UnregisterSession(sws.sessionID) } // collectChanges collects all the changes that have occurred since the last // invocation of onChange -func (spm *sessionWantSender) collectChanges(changes []change) []change { +func (sws *sessionWantSender) collectChanges(changes []change) []change { for len(changes) < changesBufferSize { select { - case next := <-spm.changes: + case next := <-sws.changes: changes = append(changes, next) default: return changes @@ -207,27 +194,28 @@ func (spm *sessionWantSender) collectChanges(changes []change) []change { } // onChange processes the next set of changes -func (spm *sessionWantSender) onChange(changes []change) { +func (sws *sessionWantSender) onChange(changes []change) { // Several changes may have been recorded since the last time we checked, // so pop all outstanding changes from the channel - changes = spm.collectChanges(changes) + changes = sws.collectChanges(changes) // Apply each change availability := make(map[peer.ID]bool, len(changes)) var updates []update for _, chng := range changes { - // Add newly discovered peers - if chng.addPeer != "" { - spm.peerAvlMgr.addPeer(chng.addPeer) - } - // Initialize info for new wants for _, c := range chng.add { - spm.trackWant(c) + sws.trackWant(c) } // Consolidate updates and changes to availability if chng.update.from != "" { + // If 
the update includes blocks or haves, treat it as signaling that + // the peer is available + if len(chng.update.ks) > 0 || len(chng.update.haves) > 0 { + availability[chng.update.from] = true + } + updates = append(updates, chng.update) } if chng.availability.target != "" { @@ -236,20 +224,20 @@ func (spm *sessionWantSender) onChange(changes []change) { } // Update peer availability - newlyAvailable, newlyUnavailable := spm.processAvailability(availability) + newlyAvailable, newlyUnavailable := sws.processAvailability(availability) // Update wants - dontHaves := spm.processUpdates(updates) + dontHaves := sws.processUpdates(updates) // Check if there are any wants for which all peers have indicated they // don't have the want - spm.checkForExhaustedWants(dontHaves, newlyUnavailable) + sws.checkForExhaustedWants(dontHaves, newlyUnavailable) // If there are some connected peers, send any pending wants - if spm.peerAvlMgr.haveAvailablePeers() { + if sws.spm.HasPeers() { // fmt.Printf("sendNextWants()\n") - spm.sendNextWants(newlyAvailable) - // fmt.Println(spm) + sws.sendNextWants(newlyAvailable) + // fmt.Println(sws) } } @@ -258,60 +246,58 @@ func (spm *sessionWantSender) onChange(changes []change) { // It returns the peers that have become // - newly available // - newly unavailable -func (spm *sessionWantSender) processAvailability(availability map[peer.ID]bool) (avail []peer.ID, unavail []peer.ID) { +func (sws *sessionWantSender) processAvailability(availability map[peer.ID]bool) (avail []peer.ID, unavail []peer.ID) { var newlyAvailable []peer.ID var newlyUnavailable []peer.ID for p, isNowAvailable := range availability { - // Make sure this is a peer that the session is actually interested in - if wasAvailable, ok := spm.peerAvlMgr.isAvailable(p); ok { - // If the state has changed - if wasAvailable != isNowAvailable { - // Update the state and record that something changed - spm.peerAvlMgr.setPeerAvailability(p, isNowAvailable) - // 
fmt.Printf("processAvailability change %s %t\n", lu.P(p), isNowAvailable) - spm.updateWantsPeerAvailability(p, isNowAvailable) - if isNowAvailable { - newlyAvailable = append(newlyAvailable, p) - } else { - newlyUnavailable = append(newlyUnavailable, p) - } - // Reset the count of consecutive DONT_HAVEs received from the - // peer - delete(spm.peerConsecutiveDontHaves, p) + stateChange := false + if isNowAvailable { + isNewPeer := sws.spm.AddPeer(p) + if isNewPeer { + stateChange = true + newlyAvailable = append(newlyAvailable, p) + } + } else { + wasAvailable := sws.spm.RemovePeer(p) + if wasAvailable { + stateChange = true + newlyUnavailable = append(newlyUnavailable, p) } } + + // If the state has changed + if stateChange { + sws.updateWantsPeerAvailability(p, isNowAvailable) + // Reset the count of consecutive DONT_HAVEs received from the + // peer + delete(sws.peerConsecutiveDontHaves, p) + } } return newlyAvailable, newlyUnavailable } -// isAvailable indicates whether the peer is available and whether -// it's been tracked by the Session (used by the tests) -func (spm *sessionWantSender) isAvailable(p peer.ID) (bool, bool) { - return spm.peerAvlMgr.isAvailable(p) -} - // trackWant creates a new entry in the map of CID -> want info -func (spm *sessionWantSender) trackWant(c cid.Cid) { +func (sws *sessionWantSender) trackWant(c cid.Cid) { // fmt.Printf("trackWant %s\n", lu.C(c)) - if _, ok := spm.wants[c]; ok { + if _, ok := sws.wants[c]; ok { return } // Create the want info - wi := newWantInfo(spm.peerRspTrkr) - spm.wants[c] = wi + wi := newWantInfo(sws.peerRspTrkr) + sws.wants[c] = wi // For each available peer, register any information we know about // whether the peer has the block - for _, p := range spm.peerAvlMgr.availablePeers() { - spm.updateWantBlockPresence(c, p) + for _, p := range sws.spm.Peers() { + sws.updateWantBlockPresence(c, p) } } // processUpdates processes incoming blocks and HAVE / DONT_HAVEs. // It returns all DONT_HAVEs. 
-func (spm *sessionWantSender) processUpdates(updates []update) []cid.Cid { +func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { prunePeers := make(map[peer.ID]struct{}) dontHaves := cid.NewSet() for _, upd := range updates { @@ -325,43 +311,43 @@ func (spm *sessionWantSender) processUpdates(updates []update) []cid.Cid { dontHaves.Add(c) // Update the block presence for the peer - spm.updateWantBlockPresence(c, upd.from) + sws.updateWantBlockPresence(c, upd.from) // Check if the DONT_HAVE is in response to a want-block // (could also be in response to want-have) - if spm.swbt.haveSentWantBlockTo(upd.from, c) { + if sws.swbt.haveSentWantBlockTo(upd.from, c) { // If we were waiting for a response from this peer, clear // sentTo so that we can send the want to another peer - if sentTo, ok := spm.getWantSentTo(c); ok && sentTo == upd.from { - spm.setWantSentTo(c, "") + if sentTo, ok := sws.getWantSentTo(c); ok && sentTo == upd.from { + sws.setWantSentTo(c, "") } } // Track the number of consecutive DONT_HAVEs each peer receives - if spm.peerConsecutiveDontHaves[upd.from] == peerDontHaveLimit { + if sws.peerConsecutiveDontHaves[upd.from] == peerDontHaveLimit { prunePeers[upd.from] = struct{}{} } else { - spm.peerConsecutiveDontHaves[upd.from]++ + sws.peerConsecutiveDontHaves[upd.from]++ } } // For each HAVE for _, c := range upd.haves { // Update the block presence for the peer - spm.updateWantBlockPresence(c, upd.from) - delete(spm.peerConsecutiveDontHaves, upd.from) + sws.updateWantBlockPresence(c, upd.from) + delete(sws.peerConsecutiveDontHaves, upd.from) } // For each received block for _, c := range upd.ks { // Remove the want - removed := spm.removeWant(c) + removed := sws.removeWant(c) if removed != nil { // Inform the peer tracker that this peer was the first to send // us the block - spm.peerRspTrkr.receivedBlockFrom(upd.from) + sws.peerRspTrkr.receivedBlockFrom(upd.from) } - delete(spm.peerConsecutiveDontHaves, upd.from) + 
delete(sws.peerConsecutiveDontHaves, upd.from) } } @@ -370,7 +356,7 @@ func (spm *sessionWantSender) processUpdates(updates []update) []cid.Cid { if len(prunePeers) > 0 { go func() { for p := range prunePeers { - spm.SignalAvailability(p, false) + sws.SignalAvailability(p, false) } }() } @@ -380,7 +366,7 @@ func (spm *sessionWantSender) processUpdates(updates []update) []cid.Cid { // checkForExhaustedWants checks if there are any wants for which all peers // have sent a DONT_HAVE. We call these "exhausted" wants. -func (spm *sessionWantSender) checkForExhaustedWants(dontHaves []cid.Cid, newlyUnavailable []peer.ID) { +func (sws *sessionWantSender) checkForExhaustedWants(dontHaves []cid.Cid, newlyUnavailable []peer.ID) { // If there are no new DONT_HAVEs, and no peers became unavailable, then // we don't need to check for exhausted wants if len(dontHaves) == 0 && len(newlyUnavailable) == 0 { @@ -394,15 +380,15 @@ func (spm *sessionWantSender) checkForExhaustedWants(dontHaves []cid.Cid, newlyU // (because it may be the last peer who hadn't sent a DONT_HAVE for a CID) if len(newlyUnavailable) > 0 { // Collect all pending wants - wants = make([]cid.Cid, len(spm.wants)) - for c := range spm.wants { + wants = make([]cid.Cid, len(sws.wants)) + for c := range sws.wants { wants = append(wants, c) } // If the last available peer in the session has become unavailable // then we need to broadcast all pending wants - if len(spm.peerAvlMgr.availablePeers()) == 0 { - spm.processExhaustedWants(wants) + if !sws.spm.HasPeers() { + sws.processExhaustedWants(wants) return } } @@ -410,17 +396,17 @@ func (spm *sessionWantSender) checkForExhaustedWants(dontHaves []cid.Cid, newlyU // If all available peers for a cid sent a DONT_HAVE, signal to the session // that we've exhausted available peers if len(wants) > 0 { - exhausted := spm.bpm.AllPeersDoNotHaveBlock(spm.peerAvlMgr.availablePeers(), wants) - spm.processExhaustedWants(exhausted) + exhausted := 
sws.bpm.AllPeersDoNotHaveBlock(sws.spm.Peers(), wants) + sws.processExhaustedWants(exhausted) } } // processExhaustedWants filters the list so that only those wants that haven't // already been marked as exhausted are passed to onPeersExhausted() -func (spm *sessionWantSender) processExhaustedWants(exhausted []cid.Cid) { - newlyExhausted := spm.newlyExhausted(exhausted) +func (sws *sessionWantSender) processExhaustedWants(exhausted []cid.Cid) { + newlyExhausted := sws.newlyExhausted(exhausted) if len(newlyExhausted) > 0 { - spm.onPeersExhausted(newlyExhausted) + sws.onPeersExhausted(newlyExhausted) } } @@ -444,10 +430,10 @@ func (aw allWants) forPeer(p peer.ID) *wantSets { // sendNextWants sends wants to peers according to the latest information // about which peers have / dont have blocks -func (spm *sessionWantSender) sendNextWants(newlyAvailable []peer.ID) { +func (sws *sessionWantSender) sendNextWants(newlyAvailable []peer.ID) { toSend := make(allWants) - for c, wi := range spm.wants { + for c, wi := range sws.wants { // Ensure we send want-haves to any newly available peers for _, p := range newlyAvailable { toSend.forPeer(p).wantHaves.Add(c) @@ -471,13 +457,13 @@ func (spm *sessionWantSender) sendNextWants(newlyAvailable []peer.ID) { // fmt.Printf(" q - send best: %s: %s\n", lu.C(c), lu.P(wi.bestPeer)) // Record that we are sending a want-block for this want to the peer - spm.setWantSentTo(c, wi.bestPeer) + sws.setWantSentTo(c, wi.bestPeer) // Send a want-block to the chosen peer toSend.forPeer(wi.bestPeer).wantBlocks.Add(c) // Send a want-have to each other peer - for _, op := range spm.peerAvlMgr.availablePeers() { + for _, op := range sws.spm.Peers() { if op != wi.bestPeer { toSend.forPeer(op).wantHaves.Add(c) } @@ -485,11 +471,11 @@ func (spm *sessionWantSender) sendNextWants(newlyAvailable []peer.ID) { } // Send any wants we've collected - spm.sendWants(toSend) + sws.sendWants(toSend) } // sendWants sends want-have and want-blocks to the appropriate 
peers -func (spm *sessionWantSender) sendWants(sends allWants) { +func (sws *sessionWantSender) sendWants(sends allWants) { // fmt.Printf(" send wants to %d peers\n", len(sends)) // For each peer we're sending a request to @@ -497,7 +483,7 @@ func (spm *sessionWantSender) sendWants(sends allWants) { // fmt.Printf(" send %d wants to %s\n", snd.wantBlocks.Len(), lu.P(p)) // Piggyback some other want-haves onto the request to the peer - for _, c := range spm.getPiggybackWantHaves(p, snd.wantBlocks) { + for _, c := range sws.getPiggybackWantHaves(p, snd.wantBlocks) { snd.wantHaves.Add(c) } @@ -507,24 +493,24 @@ func (spm *sessionWantSender) sendWants(sends allWants) { // precedence over want-haves. wblks := snd.wantBlocks.Keys() whaves := snd.wantHaves.Keys() - spm.pm.SendWants(spm.ctx, p, wblks, whaves) + sws.pm.SendWants(sws.ctx, p, wblks, whaves) // Inform the session that we've sent the wants - spm.onSend(p, wblks, whaves) + sws.onSend(p, wblks, whaves) // Record which peers we send want-block to - spm.swbt.addSentWantBlocksTo(p, wblks) + sws.swbt.addSentWantBlocksTo(p, wblks) } } // getPiggybackWantHaves gets the want-haves that should be piggybacked onto // a request that we are making to send want-blocks to a peer -func (spm *sessionWantSender) getPiggybackWantHaves(p peer.ID, wantBlocks *cid.Set) []cid.Cid { +func (sws *sessionWantSender) getPiggybackWantHaves(p peer.ID, wantBlocks *cid.Set) []cid.Cid { var whs []cid.Cid - for c := range spm.wants { + for c := range sws.wants { // Don't send want-have if we're already sending a want-block // (or have previously) - if !wantBlocks.Has(c) && !spm.swbt.haveSentWantBlockTo(p, c) { + if !wantBlocks.Has(c) && !sws.swbt.haveSentWantBlockTo(p, c) { whs = append(whs, c) } } @@ -533,10 +519,10 @@ func (spm *sessionWantSender) getPiggybackWantHaves(p peer.ID, wantBlocks *cid.S // newlyExhausted filters the list of keys for wants that have not already // been marked as exhausted (all peers indicated they don't have the 
block) -func (spm *sessionWantSender) newlyExhausted(ks []cid.Cid) []cid.Cid { +func (sws *sessionWantSender) newlyExhausted(ks []cid.Cid) []cid.Cid { var res []cid.Cid for _, c := range ks { - if wi, ok := spm.wants[c]; ok { + if wi, ok := sws.wants[c]; ok { if !wi.exhausted { res = append(res, c) wi.exhausted = true @@ -547,9 +533,9 @@ func (spm *sessionWantSender) newlyExhausted(ks []cid.Cid) []cid.Cid { } // removeWant is called when the corresponding block is received -func (spm *sessionWantSender) removeWant(c cid.Cid) *wantInfo { - if wi, ok := spm.wants[c]; ok { - delete(spm.wants, c) +func (sws *sessionWantSender) removeWant(c cid.Cid) *wantInfo { + if wi, ok := sws.wants[c]; ok { + delete(sws.wants, c) return wi } return nil @@ -557,10 +543,10 @@ func (spm *sessionWantSender) removeWant(c cid.Cid) *wantInfo { // updateWantsPeerAvailability is called when the availability changes for a // peer. It updates all the wants accordingly. -func (spm *sessionWantSender) updateWantsPeerAvailability(p peer.ID, isNowAvailable bool) { - for c, wi := range spm.wants { +func (sws *sessionWantSender) updateWantsPeerAvailability(p peer.ID, isNowAvailable bool) { + for c, wi := range sws.wants { if isNowAvailable { - spm.updateWantBlockPresence(c, p) + sws.updateWantBlockPresence(c, p) } else { wi.removePeer(p) } @@ -569,17 +555,17 @@ func (spm *sessionWantSender) updateWantsPeerAvailability(p peer.ID, isNowAvaila // updateWantBlockPresence is called when a HAVE / DONT_HAVE is received for the given // want / peer -func (spm *sessionWantSender) updateWantBlockPresence(c cid.Cid, p peer.ID) { - wi, ok := spm.wants[c] +func (sws *sessionWantSender) updateWantBlockPresence(c cid.Cid, p peer.ID) { + wi, ok := sws.wants[c] if !ok { return } // If the peer sent us a HAVE or DONT_HAVE for the cid, adjust the // block presence for the peer / cid combination - if spm.bpm.PeerHasBlock(p, c) { + if sws.bpm.PeerHasBlock(p, c) { wi.setPeerBlockPresence(p, BPHave) - } else if 
spm.bpm.PeerDoesNotHaveBlock(p, c) { + } else if sws.bpm.PeerDoesNotHaveBlock(p, c) { wi.setPeerBlockPresence(p, BPDontHave) } else { wi.setPeerBlockPresence(p, BPUnknown) @@ -587,16 +573,16 @@ func (spm *sessionWantSender) updateWantBlockPresence(c cid.Cid, p peer.ID) { } // Which peer was the want sent to -func (spm *sessionWantSender) getWantSentTo(c cid.Cid) (peer.ID, bool) { - if wi, ok := spm.wants[c]; ok { +func (sws *sessionWantSender) getWantSentTo(c cid.Cid) (peer.ID, bool) { + if wi, ok := sws.wants[c]; ok { return wi.sentTo, true } return "", false } // Record which peer the want was sent to -func (spm *sessionWantSender) setWantSentTo(c cid.Cid, p peer.ID) { - if wi, ok := spm.wants[c]; ok { +func (sws *sessionWantSender) setWantSentTo(c cid.Cid, p peer.ID) { + if wi, ok := sws.wants[c]; ok { wi.sentTo = p } } diff --git a/bitswap/internal/sessionmanager/sessionmanager_test.go b/bitswap/internal/sessionmanager/sessionmanager_test.go index e89ea4644..4e0152bb7 100644 --- a/bitswap/internal/sessionmanager/sessionmanager_test.go +++ b/bitswap/internal/sessionmanager/sessionmanager_test.go @@ -45,12 +45,12 @@ func (fs *fakeSession) ReceiveFrom(p peer.ID, ks []cid.Cid, wantBlocks []cid.Cid type fakeSesPeerManager struct { } -func (*fakeSesPeerManager) ReceiveFrom(peer.ID, []cid.Cid, []cid.Cid) bool { return true } -func (*fakeSesPeerManager) Peers() *peer.Set { return nil } -func (*fakeSesPeerManager) FindMorePeers(context.Context, cid.Cid) {} -func (*fakeSesPeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {} -func (*fakeSesPeerManager) RecordPeerResponse(peer.ID, []cid.Cid) {} -func (*fakeSesPeerManager) RecordCancels(c []cid.Cid) {} +func (*fakeSesPeerManager) Peers() []peer.ID { return nil } +func (*fakeSesPeerManager) PeersDiscovered() bool { return false } +func (*fakeSesPeerManager) Shutdown() {} +func (*fakeSesPeerManager) AddPeer(peer.ID) bool { return false } +func (*fakeSesPeerManager) RemovePeer(peer.ID) bool { return false } +func 
(*fakeSesPeerManager) HasPeers() bool { return false } type fakePeerManager struct { } diff --git a/bitswap/internal/sessionpeermanager/latencytracker.go b/bitswap/internal/sessionpeermanager/latencytracker.go deleted file mode 100644 index 326d2fa4c..000000000 --- a/bitswap/internal/sessionpeermanager/latencytracker.go +++ /dev/null @@ -1,77 +0,0 @@ -package sessionpeermanager - -import ( - "time" - - "github.com/ipfs/go-cid" -) - -type requestData struct { - startedAt time.Time - wasCancelled bool - timeoutFunc *time.Timer -} - -type latencyTracker struct { - requests map[cid.Cid]*requestData -} - -func newLatencyTracker() *latencyTracker { - return &latencyTracker{requests: make(map[cid.Cid]*requestData)} -} - -type afterTimeoutFunc func(cid.Cid) - -func (lt *latencyTracker) SetupRequests(keys []cid.Cid, timeoutDuration time.Duration, afterTimeout afterTimeoutFunc) { - startedAt := time.Now() - for _, k := range keys { - if _, ok := lt.requests[k]; !ok { - lt.requests[k] = &requestData{ - startedAt, - false, - time.AfterFunc(timeoutDuration, makeAfterTimeout(afterTimeout, k)), - } - } - } -} - -func makeAfterTimeout(afterTimeout afterTimeoutFunc, k cid.Cid) func() { - return func() { afterTimeout(k) } -} - -func (lt *latencyTracker) CheckDuration(key cid.Cid) (time.Duration, bool) { - request, ok := lt.requests[key] - var latency time.Duration - if ok { - latency = time.Since(request.startedAt) - } - return latency, ok -} - -func (lt *latencyTracker) RemoveRequest(key cid.Cid) { - request, ok := lt.requests[key] - if ok { - request.timeoutFunc.Stop() - delete(lt.requests, key) - } -} - -func (lt *latencyTracker) RecordCancel(keys []cid.Cid) { - for _, key := range keys { - request, ok := lt.requests[key] - if ok { - request.wasCancelled = true - } - } -} - -func (lt *latencyTracker) WasCancelled(key cid.Cid) bool { - request, ok := lt.requests[key] - return ok && request.wasCancelled -} - -func (lt *latencyTracker) Shutdown() { - for _, request := range 
lt.requests { - request.timeoutFunc.Stop() - } -} diff --git a/bitswap/internal/sessionpeermanager/peerdata.go b/bitswap/internal/sessionpeermanager/peerdata.go deleted file mode 100644 index a06198588..000000000 --- a/bitswap/internal/sessionpeermanager/peerdata.go +++ /dev/null @@ -1,41 +0,0 @@ -package sessionpeermanager - -import ( - "time" - - "github.com/ipfs/go-cid" -) - -const ( - newLatencyWeight = 0.5 -) - -type peerData struct { - hasLatency bool - latency time.Duration - lt *latencyTracker -} - -func newPeerData() *peerData { - return &peerData{ - hasLatency: false, - lt: newLatencyTracker(), - latency: 0, - } -} - -func (pd *peerData) AdjustLatency(k cid.Cid, hasFallbackLatency bool, fallbackLatency time.Duration) { - latency, hasLatency := pd.lt.CheckDuration(k) - pd.lt.RemoveRequest(k) - if !hasLatency { - latency, hasLatency = fallbackLatency, hasFallbackLatency - } - if hasLatency { - if pd.hasLatency { - pd.latency = time.Duration(float64(pd.latency)*(1.0-newLatencyWeight) + float64(latency)*newLatencyWeight) - } else { - pd.latency = latency - pd.hasLatency = true - } - } -} diff --git a/bitswap/internal/sessionpeermanager/sessionpeermanager.go b/bitswap/internal/sessionpeermanager/sessionpeermanager.go index 7957638d3..950770737 100644 --- a/bitswap/internal/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/internal/sessionpeermanager/sessionpeermanager.go @@ -1,26 +1,20 @@ package sessionpeermanager import ( - "context" "fmt" - "math/rand" - "sort" - "time" + "sync" - bssd "github.com/ipfs/go-bitswap/internal/sessiondata" logging "github.com/ipfs/go-log" - cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" ) var log = logging.Logger("bs:sprmgr") const ( - defaultTimeoutDuration = 5 * time.Second - maxOptimizedPeers = 32 - unoptimizedTagValue = 5 // tag value for "unoptimized" session peers. - optimizedTagValue = 10 // tag value for "optimized" session peers. + // Connection Manager tag value for session peers. 
Indicates to connection + // manager that it should keep the connection to the peer. + sessionPeerTagValue = 5 ) // PeerTagger is an interface for tagging peers with metadata @@ -29,362 +23,100 @@ type PeerTagger interface { UntagPeer(p peer.ID, tag string) } -// PeerProviderFinder is an interface for finding providers -type PeerProviderFinder interface { - FindProvidersAsync(context.Context, cid.Cid) <-chan peer.ID -} - -type peerMessage interface { - handle(spm *SessionPeerManager) -} - -// SessionPeerManager tracks and manages peers for a session, and provides -// the best ones to the session +// SessionPeerManager keeps track of peers for a session, and takes care of +// ConnectionManager tagging. type SessionPeerManager struct { - ctx context.Context - tagger PeerTagger - providerFinder PeerProviderFinder - peers *peer.Set - tag string - id uint64 - - peerMessages chan peerMessage + tagger PeerTagger + tag string - // do not touch outside of run loop - activePeers map[peer.ID]*peerData - unoptimizedPeersArr []peer.ID - optimizedPeersArr []peer.ID - broadcastLatency *latencyTracker - timeoutDuration time.Duration + plk sync.RWMutex + peers map[peer.ID]struct{} + peersDiscovered bool } // New creates a new SessionPeerManager -func New(ctx context.Context, id uint64, tagger PeerTagger, providerFinder PeerProviderFinder) *SessionPeerManager { - spm := &SessionPeerManager{ - ctx: ctx, - id: id, - tagger: tagger, - providerFinder: providerFinder, - peers: peer.NewSet(), - peerMessages: make(chan peerMessage, 128), - activePeers: make(map[peer.ID]*peerData), - broadcastLatency: newLatencyTracker(), - timeoutDuration: defaultTimeoutDuration, - } - - spm.tag = fmt.Sprint("bs-ses-", id) - - go spm.run(ctx) - return spm -} - -func (spm *SessionPeerManager) ReceiveFrom(p peer.ID, ks []cid.Cid, haves []cid.Cid) bool { - if len(ks) > 0 || len(haves) > 0 && !spm.peers.Contains(p) { - log.Infof("Added peer %s to session: %d peers\n", p, spm.peers.Size()) - spm.peers.Add(p) - 
return true - } - return false -} - -func (spm *SessionPeerManager) Peers() *peer.Set { - return spm.peers -} - -// RecordPeerResponse records that a peer received some blocks, and adds the -// peer to the list of peers if it wasn't already added -func (spm *SessionPeerManager) RecordPeerResponse(p peer.ID, ks []cid.Cid) { - - select { - case spm.peerMessages <- &peerResponseMessage{p, ks}: - case <-spm.ctx.Done(): - } -} - -// RecordCancels records the fact that cancellations were sent to peers, -// so if blocks don't arrive, don't let it affect the peer's timeout -func (spm *SessionPeerManager) RecordCancels(ks []cid.Cid) { - select { - case spm.peerMessages <- &cancelMessage{ks}: - case <-spm.ctx.Done(): - } -} - -// RecordPeerRequests records that a given set of peers requested the given cids. -func (spm *SessionPeerManager) RecordPeerRequests(p []peer.ID, ks []cid.Cid) { - select { - case spm.peerMessages <- &peerRequestMessage{p, ks}: - case <-spm.ctx.Done(): - } -} - -// GetOptimizedPeers returns the best peers available for a session, along with -// a rating for how good they are, in comparison to the best peer. 
-func (spm *SessionPeerManager) GetOptimizedPeers() []bssd.OptimizedPeer { - // right now this just returns all peers, but soon we might return peers - // ordered by optimization, or only a subset - resp := make(chan []bssd.OptimizedPeer, 1) - select { - case spm.peerMessages <- &getPeersMessage{resp}: - case <-spm.ctx.Done(): - return nil - } - - select { - case peers := <-resp: - return peers - case <-spm.ctx.Done(): - return nil - } -} - -// FindMorePeers attempts to find more peers for a session by searching for -// providers for the given Cid -func (spm *SessionPeerManager) FindMorePeers(ctx context.Context, c cid.Cid) { - go func(k cid.Cid) { - for p := range spm.providerFinder.FindProvidersAsync(ctx, k) { - - select { - case spm.peerMessages <- &peerFoundMessage{p}: - case <-ctx.Done(): - case <-spm.ctx.Done(): - } - } - }(c) -} - -// SetTimeoutDuration changes the length of time used to timeout recording of -// requests -func (spm *SessionPeerManager) SetTimeoutDuration(timeoutDuration time.Duration) { - select { - case spm.peerMessages <- &setTimeoutMessage{timeoutDuration}: - case <-spm.ctx.Done(): - } -} - -func (spm *SessionPeerManager) run(ctx context.Context) { - for { - select { - case pm := <-spm.peerMessages: - pm.handle(spm) - case <-ctx.Done(): - spm.handleShutdown() - return - } - } -} - -func (spm *SessionPeerManager) tagPeer(p peer.ID, data *peerData) { - var value int - if data.hasLatency { - value = optimizedTagValue - } else { - value = unoptimizedTagValue +func New(id uint64, tagger PeerTagger) *SessionPeerManager { + return &SessionPeerManager{ + tag: fmt.Sprint("bs-ses-", id), + tagger: tagger, + peers: make(map[peer.ID]struct{}), } - spm.tagger.TagPeer(p, spm.tag, value) } -func (spm *SessionPeerManager) insertPeer(p peer.ID, data *peerData) { - if data.hasLatency { - insertPos := sort.Search(len(spm.optimizedPeersArr), func(i int) bool { - return spm.activePeers[spm.optimizedPeersArr[i]].latency > data.latency - }) - 
spm.optimizedPeersArr = append(spm.optimizedPeersArr[:insertPos], - append([]peer.ID{p}, spm.optimizedPeersArr[insertPos:]...)...) - } else { - spm.unoptimizedPeersArr = append(spm.unoptimizedPeersArr, p) - } +// AddPeer adds the peer to the SessionPeerManager. +// Returns true if the peer is a new peer, false if it already existed. +func (spm *SessionPeerManager) AddPeer(p peer.ID) bool { + spm.plk.Lock() + defer spm.plk.Unlock() - if !spm.peers.Contains(p) { - log.Infof("Added peer %s to session: %d peers\n", p, spm.peers.Size()) - spm.peers.Add(p) + // Check if the peer is a new peer + if _, ok := spm.peers[p]; ok { + return false } -} -func (spm *SessionPeerManager) removeOptimizedPeer(p peer.ID) { - for i := 0; i < len(spm.optimizedPeersArr); i++ { - if spm.optimizedPeersArr[i] == p { - spm.optimizedPeersArr = append(spm.optimizedPeersArr[:i], spm.optimizedPeersArr[i+1:]...) - return - } - } -} + spm.peers[p] = struct{}{} + spm.peersDiscovered = true -func (spm *SessionPeerManager) removeUnoptimizedPeer(p peer.ID) { - for i := 0; i < len(spm.unoptimizedPeersArr); i++ { - if spm.unoptimizedPeersArr[i] == p { - spm.unoptimizedPeersArr[i] = spm.unoptimizedPeersArr[len(spm.unoptimizedPeersArr)-1] - spm.unoptimizedPeersArr = spm.unoptimizedPeersArr[:len(spm.unoptimizedPeersArr)-1] - return - } - } -} + // Tag the peer with the ConnectionManager so it doesn't discard the + // connection + spm.tagger.TagPeer(p, spm.tag, sessionPeerTagValue) -func (spm *SessionPeerManager) recordResponse(p peer.ID, ks []cid.Cid) { - data, ok := spm.activePeers[p] - wasOptimized := ok && data.hasLatency - if wasOptimized { - spm.removeOptimizedPeer(p) - } else { - if ok { - spm.removeUnoptimizedPeer(p) - } else { - data = newPeerData() - spm.activePeers[p] = data - } - } - for _, k := range ks { - fallbackLatency, hasFallbackLatency := spm.broadcastLatency.CheckDuration(k) - data.AdjustLatency(k, hasFallbackLatency, fallbackLatency) - } - if !ok || wasOptimized != data.hasLatency { - 
spm.tagPeer(p, data) - } - spm.insertPeer(p, data) + log.Infof("Added peer %s to session: %d peers\n", p, len(spm.peers)) + return true } -type peerFoundMessage struct { - p peer.ID -} +// RemovePeer removes the peer from the SessionPeerManager. +// Returns true if the peer was removed, false if it did not exist. +func (spm *SessionPeerManager) RemovePeer(p peer.ID) bool { + spm.plk.Lock() + defer spm.plk.Unlock() -func (pfm *peerFoundMessage) handle(spm *SessionPeerManager) { - p := pfm.p - if _, ok := spm.activePeers[p]; !ok { - spm.activePeers[p] = newPeerData() - spm.insertPeer(p, spm.activePeers[p]) - spm.tagPeer(p, spm.activePeers[p]) + if _, ok := spm.peers[p]; !ok { + return false } -} -type peerResponseMessage struct { - p peer.ID - ks []cid.Cid + delete(spm.peers, p) + spm.tagger.UntagPeer(p, spm.tag) + return true } -func (prm *peerResponseMessage) handle(spm *SessionPeerManager) { - spm.recordResponse(prm.p, prm.ks) -} +// PeersDiscovered indicates whether peers have been discovered yet. +// Returns true once a peer has been discovered by the session (even if all +// peers are later removed from the session). 
+func (spm *SessionPeerManager) PeersDiscovered() bool { + spm.plk.RLock() + defer spm.plk.RUnlock() -type peerRequestMessage struct { - peers []peer.ID - keys []cid.Cid + return spm.peersDiscovered } -func (spm *SessionPeerManager) makeTimeout(p peer.ID) afterTimeoutFunc { - return func(k cid.Cid) { - select { - case spm.peerMessages <- &peerTimeoutMessage{p, k}: - case <-spm.ctx.Done(): - } - } -} +func (spm *SessionPeerManager) Peers() []peer.ID { + spm.plk.RLock() + defer spm.plk.RUnlock() -func (prm *peerRequestMessage) handle(spm *SessionPeerManager) { - if prm.peers == nil { - spm.broadcastLatency.SetupRequests(prm.keys, spm.timeoutDuration, func(k cid.Cid) { - select { - case spm.peerMessages <- &broadcastTimeoutMessage{k}: - case <-spm.ctx.Done(): - } - }) - } else { - for _, p := range prm.peers { - if data, ok := spm.activePeers[p]; ok { - data.lt.SetupRequests(prm.keys, spm.timeoutDuration, spm.makeTimeout(p)) - } - } + peers := make([]peer.ID, 0, len(spm.peers)) + for p := range spm.peers { + peers = append(peers, p) } -} -type getPeersMessage struct { - resp chan<- []bssd.OptimizedPeer + return peers } -// Get all optimized peers in order followed by randomly ordered unoptimized -// peers, with a limit of maxOptimizedPeers -func (prm *getPeersMessage) handle(spm *SessionPeerManager) { - randomOrder := rand.Perm(len(spm.unoptimizedPeersArr)) - - // Number of peers to get in total: unoptimized + optimized - // limited by maxOptimizedPeers - maxPeers := len(spm.unoptimizedPeersArr) + len(spm.optimizedPeersArr) - if maxPeers > maxOptimizedPeers { - maxPeers = maxOptimizedPeers - } - - // The best peer latency is the first optimized peer's latency. - // If we haven't recorded any peer's latency, use 0. 
- var bestPeerLatency float64 - if len(spm.optimizedPeersArr) > 0 { - bestPeerLatency = float64(spm.activePeers[spm.optimizedPeersArr[0]].latency) - } else { - bestPeerLatency = 0 - } +func (spm *SessionPeerManager) HasPeers() bool { + spm.plk.RLock() + defer spm.plk.RUnlock() - optimizedPeers := make([]bssd.OptimizedPeer, 0, maxPeers) - for i := 0; i < maxPeers; i++ { - // First add optimized peers in order - if i < len(spm.optimizedPeersArr) { - p := spm.optimizedPeersArr[i] - optimizedPeers = append(optimizedPeers, bssd.OptimizedPeer{ - Peer: p, - OptimizationRating: bestPeerLatency / float64(spm.activePeers[p].latency), - }) - } else { - // Then add unoptimized peers in random order - p := spm.unoptimizedPeersArr[randomOrder[i-len(spm.optimizedPeersArr)]] - optimizedPeers = append(optimizedPeers, bssd.OptimizedPeer{Peer: p, OptimizationRating: 0.0}) - } - } - prm.resp <- optimizedPeers + return len(spm.peers) > 0 } -type cancelMessage struct { - ks []cid.Cid -} - -func (cm *cancelMessage) handle(spm *SessionPeerManager) { - for _, data := range spm.activePeers { - data.lt.RecordCancel(cm.ks) - } -} +// Shutdown untags all the peers +func (spm *SessionPeerManager) Shutdown() { + spm.plk.Lock() + defer spm.plk.Unlock() -func (spm *SessionPeerManager) handleShutdown() { - for p, data := range spm.activePeers { + // Untag the peers with the ConnectionManager so that it can release + // connections to those peers + for p := range spm.peers { spm.tagger.UntagPeer(p, spm.tag) - data.lt.Shutdown() } } - -type peerTimeoutMessage struct { - p peer.ID - k cid.Cid -} - -func (ptm *peerTimeoutMessage) handle(spm *SessionPeerManager) { - data, ok := spm.activePeers[ptm.p] - // If the request was cancelled, make sure we clean up the request tracker - if ok && data.lt.WasCancelled(ptm.k) { - data.lt.RemoveRequest(ptm.k) - } else { - // If the request was not cancelled, record the latency. Note that we - // do this even if we didn't previously know about this peer. 
- spm.recordResponse(ptm.p, []cid.Cid{ptm.k}) - } -} - -type broadcastTimeoutMessage struct { - k cid.Cid -} - -func (btm *broadcastTimeoutMessage) handle(spm *SessionPeerManager) { - spm.broadcastLatency.RemoveRequest(btm.k) -} - -type setTimeoutMessage struct { - timeoutDuration time.Duration -} - -func (stm *setTimeoutMessage) handle(spm *SessionPeerManager) { - spm.timeoutDuration = stm.timeoutDuration -} From 66eaa6e0f3cdba9b7bd00b3dcb423eccbd3a07f5 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 4 Mar 2020 10:24:47 -0500 Subject: [PATCH 0842/1035] test: fix session tests This commit was moved from ipfs/go-bitswap@369b794b02a60138306ad5a5a9d53ad2bef3e2d0 --- bitswap/internal/session/session_test.go | 2 +- .../session/sessionwantsender_test.go | 141 ++++++++++-------- .../sessionpeermanager/sessionpeermanager.go | 8 + 3 files changed, 89 insertions(+), 62 deletions(-) diff --git a/bitswap/internal/session/session_test.go b/bitswap/internal/session/session_test.go index 13f2b3021..d40036d3d 100644 --- a/bitswap/internal/session/session_test.go +++ b/bitswap/internal/session/session_test.go @@ -162,7 +162,7 @@ func TestSessionGetBlocks(t *testing.T) { // Simulate receiving block for a CID session.ReceiveFrom(peers[1], []cid.Cid{blks[0].Cid()}, []cid.Cid{}, []cid.Cid{}) - time.Sleep(100 * time.Millisecond) + time.Sleep(10 * time.Millisecond) // Verify session no longer wants received block wanted, unwanted := sim.SplitWantedUnwanted(blks) diff --git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go index ecea497bb..404447668 100644 --- a/bitswap/internal/session/sessionwantsender_test.go +++ b/bitswap/internal/session/sessionwantsender_test.go @@ -72,10 +72,11 @@ func TestSendWants(t *testing.T) { peerA := peers[0] sid := uint64(1) pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} 
- spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() @@ -83,7 +84,7 @@ func TestSendWants(t *testing.T) { blkCids0 := cids[0:2] spm.Add(blkCids0) // peerA: HAVE cid0 - spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true) + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) // Wait for processing to complete peerSends := pm.waitNextWants() @@ -109,10 +110,11 @@ func TestSendsWantBlockToOnePeerOnly(t *testing.T) { peerB := peers[1] sid := uint64(1) pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() @@ -120,7 +122,7 @@ func TestSendsWantBlockToOnePeerOnly(t *testing.T) { blkCids0 := cids[0:2] spm.Add(blkCids0) // peerA: HAVE cid0 - spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true) + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) // Wait for processing to complete peerSends := pm.waitNextWants() @@ -139,7 +141,7 @@ func TestSendsWantBlockToOnePeerOnly(t *testing.T) { pm.clearWants() // peerB: HAVE cid0 - spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true) + spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) // Wait for processing to complete peerSends = pm.waitNextWants() @@ -166,17 +168,18 @@ func TestReceiveBlock(t *testing.T) { peerB := peers[1] sid := uint64(1) pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, 
onPeersExhausted) + spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() // add cid0, cid1 spm.Add(cids) // peerA: HAVE cid0 - spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true) + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) // Wait for processing to complete peerSends := pm.waitNextWants() @@ -196,10 +199,10 @@ func TestReceiveBlock(t *testing.T) { // peerA: block cid0, DONT_HAVE cid1 bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]}) - spm.Update(peerA, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{cids[1]}, false) + spm.Update(peerA, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{cids[1]}) // peerB: HAVE cid0, cid1 bpm.ReceiveFrom(peerB, cids, []cid.Cid{}) - spm.Update(peerB, []cid.Cid{}, cids, []cid.Cid{}, true) + spm.Update(peerB, []cid.Cid{}, cids, []cid.Cid{}) // Wait for processing to complete peerSends = pm.waitNextWants() @@ -225,17 +228,18 @@ func TestPeerUnavailable(t *testing.T) { peerB := peers[1] sid := uint64(1) pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() // add cid0, cid1 spm.Add(cids) // peerA: HAVE cid0 - spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true) + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) // Wait for processing to complete peerSends := pm.waitNextWants() @@ -254,7 +258,7 @@ func TestPeerUnavailable(t *testing.T) { pm.clearWants() // peerB: HAVE cid0 - spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true) + spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) // Wait for processing to complete peerSends = pm.waitNextWants() @@ -283,12 +287,13 @@ func 
TestPeerUnavailable(t *testing.T) { } func TestPeersExhausted(t *testing.T) { - cids := testutil.GenerateCids(2) + cids := testutil.GenerateCids(3) peers := testutil.GeneratePeers(2) peerA := peers[0] peerB := peers[1] sid := uint64(1) pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} @@ -296,53 +301,62 @@ func TestPeersExhausted(t *testing.T) { onPeersExhausted := func(ks []cid.Cid) { exhausted = append(exhausted, ks...) } - spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() // add cid0, cid1 spm.Add(cids) - // peerA: DONT_HAVE cid0 - bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[0]}) + // peerA: HAVE cid0 + bpm.ReceiveFrom(peerA, []cid.Cid{cids[0]}, []cid.Cid{}) // Note: this also registers peer A as being available - spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[0]}, true) + spm.Update(peerA, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{}) + + // peerA: DONT_HAVE cid1 + bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]}) + spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[1]}) time.Sleep(5 * time.Millisecond) - // All available peers (peer A) have sent us a DONT_HAVE for cid0, - // so expect that onPeersExhausted() will be called with cid0 - if !testutil.MatchKeysIgnoreOrder(exhausted, []cid.Cid{cids[0]}) { + // All available peers (peer A) have sent us a DONT_HAVE for cid1, + // so expect that onPeersExhausted() will be called with cid1 + if !testutil.MatchKeysIgnoreOrder(exhausted, []cid.Cid{cids[1]}) { t.Fatal("Wrong keys") } // Clear exhausted cids exhausted = []cid.Cid{} - // peerB: DONT_HAVE cid0, cid1 - bpm.ReceiveFrom(peerB, []cid.Cid{}, cids) - spm.Update(peerB, []cid.Cid{}, []cid.Cid{}, cids, true) + // peerB: HAVE cid0 + bpm.ReceiveFrom(peerB, []cid.Cid{cids[0]}, []cid.Cid{}) + // Note: this also 
registers peer B as being available + spm.Update(peerB, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{}) + + // peerB: DONT_HAVE cid1, cid2 + bpm.ReceiveFrom(peerB, []cid.Cid{}, []cid.Cid{cids[1], cids[2]}) + spm.Update(peerB, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[1], cids[2]}) // Wait for processing to complete pm.waitNextWants() // All available peers (peer A and peer B) have sent us a DONT_HAVE - // for cid0, but we already called onPeersExhausted with cid0, so it + // for cid1, but we already called onPeersExhausted with cid1, so it // should not be called again if len(exhausted) > 0 { t.Fatal("Wrong keys") } - // peerA: DONT_HAVE cid1 - bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]}) - spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[1]}, false) + // peerA: DONT_HAVE cid2 + bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[2]}) + spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[2]}) // Wait for processing to complete pm.waitNextWants() // All available peers (peer A and peer B) have sent us a DONT_HAVE for - // cid1, so expect that onPeersExhausted() will be called with cid1 - if !testutil.MatchKeysIgnoreOrder(exhausted, []cid.Cid{cids[1]}) { + // cid2, so expect that onPeersExhausted() will be called with cid2 + if !testutil.MatchKeysIgnoreOrder(exhausted, []cid.Cid{cids[2]}) { t.Fatal("Wrong keys") } } @@ -358,6 +372,7 @@ func TestPeersExhaustedLastWaitingPeerUnavailable(t *testing.T) { peerB := peers[1] sid := uint64(1) pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} @@ -365,7 +380,7 @@ func TestPeersExhaustedLastWaitingPeerUnavailable(t *testing.T) { onPeersExhausted := func(ks []cid.Cid) { exhausted = append(exhausted, ks...) 
} - spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() @@ -375,15 +390,15 @@ func TestPeersExhaustedLastWaitingPeerUnavailable(t *testing.T) { // peerA: HAVE cid0 bpm.ReceiveFrom(peerA, []cid.Cid{cids[0]}, []cid.Cid{}) // Note: this also registers peer A as being available - spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true) + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) // peerB: HAVE cid0 bpm.ReceiveFrom(peerB, []cid.Cid{cids[0]}, []cid.Cid{}) // Note: this also registers peer B as being available - spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true) + spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) // peerA: DONT_HAVE cid1 bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]}) - spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[0]}, false) + spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[0]}) time.Sleep(5 * time.Millisecond) @@ -408,6 +423,7 @@ func TestPeersExhaustedAllPeersUnavailable(t *testing.T) { peerB := peers[1] sid := uint64(1) pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} @@ -415,7 +431,7 @@ func TestPeersExhaustedAllPeersUnavailable(t *testing.T) { onPeersExhausted := func(ks []cid.Cid) { exhausted = append(exhausted, ks...) 
} - spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() @@ -423,11 +439,11 @@ func TestPeersExhaustedAllPeersUnavailable(t *testing.T) { spm.Add(cids) // peerA: receive block for cid0 (and register peer A with sessionWantSender) - spm.Update(peerA, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{}, true) + spm.Update(peerA, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{}) // peerB: HAVE cid1 bpm.ReceiveFrom(peerB, []cid.Cid{cids[0]}, []cid.Cid{}) // Note: this also registers peer B as being available - spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}, true) + spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) time.Sleep(5 * time.Millisecond) @@ -449,10 +465,11 @@ func TestConsecutiveDontHaveLimit(t *testing.T) { p := testutil.GeneratePeers(1)[0] sid := uint64(1) pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() @@ -461,41 +478,41 @@ func TestConsecutiveDontHaveLimit(t *testing.T) { // Receive a HAVE from peer (adds it to the session) bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) - spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}, true) + spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) // Wait for processing to complete time.Sleep(5 * time.Millisecond) // Peer should be available - if avail, ok := spm.isAvailable(p); !ok || !avail { + if has := fpm.HasPeer(p); !has { t.Fatal("Expected peer to be available") } // Receive DONT_HAVEs from peer that do not exceed limit for _, c := range cids[1:peerDontHaveLimit] { bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) - spm.Update(p, []cid.Cid{}, 
[]cid.Cid{}, []cid.Cid{c}, false) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) } // Wait for processing to complete time.Sleep(5 * time.Millisecond) // Peer should be available - if avail, ok := spm.isAvailable(p); !ok || !avail { + if has := fpm.HasPeer(p); !has { t.Fatal("Expected peer to be available") } // Receive DONT_HAVEs from peer that exceed limit for _, c := range cids[peerDontHaveLimit:] { bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) - spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}, false) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) } // Wait for processing to complete time.Sleep(5 * time.Millisecond) // Session should remove peer - if avail, _ := spm.isAvailable(p); avail { + if has := fpm.HasPeer(p); has { t.Fatal("Expected peer not to be available") } } @@ -505,10 +522,11 @@ func TestConsecutiveDontHaveLimitInterrupted(t *testing.T) { p := testutil.GeneratePeers(1)[0] sid := uint64(1) pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() @@ -517,13 +535,13 @@ func TestConsecutiveDontHaveLimitInterrupted(t *testing.T) { // Receive a HAVE from peer (adds it to the session) bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) - spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}, true) + spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) // Wait for processing to complete time.Sleep(5 * time.Millisecond) // Peer should be available - if avail, ok := spm.isAvailable(p); !ok || !avail { + if has := fpm.HasPeer(p); !has { t.Fatal("Expected peer to be available") } @@ -533,24 +551,24 @@ func TestConsecutiveDontHaveLimitInterrupted(t *testing.T) { for _, c := range cids[1:peerDontHaveLimit] { // DONT_HAVEs bpm.ReceiveFrom(p, 
[]cid.Cid{}, []cid.Cid{c}) - spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}, false) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) } for _, c := range cids[peerDontHaveLimit : peerDontHaveLimit+1] { // HAVEs bpm.ReceiveFrom(p, []cid.Cid{c}, []cid.Cid{}) - spm.Update(p, []cid.Cid{}, []cid.Cid{c}, []cid.Cid{}, false) + spm.Update(p, []cid.Cid{}, []cid.Cid{c}, []cid.Cid{}) } for _, c := range cids[peerDontHaveLimit+1:] { // DONT_HAVEs bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) - spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}, false) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) } // Wait for processing to complete time.Sleep(5 * time.Millisecond) // Peer should be available - if avail, ok := spm.isAvailable(p); !ok || !avail { + if has := fpm.HasPeer(p); !has { t.Fatal("Expected peer to be available") } } @@ -560,10 +578,11 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { p := testutil.GeneratePeers(1)[0] sid := uint64(1) pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(context.Background(), sid, pm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() @@ -572,39 +591,39 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { // Receive a HAVE from peer (adds it to the session) bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) - spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}, true) + spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) // Wait for processing to complete time.Sleep(5 * time.Millisecond) // Peer should be available - if avail, ok := spm.isAvailable(p); !ok || !avail { + if has := fpm.HasPeer(p); !has { t.Fatal("Expected peer to be available") } // Receive DONT_HAVEs from peer that exceed limit for _, c := range cids[1 : peerDontHaveLimit+2] { bpm.ReceiveFrom(p, 
[]cid.Cid{}, []cid.Cid{c}) - spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}, false) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) } // Wait for processing to complete time.Sleep(5 * time.Millisecond) // Session should remove peer - if avail, _ := spm.isAvailable(p); avail { + if has := fpm.HasPeer(p); has { t.Fatal("Expected peer not to be available") } // Receive a HAVE from peer (adds it back into the session) bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) - spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}, true) + spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) // Wait for processing to complete time.Sleep(5 * time.Millisecond) // Peer should be available - if avail, ok := spm.isAvailable(p); !ok || !avail { + if has := fpm.HasPeer(p); !has { t.Fatal("Expected peer to be available") } @@ -613,28 +632,28 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { // Receive DONT_HAVEs from peer that don't exceed limit for _, c := range cids2[1:peerDontHaveLimit] { bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) - spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}, false) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) } // Wait for processing to complete time.Sleep(5 * time.Millisecond) // Peer should be available - if avail, ok := spm.isAvailable(p); !ok || !avail { + if has := fpm.HasPeer(p); !has { t.Fatal("Expected peer to be available") } // Receive DONT_HAVEs from peer that exceed limit for _, c := range cids2[peerDontHaveLimit:] { bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) - spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}, false) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) } // Wait for processing to complete time.Sleep(5 * time.Millisecond) // Session should remove peer - if avail, _ := spm.isAvailable(p); avail { + if has := fpm.HasPeer(p); has { t.Fatal("Expected peer not to be available") } } diff --git a/bitswap/internal/sessionpeermanager/sessionpeermanager.go 
b/bitswap/internal/sessionpeermanager/sessionpeermanager.go index 950770737..cc6e71106 100644 --- a/bitswap/internal/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/internal/sessionpeermanager/sessionpeermanager.go @@ -109,6 +109,14 @@ func (spm *SessionPeerManager) HasPeers() bool { return len(spm.peers) > 0 } +func (spm *SessionPeerManager) HasPeer(p peer.ID) bool { + spm.plk.RLock() + defer spm.plk.RUnlock() + + _, ok := spm.peers[p] + return ok +} + // Shutdown untags all the peers func (spm *SessionPeerManager) Shutdown() { spm.plk.Lock() From 70df873887e6fef1bc445baebf0400d90234258e Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 4 Mar 2020 10:46:55 -0500 Subject: [PATCH 0843/1035] test: fix session peer manager tests This commit was moved from ipfs/go-bitswap@fafdaaec61ef3fd1228ab294601ad67b46e3d570 --- .../sessionpeermanager_test.go | 457 +++++++----------- 1 file changed, 165 insertions(+), 292 deletions(-) diff --git a/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go b/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go index 9a771b188..9e0d633e6 100644 --- a/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go @@ -1,46 +1,13 @@ package sessionpeermanager import ( - "context" - "fmt" - "math/rand" "sync" "testing" - "time" "github.com/ipfs/go-bitswap/internal/testutil" - - cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" ) -type fakePeerProviderFinder struct { - peers []peer.ID - completed chan struct{} -} - -func (fppf *fakePeerProviderFinder) FindProvidersAsync(ctx context.Context, c cid.Cid) <-chan peer.ID { - peerCh := make(chan peer.ID) - go func() { - - for _, p := range fppf.peers { - select { - case peerCh <- p: - case <-ctx.Done(): - close(peerCh) - return - } - } - close(peerCh) - - select { - case fppf.completed <- struct{}{}: - case <-ctx.Done(): - } - }() - return peerCh -} - type fakePeerTagger 
struct { lk sync.Mutex taggedPeers []peer.ID @@ -75,324 +42,230 @@ func (fpt *fakePeerTagger) count() int { return len(fpt.taggedPeers) } -func getPeers(sessionPeerManager *SessionPeerManager) []peer.ID { - optimizedPeers := sessionPeerManager.GetOptimizedPeers() - var peers []peer.ID - for _, optimizedPeer := range optimizedPeers { - peers = append(peers, optimizedPeer.Peer) +// func TestFindingMorePeers(t *testing.T) { +// ctx := context.Background() +// ctx, cancel := context.WithCancel(ctx) +// defer cancel() +// completed := make(chan struct{}) + +// peers := testutil.GeneratePeers(5) +// fpt := &fakePeerTagger{} +// fppf := &fakePeerProviderFinder{peers, completed} +// c := testutil.GenerateCids(1)[0] +// id := testutil.GenerateSessionID() + +// sessionPeerManager := New(ctx, id, fpt, fppf) + +// findCtx, findCancel := context.WithTimeout(ctx, 10*time.Millisecond) +// defer findCancel() +// sessionPeerManager.FindMorePeers(ctx, c) +// select { +// case <-completed: +// case <-findCtx.Done(): +// t.Fatal("Did not finish finding providers") +// } +// time.Sleep(2 * time.Millisecond) + +// sessionPeers := getPeers(sessionPeerManager) +// if len(sessionPeers) != len(peers) { +// t.Fatal("incorrect number of peers found") +// } +// for _, p := range sessionPeers { +// if !testutil.ContainsPeer(peers, p) { +// t.Fatal("incorrect peer found through finding providers") +// } +// } +// if len(fpt.taggedPeers) != len(peers) { +// t.Fatal("Peers were not tagged!") +// } +// } + +func TestAddPeers(t *testing.T) { + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) + + isNew := spm.AddPeer(peers[0]) + if !isNew { + t.Fatal("Expected peer to be new") } - return peers -} -func TestFindingMorePeers(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - completed := make(chan struct{}) - - peers := testutil.GeneratePeers(5) - fpt := &fakePeerTagger{} - fppf := &fakePeerProviderFinder{peers, completed} - c 
:= testutil.GenerateCids(1)[0] - id := testutil.GenerateSessionID() - - sessionPeerManager := New(ctx, id, fpt, fppf) - - findCtx, findCancel := context.WithTimeout(ctx, 10*time.Millisecond) - defer findCancel() - sessionPeerManager.FindMorePeers(ctx, c) - select { - case <-completed: - case <-findCtx.Done(): - t.Fatal("Did not finish finding providers") + isNew = spm.AddPeer(peers[0]) + if isNew { + t.Fatal("Expected peer to no longer be new") } - time.Sleep(2 * time.Millisecond) - sessionPeers := getPeers(sessionPeerManager) - if len(sessionPeers) != len(peers) { - t.Fatal("incorrect number of peers found") - } - for _, p := range sessionPeers { - if !testutil.ContainsPeer(peers, p) { - t.Fatal("incorrect peer found through finding providers") - } - } - if len(fpt.taggedPeers) != len(peers) { - t.Fatal("Peers were not tagged!") + isNew = spm.AddPeer(peers[1]) + if !isNew { + t.Fatal("Expected peer to be new") } } -func TestRecordingReceivedBlocks(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - p := testutil.GeneratePeers(1)[0] - fpt := &fakePeerTagger{} - fppf := &fakePeerProviderFinder{} - c := testutil.GenerateCids(1)[0] - id := testutil.GenerateSessionID() - - sessionPeerManager := New(ctx, id, fpt, fppf) - sessionPeerManager.RecordPeerResponse(p, []cid.Cid{c}) - time.Sleep(10 * time.Millisecond) - sessionPeers := getPeers(sessionPeerManager) - if len(sessionPeers) != 1 { - t.Fatal("did not add peer on receive") +func TestRemovePeers(t *testing.T) { + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) + + existed := spm.RemovePeer(peers[0]) + if existed { + t.Fatal("Expected peer not to exist") + } + + spm.AddPeer(peers[0]) + spm.AddPeer(peers[1]) + + existed = spm.RemovePeer(peers[0]) + if !existed { + t.Fatal("Expected peer to exist") } - if sessionPeers[0] != p { - t.Fatal("incorrect peer added on receive") + existed = spm.RemovePeer(peers[1]) + if !existed { + t.Fatal("Expected 
peer to exist") } - if len(fpt.taggedPeers) != 1 { - t.Fatal("Peers was not tagged!") + existed = spm.RemovePeer(peers[0]) + if existed { + t.Fatal("Expected peer not to have existed") } } -func TestOrderingPeers(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 60*time.Millisecond) - defer cancel() - peerCount := 100 - peers := testutil.GeneratePeers(peerCount) - completed := make(chan struct{}) - fpt := &fakePeerTagger{} - fppf := &fakePeerProviderFinder{peers, completed} - c := testutil.GenerateCids(1) - id := testutil.GenerateSessionID() - sessionPeerManager := New(ctx, id, fpt, fppf) - - // add all peers to session - sessionPeerManager.FindMorePeers(ctx, c[0]) - select { - case <-completed: - case <-ctx.Done(): - t.Fatal("Did not finish finding providers") - } - time.Sleep(5 * time.Millisecond) - - // record broadcast - sessionPeerManager.RecordPeerRequests(nil, c) - - // record receives - randi := rand.Perm(peerCount) - peer1 := peers[randi[0]] - peer2 := peers[randi[1]] - peer3 := peers[randi[2]] - time.Sleep(5 * time.Millisecond) - sessionPeerManager.RecordPeerResponse(peer1, []cid.Cid{c[0]}) - time.Sleep(25 * time.Millisecond) - sessionPeerManager.RecordPeerResponse(peer2, []cid.Cid{c[0]}) - time.Sleep(5 * time.Millisecond) - sessionPeerManager.RecordPeerResponse(peer3, []cid.Cid{c[0]}) - - sessionPeers := sessionPeerManager.GetOptimizedPeers() - if len(sessionPeers) != maxOptimizedPeers { - t.Fatal(fmt.Sprintf("Should not return more (%d) than the max of optimized peers (%d)", len(sessionPeers), maxOptimizedPeers)) - } +func TestHasPeers(t *testing.T) { + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) - // should prioritize peers which are fastest - // peer1: ~5ms - // peer2: 5 + 25 = ~30ms - // peer3: 5 + 25 + 5 = ~35ms - if (sessionPeers[0].Peer != peer1) || (sessionPeers[1].Peer != peer2) || (sessionPeers[2].Peer != peer3) { - t.Fatal("Did not prioritize peers that received blocks") + if 
spm.HasPeers() { + t.Fatal("Expected not to have peers yet") } - // should give first peer rating of 1 - if sessionPeers[0].OptimizationRating < 1.0 { - t.Fatal("Did not assign rating to best peer correctly") + spm.AddPeer(peers[0]) + if !spm.HasPeers() { + t.Fatal("Expected to have peers") } - // should give other optimized peers ratings between 0 & 1 - if (sessionPeers[1].OptimizationRating >= 1.0) || (sessionPeers[1].OptimizationRating <= 0.0) || - (sessionPeers[2].OptimizationRating >= 1.0) || (sessionPeers[2].OptimizationRating <= 0.0) { - t.Fatal("Did not assign rating to other optimized peers correctly") + spm.AddPeer(peers[1]) + if !spm.HasPeers() { + t.Fatal("Expected to have peers") } - // should give other non-optimized peers rating of zero - for i := 3; i < maxOptimizedPeers; i++ { - if sessionPeers[i].OptimizationRating != 0.0 { - t.Fatal("Did not assign rating to unoptimized peer correctly") - } + spm.RemovePeer(peers[0]) + if !spm.HasPeers() { + t.Fatal("Expected to have peers") } - c2 := testutil.GenerateCids(1) - - // Request again - sessionPeerManager.RecordPeerRequests(nil, c2) + spm.RemovePeer(peers[1]) + if spm.HasPeers() { + t.Fatal("Expected to no longer have peers") + } +} - // Receive a second time - sessionPeerManager.RecordPeerResponse(peer3, []cid.Cid{c2[0]}) +func TestHasPeer(t *testing.T) { + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) - // call again - nextSessionPeers := sessionPeerManager.GetOptimizedPeers() - if len(nextSessionPeers) != maxOptimizedPeers { - t.Fatal(fmt.Sprintf("Should not return more (%d) than the max of optimized peers (%d)", len(nextSessionPeers), maxOptimizedPeers)) + if spm.HasPeer(peers[0]) { + t.Fatal("Expected not to have peer yet") } - // should sort by average latency - // peer1: ~5ms - // peer3: (~35ms + ~5ms) / 2 = ~20ms - // peer2: ~30ms - if (nextSessionPeers[0].Peer != peer1) || (nextSessionPeers[1].Peer != peer3) || - (nextSessionPeers[2].Peer != peer2) { - t.Fatal("Did not 
correctly update order of peers sorted by average latency") + spm.AddPeer(peers[0]) + if !spm.HasPeer(peers[0]) { + t.Fatal("Expected to have peer") } - // should randomize other peers - totalSame := 0 - for i := 3; i < maxOptimizedPeers; i++ { - if sessionPeers[i].Peer == nextSessionPeers[i].Peer { - totalSame++ - } - } - if totalSame >= maxOptimizedPeers-3 { - t.Fatal("should not return the same random peers each time") + spm.AddPeer(peers[1]) + if !spm.HasPeer(peers[1]) { + t.Fatal("Expected to have peer") } -} -func TestTimeoutsAndCancels(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 2*time.Second) - defer cancel() - peers := testutil.GeneratePeers(3) - completed := make(chan struct{}) - fpt := &fakePeerTagger{} - fppf := &fakePeerProviderFinder{peers, completed} - c := testutil.GenerateCids(1) - id := testutil.GenerateSessionID() - sessionPeerManager := New(ctx, id, fpt, fppf) - - // add all peers to session - sessionPeerManager.FindMorePeers(ctx, c[0]) - select { - case <-completed: - case <-ctx.Done(): - t.Fatal("Did not finish finding providers") + spm.RemovePeer(peers[0]) + if spm.HasPeer(peers[0]) { + t.Fatal("Expected not to have peer") } - time.Sleep(2 * time.Millisecond) - - sessionPeerManager.SetTimeoutDuration(20 * time.Millisecond) - - // record broadcast - sessionPeerManager.RecordPeerRequests(nil, c) - // record receives - peer1 := peers[0] - peer2 := peers[1] - peer3 := peers[2] - time.Sleep(1 * time.Millisecond) - sessionPeerManager.RecordPeerResponse(peer1, []cid.Cid{c[0]}) - time.Sleep(2 * time.Millisecond) - sessionPeerManager.RecordPeerResponse(peer2, []cid.Cid{c[0]}) - time.Sleep(40 * time.Millisecond) - sessionPeerManager.RecordPeerResponse(peer3, []cid.Cid{c[0]}) + if !spm.HasPeer(peers[1]) { + t.Fatal("Expected to have peer") + } +} - sessionPeers := sessionPeerManager.GetOptimizedPeers() +func TestPeers(t *testing.T) { + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) - // 
should prioritize peers which are fastest - if (sessionPeers[0].Peer != peer1) || (sessionPeers[1].Peer != peer2) || (sessionPeers[2].Peer != peer3) { - t.Fatal("Did not prioritize peers that received blocks") + if len(spm.Peers()) > 0 { + t.Fatal("Expected not to have peers yet") } - // should give first peer rating of 1 - if sessionPeers[0].OptimizationRating < 1.0 { - t.Fatal("Did not assign rating to best peer correctly") + spm.AddPeer(peers[0]) + if len(spm.Peers()) != 1 { + t.Fatal("Expected to have one peer") } - // should give other optimized peers ratings between 0 & 1 - if (sessionPeers[1].OptimizationRating >= 1.0) || (sessionPeers[1].OptimizationRating <= 0.0) { - t.Fatal("Did not assign rating to other optimized peers correctly") + spm.AddPeer(peers[1]) + if len(spm.Peers()) != 2 { + t.Fatal("Expected to have two peers") } - // should not record a response for a broadcast return that arrived AFTER the timeout period - // leaving peer unoptimized - if sessionPeers[2].OptimizationRating != 0 { - t.Fatal("should not have recorded broadcast response for peer that arrived after timeout period") + spm.RemovePeer(peers[0]) + if len(spm.Peers()) != 1 { + t.Fatal("Expected to have one peer") } +} - // now we make a targeted request, which SHOULD affect peer - // rating if it times out - c2 := testutil.GenerateCids(1) - - // Request again - sessionPeerManager.RecordPeerRequests([]peer.ID{peer2}, c2) - // wait for a timeout - time.Sleep(40 * time.Millisecond) +func TestPeersDiscovered(t *testing.T) { + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) - // call again - nextSessionPeers := sessionPeerManager.GetOptimizedPeers() - if sessionPeers[1].OptimizationRating <= nextSessionPeers[1].OptimizationRating { - t.Fatal("Timeout should have affected optimization rating but did not") + if spm.PeersDiscovered() { + t.Fatal("Expected not to have discovered peers yet") } - // now we make a targeted request, but later cancel it - // timing out 
should not affect rating - c3 := testutil.GenerateCids(1) - - // Request again - sessionPeerManager.RecordPeerRequests([]peer.ID{peer2}, c3) - sessionPeerManager.RecordCancels([]cid.Cid{c3[0]}) - // wait for a timeout - time.Sleep(40 * time.Millisecond) + spm.AddPeer(peers[0]) + if !spm.PeersDiscovered() { + t.Fatal("Expected to have discovered peers") + } - // call again - thirdSessionPeers := sessionPeerManager.GetOptimizedPeers() - if nextSessionPeers[1].OptimizationRating != thirdSessionPeers[1].OptimizationRating { - t.Fatal("Timeout should not have affected optimization rating but did") + spm.RemovePeer(peers[0]) + if !spm.PeersDiscovered() { + t.Fatal("Expected to still have discovered peers") } +} - // if we make a targeted request that is then cancelled, but we still - // receive the block before the timeout, it's worth recording and affecting latency +func TestPeerTagging(t *testing.T) { + peers := testutil.GeneratePeers(2) + fpt := &fakePeerTagger{} + spm := New(1, fpt) - c4 := testutil.GenerateCids(1) + spm.AddPeer(peers[0]) + if len(fpt.taggedPeers) != 1 { + t.Fatal("Expected to have tagged one peer") + } - // Request again - sessionPeerManager.RecordPeerRequests([]peer.ID{peer2}, c4) - sessionPeerManager.RecordCancels([]cid.Cid{c4[0]}) - time.Sleep(2 * time.Millisecond) - sessionPeerManager.RecordPeerResponse(peer2, []cid.Cid{c4[0]}) - time.Sleep(2 * time.Millisecond) + spm.AddPeer(peers[0]) + if len(fpt.taggedPeers) != 1 { + t.Fatal("Expected to have tagged one peer") + } - // call again - fourthSessionPeers := sessionPeerManager.GetOptimizedPeers() - if thirdSessionPeers[1].OptimizationRating >= fourthSessionPeers[1].OptimizationRating { - t.Fatal("Timeout should have affected optimization rating but did not") + spm.AddPeer(peers[1]) + if len(fpt.taggedPeers) != 2 { + t.Fatal("Expected to have tagged two peers") } - // ensure all peer latency tracking has been cleaned up - if len(sessionPeerManager.activePeers[peer2].lt.requests) > 0 { - 
t.Fatal("Latency request tracking should have been cleaned up but was not") + spm.RemovePeer(peers[1]) + if len(fpt.taggedPeers) != 1 { + t.Fatal("Expected to have untagged peer") } } -func TestUntaggingPeers(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 30*time.Millisecond) - defer cancel() - peers := testutil.GeneratePeers(5) - completed := make(chan struct{}) +func TestShutdown(t *testing.T) { + peers := testutil.GeneratePeers(2) fpt := &fakePeerTagger{} - fppf := &fakePeerProviderFinder{peers, completed} - c := testutil.GenerateCids(1)[0] - id := testutil.GenerateSessionID() - - sessionPeerManager := New(ctx, id, fpt, fppf) + spm := New(1, fpt) - sessionPeerManager.FindMorePeers(ctx, c) - select { - case <-completed: - case <-ctx.Done(): - t.Fatal("Did not finish finding providers") + spm.AddPeer(peers[0]) + spm.AddPeer(peers[1]) + if len(fpt.taggedPeers) != 2 { + t.Fatal("Expected to have tagged two peers") } - time.Sleep(15 * time.Millisecond) - if fpt.count() != len(peers) { - t.Fatal("Peers were not tagged!") - } - <-ctx.Done() - fpt.wait.Wait() + spm.Shutdown() - if fpt.count() != 0 { - t.Fatal("Peers were not untagged!") + if len(fpt.taggedPeers) != 0 { + t.Fatal("Expected to have untagged all peers") } } From b8fcc373418a4ed0fc7fa074c50acf5f0d2140de Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 4 Mar 2020 10:58:16 -0500 Subject: [PATCH 0844/1035] docs: document session idle tick behaviour This commit was moved from ipfs/go-bitswap@b34fe0b3e7f048069add9c5ee2857848ccd01986 --- bitswap/internal/session/session.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index 412484cc9..b92319280 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -440,6 +440,13 @@ func (s *Session) wantBlocks(ctx context.Context, newks []cid.Cid) { } } +// The session will broadcast if it has outstanding 
wants and doesn't receive +// any blocks for some time. +// The length of time is calculated +// - initially +// as a fixed delay +// - once some blocks are received +// from a base delay and average latency, with a backoff func (s *Session) resetIdleTick() { var tickDelay time.Duration if !s.latencyTrkr.hasLatency() { @@ -453,6 +460,8 @@ func (s *Session) resetIdleTick() { s.idleTick.Reset(tickDelay) } +// latencyTracker keeps track of the average latency between sending a want +// and receiving the corresponding block type latencyTracker struct { totalLatency time.Duration count int From aa5f6c6944d32fbbc2c1c90977cff8a45d0de2fe Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 4 Mar 2020 11:09:44 -0500 Subject: [PATCH 0845/1035] test: clean up tests This commit was moved from ipfs/go-bitswap@1c24de2cbdd6e04fe52e171d04f876ea2d459b92 --- .../sessionpeermanager_test.go | 44 ------------------- 1 file changed, 44 deletions(-) diff --git a/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go b/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go index 9e0d633e6..e3c1c4ab4 100644 --- a/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go @@ -36,50 +36,6 @@ func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) { } } -func (fpt *fakePeerTagger) count() int { - fpt.lk.Lock() - defer fpt.lk.Unlock() - return len(fpt.taggedPeers) -} - -// func TestFindingMorePeers(t *testing.T) { -// ctx := context.Background() -// ctx, cancel := context.WithCancel(ctx) -// defer cancel() -// completed := make(chan struct{}) - -// peers := testutil.GeneratePeers(5) -// fpt := &fakePeerTagger{} -// fppf := &fakePeerProviderFinder{peers, completed} -// c := testutil.GenerateCids(1)[0] -// id := testutil.GenerateSessionID() - -// sessionPeerManager := New(ctx, id, fpt, fppf) - -// findCtx, findCancel := context.WithTimeout(ctx, 10*time.Millisecond) -// defer findCancel() -// 
sessionPeerManager.FindMorePeers(ctx, c) -// select { -// case <-completed: -// case <-findCtx.Done(): -// t.Fatal("Did not finish finding providers") -// } -// time.Sleep(2 * time.Millisecond) - -// sessionPeers := getPeers(sessionPeerManager) -// if len(sessionPeers) != len(peers) { -// t.Fatal("incorrect number of peers found") -// } -// for _, p := range sessionPeers { -// if !testutil.ContainsPeer(peers, p) { -// t.Fatal("incorrect peer found through finding providers") -// } -// } -// if len(fpt.taggedPeers) != len(peers) { -// t.Fatal("Peers were not tagged!") -// } -// } - func TestAddPeers(t *testing.T) { peers := testutil.GeneratePeers(2) spm := New(1, &fakePeerTagger{}) From 84b7a1787c10820043a04611b56c487f7180fe98 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 4 Mar 2020 11:15:29 -0500 Subject: [PATCH 0846/1035] test: fix flaky tests This commit was moved from ipfs/go-bitswap@3be2da86c6c474384153effe0d11d7d9f607e368 --- bitswap/internal/messagequeue/messagequeue_test.go | 12 ++++++------ bitswap/internal/session/sessionwantsender_test.go | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue_test.go b/bitswap/internal/messagequeue/messagequeue_test.go index 0ea93c43d..96284756d 100644 --- a/bitswap/internal/messagequeue/messagequeue_test.go +++ b/bitswap/internal/messagequeue/messagequeue_test.go @@ -394,9 +394,9 @@ func TestWantlistRebroadcast(t *testing.T) { t.Fatal("wrong number of wants") } - // Tell message queue to rebroadcast after 5ms, then wait 8ms - messageQueue.SetRebroadcastInterval(5 * time.Millisecond) - messages = collectMessages(ctx, t, messagesSent, 8*time.Millisecond) + // Tell message queue to rebroadcast after 10ms, then wait 15ms + messageQueue.SetRebroadcastInterval(10 * time.Millisecond) + messages = collectMessages(ctx, t, messagesSent, 15*time.Millisecond) firstMessage = messages[0] // Both original and new wants should have been rebroadcast @@ -425,9 +425,9 
@@ func TestWantlistRebroadcast(t *testing.T) { } } - // Tell message queue to rebroadcast after 5ms, then wait 8ms - messageQueue.SetRebroadcastInterval(5 * time.Millisecond) - messages = collectMessages(ctx, t, messagesSent, 8*time.Millisecond) + // Tell message queue to rebroadcast after 10ms, then wait 15ms + messageQueue.SetRebroadcastInterval(10 * time.Millisecond) + messages = collectMessages(ctx, t, messagesSent, 15*time.Millisecond) firstMessage = messages[0] if len(firstMessage.Wantlist()) != totalWants-len(cancels) { t.Fatal("did not rebroadcast all wants") diff --git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go index 404447668..ef7da73c6 100644 --- a/bitswap/internal/session/sessionwantsender_test.go +++ b/bitswap/internal/session/sessionwantsender_test.go @@ -481,7 +481,7 @@ func TestConsecutiveDontHaveLimit(t *testing.T) { spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) // Wait for processing to complete - time.Sleep(5 * time.Millisecond) + time.Sleep(10 * time.Millisecond) // Peer should be available if has := fpm.HasPeer(p); !has { @@ -495,7 +495,7 @@ func TestConsecutiveDontHaveLimit(t *testing.T) { } // Wait for processing to complete - time.Sleep(5 * time.Millisecond) + time.Sleep(20 * time.Millisecond) // Peer should be available if has := fpm.HasPeer(p); !has { @@ -509,7 +509,7 @@ func TestConsecutiveDontHaveLimit(t *testing.T) { } // Wait for processing to complete - time.Sleep(5 * time.Millisecond) + time.Sleep(20 * time.Millisecond) // Session should remove peer if has := fpm.HasPeer(p); has { From 5da13abf7fc5ebf652bf7bb4821044582f138d60 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 6 Mar 2020 06:56:25 -0800 Subject: [PATCH 0847/1035] ci: test with the race detector (#277) This commit was moved from ipfs/go-bitswap@5d28b3847325f7a9036328ddea4a435cde5b6c3b --- bitswap/bitswap_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 0a0bcc98b..428fa5be6 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -245,7 +245,7 @@ func TestLargeSwarm(t *testing.T) { if detectrace.WithRace() { // when running with the race detector, 500 instances launches // well over 8k goroutines. This hits a race detector limit. - numInstances = 50 + numInstances = 20 } else if travis.IsRunning() { numInstances = 200 } else { From ec0843261aa167edda057becba51fae4f6d8ab09 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 5 Mar 2020 14:22:24 -0500 Subject: [PATCH 0848/1035] fix: overly aggressive session peer removal This commit was moved from ipfs/go-bitswap@1247b02df50e7f4afd61ca858da24dd43abb9d9c --- bitswap/internal/messagequeue/messagequeue.go | 1 + bitswap/internal/session/session.go | 4 +- bitswap/internal/session/sessionwants.go | 8 ++- bitswap/internal/session/sessionwantsender.go | 72 +++++++++++++------ .../session/sessionwantsender_test.go | 59 ++++++++++++--- .../sessionpeermanager/sessionpeermanager.go | 5 +- 6 files changed, 114 insertions(+), 35 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index be0740000..8e2518899 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -46,6 +46,7 @@ type MessageNetwork interface { NewMessageSender(context.Context, peer.ID) (bsnet.MessageSender, error) Latency(peer.ID) time.Duration Ping(context.Context, peer.ID) ping.Result + Self() peer.ID } // MessageQueue implements queue of want messages to send to peers. 
diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index b92319280..45cd825fa 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -4,9 +4,9 @@ import ( "context" "time" - // lu "github.com/ipfs/go-bitswap/internal/logutil" bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" bsgetter "github.com/ipfs/go-bitswap/internal/getter" + lu "github.com/ipfs/go-bitswap/internal/logutil" notifications "github.com/ipfs/go-bitswap/internal/notifications" bspm "github.com/ipfs/go-bitswap/internal/peermanager" bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" @@ -340,7 +340,7 @@ func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { // Search for providers who have the first want in the list. // Typically if the provider has the first block they will have // the rest of the blocks also. - log.Warnf("Ses%d: FindMorePeers with want 0 of %d wants", s.id, len(wants)) + log.Warnf("Ses%d: FindMorePeers with want %s (1st of %d wants)", s.id, lu.C(wants[0]), len(wants)) s.findMorePeers(ctx, wants[0]) } s.resetIdleTick() diff --git a/bitswap/internal/session/sessionwants.go b/bitswap/internal/session/sessionwants.go index ad8dcd1bc..60df0df2f 100644 --- a/bitswap/internal/session/sessionwants.go +++ b/bitswap/internal/session/sessionwants.go @@ -56,7 +56,7 @@ func (sw *sessionWants) GetNextWants(limit int) []cid.Cid { func (sw *sessionWants) WantsSent(ks []cid.Cid) { now := time.Now() for _, c := range ks { - if _, ok := sw.liveWants[c]; !ok { + if _, ok := sw.liveWants[c]; !ok && sw.toFetch.Has(c) { sw.toFetch.Remove(c) sw.liveWants[c] = now } @@ -83,8 +83,7 @@ func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) totalLatency += now.Sub(sentAt) } - // Remove the CID from the live wants / toFetch queue and add it - // to the past wants + // Remove the CID from the live wants / toFetch queue delete(sw.liveWants, c) sw.toFetch.Remove(c) 
} @@ -96,6 +95,9 @@ func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) // PrepareBroadcast saves the current time for each live want and returns the // live want CIDs. func (sw *sessionWants) PrepareBroadcast() []cid.Cid { + // TODO: Change this to return wants in order so that the session will + // send out Find Providers request for the first want + // (Note that maps return keys in random order) now := time.Now() live := make([]cid.Cid, 0, len(sw.liveWants)) for c := range sw.liveWants { diff --git a/bitswap/internal/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go index cffb39bb9..ece7a14cc 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -4,6 +4,7 @@ import ( "context" bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" + lu "github.com/ipfs/go-bitswap/internal/logutil" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" @@ -298,16 +299,34 @@ func (sws *sessionWantSender) trackWant(c cid.Cid) { // processUpdates processes incoming blocks and HAVE / DONT_HAVEs. // It returns all DONT_HAVEs. func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { - prunePeers := make(map[peer.ID]struct{}) - dontHaves := cid.NewSet() + // Process received blocks keys + blkCids := cid.NewSet() for _, upd := range updates { - // TODO: If there is a timeout for the want from the peer, remove want.sentTo - // so the want can be sent to another peer (and blacklist the peer?) 
- // TODO: If a peer is no longer available, check if all providers of - // each CID have been exhausted + for _, c := range upd.ks { + blkCids.Add(c) + log.Warnf("received block %s", lu.C(c)) + // Remove the want + removed := sws.removeWant(c) + if removed != nil { + // Inform the peer tracker that this peer was the first to send + // us the block + sws.peerRspTrkr.receivedBlockFrom(upd.from) + } + delete(sws.peerConsecutiveDontHaves, upd.from) + } + } - // For each DONT_HAVE + // Process received DONT_HAVEs + dontHaves := cid.NewSet() + prunePeers := make(map[peer.ID]struct{}) + for _, upd := range updates { for _, c := range upd.dontHaves { + // If we already received a block for the want, ignore any + // DONT_HAVE for the want + if blkCids.Has(c) { + continue + } + dontHaves.Add(c) // Update the block presence for the peer @@ -330,24 +349,23 @@ func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { sws.peerConsecutiveDontHaves[upd.from]++ } } + } - // For each HAVE + // Process received HAVEs + for _, upd := range updates { for _, c := range upd.haves { + // If we already received a block for the want, ignore any HAVE for + // the want + if blkCids.Has(c) { + continue + } + // Update the block presence for the peer sws.updateWantBlockPresence(c, upd.from) - delete(sws.peerConsecutiveDontHaves, upd.from) - } - // For each received block - for _, c := range upd.ks { - // Remove the want - removed := sws.removeWant(c) - if removed != nil { - // Inform the peer tracker that this peer was the first to send - // us the block - sws.peerRspTrkr.receivedBlockFrom(upd.from) - } + // Clear the consecutive DONT_HAVE count for the peer delete(sws.peerConsecutiveDontHaves, upd.from) + delete(prunePeers, upd.from) } } @@ -356,7 +374,21 @@ func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { if len(prunePeers) > 0 { go func() { for p := range prunePeers { - sws.SignalAvailability(p, false) + // Before removing the peer from the 
session, check if the peer + // sent us a HAVE for a block that we want + peerHasWantedBlock := false + for c := range sws.wants { + if sws.bpm.PeerHasBlock(p, c) { + peerHasWantedBlock = true + break + } + } + + // Peer doesn't have anything we want, so remove it + if !peerHasWantedBlock { + log.Infof("peer %s sent too many dont haves", lu.P(p)) + sws.SignalAvailability(p, false) + } } }() } diff --git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go index ef7da73c6..b320ed831 100644 --- a/bitswap/internal/session/sessionwantsender_test.go +++ b/bitswap/internal/session/sessionwantsender_test.go @@ -476,9 +476,8 @@ func TestConsecutiveDontHaveLimit(t *testing.T) { // Add all cids as wants spm.Add(cids) - // Receive a HAVE from peer (adds it to the session) - bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) - spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) + // Receive a block from peer (adds it to the session) + spm.Update(p, cids[:1], []cid.Cid{}, []cid.Cid{}) // Wait for processing to complete time.Sleep(10 * time.Millisecond) @@ -533,9 +532,8 @@ func TestConsecutiveDontHaveLimitInterrupted(t *testing.T) { // Add all cids as wants spm.Add(cids) - // Receive a HAVE from peer (adds it to the session) - bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) - spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) + // Receive a block from peer (adds it to the session) + spm.Update(p, cids[:1], []cid.Cid{}, []cid.Cid{}) // Wait for processing to complete time.Sleep(5 * time.Millisecond) @@ -589,9 +587,8 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { // Add all cids as wants spm.Add(cids) - // Receive a HAVE from peer (adds it to the session) - bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) - spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) + // Receive a block from peer (adds it to the session) + spm.Update(p, cids[:1], []cid.Cid{}, []cid.Cid{}) // Wait for processing to complete time.Sleep(5 * time.Millisecond) @@ 
-657,3 +654,47 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { t.Fatal("Expected peer not to be available") } } + +func TestConsecutiveDontHaveDontRemoveIfHasWantedBlock(t *testing.T) { + cids := testutil.GenerateCids(peerDontHaveLimit + 10) + p := testutil.GeneratePeers(1)[0] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) + + go spm.Run() + + // Add all cids as wants + spm.Add(cids) + + // Receive a HAVE from peer (adds it to the session) + bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) + spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Peer should be available + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVEs from peer that exceed limit + for _, c := range cids[1 : peerDontHaveLimit+5] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + + // Wait for processing to complete + time.Sleep(20 * time.Millisecond) + + // Peer should still be available because it has a block that we want. 
+ // (We received a HAVE for cid 0 but didn't yet receive the block) + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } +} diff --git a/bitswap/internal/sessionpeermanager/sessionpeermanager.go b/bitswap/internal/sessionpeermanager/sessionpeermanager.go index cc6e71106..90233c72c 100644 --- a/bitswap/internal/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/internal/sessionpeermanager/sessionpeermanager.go @@ -4,6 +4,7 @@ import ( "fmt" "sync" + lu "github.com/ipfs/go-bitswap/internal/logutil" logging "github.com/ipfs/go-log" peer "github.com/libp2p/go-libp2p-core/peer" @@ -61,7 +62,7 @@ func (spm *SessionPeerManager) AddPeer(p peer.ID) bool { // connection spm.tagger.TagPeer(p, spm.tag, sessionPeerTagValue) - log.Infof("Added peer %s to session: %d peers\n", p, len(spm.peers)) + log.Debugf("Added peer %s to session (%d peers)\n", p, len(spm.peers)) return true } @@ -77,6 +78,8 @@ func (spm *SessionPeerManager) RemovePeer(p peer.ID) bool { delete(spm.peers, p) spm.tagger.UntagPeer(p, spm.tag) + + log.Debugf("Removed peer %s from session (%d peers)", lu.P(p), len(spm.peers)) return true } From cfa1db69533c088c0744021d3cb4753cb20c61ee Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 5 Mar 2020 14:39:24 -0500 Subject: [PATCH 0849/1035] Disable flaky benchmark This commit was moved from ipfs/go-bitswap@32e5cae5e0052e7e3db256daac2914c6730bb0ee --- bitswap/benchmarks_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index 71e046298..9761a26c9 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -130,7 +130,7 @@ var mixedBenches = []mixedBench{ mixedBench{bench{"3Nodes-Overlap3-OneAtATime", 3, 10, overlap2, oneAtATime}, 1, 2}, mixedBench{bench{"3Nodes-AllToAll-OneAtATime", 3, 10, allToAll, oneAtATime}, 1, 2}, mixedBench{bench{"3Nodes-Overlap3-AllConcurrent", 3, 10, overlap2, fetchAllConcurrent}, 1, 2}, - 
mixedBench{bench{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch}, 1, 2}, + // mixedBench{bench{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch}, 1, 2}, } func BenchmarkFetchFromOldBitswap(b *testing.B) { From b6f574fcf28eb38a76327d224af90771e10d1028 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 6 Mar 2020 09:40:54 -0500 Subject: [PATCH 0850/1035] fix: block receive shouldn't affect DONT_HAVE count for other peers This commit was moved from ipfs/go-bitswap@99fe214acd57c3deccf7922c5b2c5decc8341cad --- bitswap/internal/session/sessionwantsender.go | 55 +++++++++---------- 1 file changed, 25 insertions(+), 30 deletions(-) diff --git a/bitswap/internal/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go index ece7a14cc..4bb65aaf5 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -321,8 +321,15 @@ func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { prunePeers := make(map[peer.ID]struct{}) for _, upd := range updates { for _, c := range upd.dontHaves { - // If we already received a block for the want, ignore any - // DONT_HAVE for the want + // Track the number of consecutive DONT_HAVEs each peer receives + if sws.peerConsecutiveDontHaves[upd.from] == peerDontHaveLimit { + prunePeers[upd.from] = struct{}{} + } else { + sws.peerConsecutiveDontHaves[upd.from]++ + } + + // If we already received a block for the want, there's no need to + // update block presence etc if blkCids.Has(c) { continue } @@ -341,28 +348,18 @@ func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { sws.setWantSentTo(c, "") } } - - // Track the number of consecutive DONT_HAVEs each peer receives - if sws.peerConsecutiveDontHaves[upd.from] == peerDontHaveLimit { - prunePeers[upd.from] = struct{}{} - } else { - sws.peerConsecutiveDontHaves[upd.from]++ - } } } // Process received HAVEs for _, upd := range updates { for _, c := 
range upd.haves { - // If we already received a block for the want, ignore any HAVE for - // the want - if blkCids.Has(c) { - continue + // If we haven't already received a block for the want + if !blkCids.Has(c) { + // Update the block presence for the peer + sws.updateWantBlockPresence(c, upd.from) } - // Update the block presence for the peer - sws.updateWantBlockPresence(c, upd.from) - // Clear the consecutive DONT_HAVE count for the peer delete(sws.peerConsecutiveDontHaves, upd.from) delete(prunePeers, upd.from) @@ -372,23 +369,21 @@ func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { // If any peers have sent us too many consecutive DONT_HAVEs, remove them // from the session if len(prunePeers) > 0 { + for p := range prunePeers { + // Before removing the peer from the session, check if the peer + // sent us a HAVE for a block that we want + for c := range sws.wants { + if sws.bpm.PeerHasBlock(p, c) { + delete(prunePeers, p) + break + } + } + } go func() { for p := range prunePeers { - // Before removing the peer from the session, check if the peer - // sent us a HAVE for a block that we want - peerHasWantedBlock := false - for c := range sws.wants { - if sws.bpm.PeerHasBlock(p, c) { - peerHasWantedBlock = true - break - } - } - // Peer doesn't have anything we want, so remove it - if !peerHasWantedBlock { - log.Infof("peer %s sent too many dont haves", lu.P(p)) - sws.SignalAvailability(p, false) - } + log.Infof("peer %s sent too many dont haves", lu.P(p)) + sws.SignalAvailability(p, false) } }() } From b64218213a30fdc58f633fba5ace340d2b20f2bf Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 6 Mar 2020 09:49:38 -0500 Subject: [PATCH 0851/1035] refactor: avoid unnecessary go-routine This commit was moved from ipfs/go-bitswap@f74c469c7ad6a1f5bc062cea62da97a37667153e --- bitswap/internal/session/sessionwantsender.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git 
a/bitswap/internal/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go index 4bb65aaf5..df963f9e9 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -368,17 +368,17 @@ func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { // If any peers have sent us too many consecutive DONT_HAVEs, remove them // from the session - if len(prunePeers) > 0 { - for p := range prunePeers { - // Before removing the peer from the session, check if the peer - // sent us a HAVE for a block that we want - for c := range sws.wants { - if sws.bpm.PeerHasBlock(p, c) { - delete(prunePeers, p) - break - } + for p := range prunePeers { + // Before removing the peer from the session, check if the peer + // sent us a HAVE for a block that we want + for c := range sws.wants { + if sws.bpm.PeerHasBlock(p, c) { + delete(prunePeers, p) + break } } + } + if len(prunePeers) > 0 { go func() { for p := range prunePeers { // Peer doesn't have anything we want, so remove it From 776393de35e6857008af38f74baf8dbc7ce386ec Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 6 Mar 2020 11:29:52 -0500 Subject: [PATCH 0852/1035] fix: races in tests This commit was moved from ipfs/go-bitswap@e12b69e442ccbc3cd90f39baf3d61962e1fe9401 --- bitswap/bitswap_test.go | 2 +- bitswap/internal/decision/engine.go | 24 +-- bitswap/internal/decision/engine_test.go | 42 +++-- .../messagequeue/donthavetimeoutmgr_test.go | 51 +++--- .../messagequeue/messagequeue_test.go | 77 +++++++-- .../session/sessionwantsender_test.go | 157 ++++++++++++------ 6 files changed, 229 insertions(+), 124 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 0a0bcc98b..428fa5be6 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -245,7 +245,7 @@ func TestLargeSwarm(t *testing.T) { if detectrace.WithRace() { // when running with the race detector, 500 instances launches // well over 8k 
goroutines. This hits a race detector limit. - numInstances = 50 + numInstances = 20 } else if travis.IsRunning() { numInstances = 200 } else { diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index bf51beaef..15e6ad8c2 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -76,6 +76,10 @@ const ( // the alpha for the EWMA used to track long term usefulness longTermAlpha = 0.05 + // how frequently the engine should sample usefulness. Peers that + // interact every shortTerm time period are considered "active". + shortTerm = 10 * time.Second + // long term ratio defines what "long term" means in terms of the // shortTerm duration. Peers that interact once every longTermRatio are // considered useful over the long term. @@ -96,14 +100,6 @@ const ( blockstoreWorkerCount = 128 ) -var ( - // how frequently the engine should sample usefulness. Peers that - // interact every shortTerm time period are considered "active". - // - // this is only a variable to make testing easier. - shortTerm = 10 * time.Second -) - // Envelope contains a message for a Peer. type Envelope struct { // Peer is the intended recipient. 
@@ -161,6 +157,9 @@ type Engine struct { // bytes up to which we will replace a want-have with a want-block maxBlockSizeReplaceHasWithBlock int + // how frequently the engine should sample peer usefulness + peerSampleInterval time.Duration + sendDontHaves bool self peer.ID @@ -168,11 +167,13 @@ type Engine struct { // NewEngine creates a new block sending engine for the given block store func NewEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, self peer.ID) *Engine { - return newEngine(ctx, bs, peerTagger, self, maxBlockSizeReplaceHasWithBlock) + return newEngine(ctx, bs, peerTagger, self, maxBlockSizeReplaceHasWithBlock, shortTerm) } // This constructor is used by the tests -func newEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, self peer.ID, maxReplaceSize int) *Engine { +func newEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, self peer.ID, + maxReplaceSize int, peerSampleInterval time.Duration) *Engine { + e := &Engine{ ledgerMap: make(map[peer.ID]*ledger), bsm: newBlockstoreManager(ctx, bs, blockstoreWorkerCount), @@ -181,6 +182,7 @@ func newEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, workSignal: make(chan struct{}, 1), ticker: time.NewTicker(time.Millisecond * 100), maxBlockSizeReplaceHasWithBlock: maxReplaceSize, + peerSampleInterval: peerSampleInterval, taskWorkerCount: taskWorkerCount, sendDontHaves: true, self: self, @@ -236,7 +238,7 @@ func (e *Engine) StartWorkers(ctx context.Context, px process.Process) { // adjust it ±25% based on our debt ratio. Peers that have historically been // more useful to us than we are to them get the highest score. 
func (e *Engine) scoreWorker(ctx context.Context) { - ticker := time.NewTicker(shortTerm) + ticker := time.NewTicker(e.peerSampleInterval) defer ticker.Stop() type update struct { diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index f6175762d..0db51f881 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -91,10 +91,10 @@ type engineSet struct { Blockstore blockstore.Blockstore } -func newTestEngine(ctx context.Context, idStr string) engineSet { +func newTestEngine(ctx context.Context, idStr string, peerSampleInterval time.Duration) engineSet { fpt := &fakePeerTagger{} bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - e := newEngine(ctx, bs, fpt, "localhost", 0) + e := newEngine(ctx, bs, fpt, "localhost", 0, peerSampleInterval) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) return engineSet{ Peer: peer.ID(idStr), @@ -108,8 +108,8 @@ func newTestEngine(ctx context.Context, idStr string) engineSet { func TestConsistentAccounting(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - sender := newTestEngine(ctx, "Ernie") - receiver := newTestEngine(ctx, "Bert") + sender := newTestEngine(ctx, "Ernie", shortTerm) + receiver := newTestEngine(ctx, "Bert", shortTerm) // Send messages from Ernie to Bert for i := 0; i < 1000; i++ { @@ -143,8 +143,8 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - sanfrancisco := newTestEngine(ctx, "sf") - seattle := newTestEngine(ctx, "sea") + sanfrancisco := newTestEngine(ctx, "sf", shortTerm) + seattle := newTestEngine(ctx, "sea", shortTerm) m := message.New(true) @@ -181,7 +181,7 @@ func peerIsPartner(p peer.ID, e *Engine) bool { func TestOutboxClosedWhenEngineClosed(t *testing.T) { ctx := context.Background() t.SkipNow() // TODO implement *Engine.Close - e := 
newEngine(ctx, blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}, "localhost", 0) + e := newEngine(ctx, blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}, "localhost", 0, shortTerm) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) var wg sync.WaitGroup wg.Add(1) @@ -509,7 +509,7 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { testCases = onlyTestCases } - e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0) + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) for i, testCase := range testCases { t.Logf("Test case %d:", i) @@ -665,7 +665,7 @@ func TestPartnerWantHaveWantBlockActive(t *testing.T) { testCases = onlyTestCases } - e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0) + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) var next envChan @@ -850,7 +850,7 @@ func TestPartnerWantsThenCancels(t *testing.T) { ctx := context.Background() for i := 0; i < numRounds; i++ { expected := make([][]string, 0, len(testcases)) - e := newEngine(ctx, bs, &fakePeerTagger{}, "localhost", 0) + e := newEngine(ctx, bs, &fakePeerTagger{}, "localhost", 0, shortTerm) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) for _, testcase := range testcases { set := testcase[0] @@ -875,7 +875,7 @@ func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0) + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm) e.StartWorkers(context.Background(), process.WithTeardown(func() error { 
return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) @@ -919,7 +919,7 @@ func TestSendDontHave(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0) + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) @@ -981,8 +981,8 @@ func TestSendDontHave(t *testing.T) { func TestTaggingPeers(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() - sanfrancisco := newTestEngine(ctx, "sf") - seattle := newTestEngine(ctx, "sea") + sanfrancisco := newTestEngine(ctx, "sf", shortTerm) + seattle := newTestEngine(ctx, "sea", shortTerm) keys := []string{"a", "b", "c", "d", "e"} for _, letter := range keys { @@ -1007,13 +1007,11 @@ func TestTaggingPeers(t *testing.T) { } func TestTaggingUseful(t *testing.T) { - oldShortTerm := shortTerm - shortTerm = 2 * time.Millisecond - defer func() { shortTerm = oldShortTerm }() + peerSampleInterval := 2 * time.Millisecond ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() - me := newTestEngine(ctx, "engine") + me := newTestEngine(ctx, "engine", peerSampleInterval) friend := peer.ID("friend") block := blocks.NewBlock([]byte("foobar")) @@ -1025,21 +1023,21 @@ func TestTaggingUseful(t *testing.T) { t.Fatal("Peers should be untagged but weren't") } me.Engine.MessageSent(friend, msg) - time.Sleep(shortTerm * 2) + time.Sleep(peerSampleInterval * 2) if me.PeerTagger.count(me.Engine.tagUseful) != 1 { t.Fatal("Peers should be tagged but weren't") } - time.Sleep(shortTerm * 8) + time.Sleep(peerSampleInterval * 8) } if me.PeerTagger.count(me.Engine.tagUseful) == 0 { t.Fatal("peers should still be tagged due to long-term usefulness") } - time.Sleep(shortTerm * 2) + 
time.Sleep(peerSampleInterval * 2) if me.PeerTagger.count(me.Engine.tagUseful) == 0 { t.Fatal("peers should still be tagged due to long-term usefulness") } - time.Sleep(shortTerm * 20) + time.Sleep(peerSampleInterval * 30) if me.PeerTagger.count(me.Engine.tagUseful) != 0 { t.Fatal("peers should finally be untagged") } diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go index 78e622a74..4093f7ba6 100644 --- a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go +++ b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go @@ -50,9 +50,24 @@ type timeoutRecorder struct { func (tr *timeoutRecorder) onTimeout(tks []cid.Cid) { tr.lk.Lock() defer tr.lk.Unlock() + tr.timedOutKs = append(tr.timedOutKs, tks...) } +func (tr *timeoutRecorder) timedOutCount() int { + tr.lk.Lock() + defer tr.lk.Unlock() + + return len(tr.timedOutKs) +} + +func (tr *timeoutRecorder) clear() { + tr.lk.Lock() + defer tr.lk.Unlock() + + tr.timedOutKs = nil +} + func TestDontHaveTimeoutMgrTimeout(t *testing.T) { firstks := testutil.GenerateCids(2) secondks := append(firstks, testutil.GenerateCids(3)...) 
@@ -75,7 +90,7 @@ func TestDontHaveTimeoutMgrTimeout(t *testing.T) { time.Sleep(expectedTimeout - 5*time.Millisecond) // At this stage no keys should have timed out - if len(tr.timedOutKs) > 0 { + if tr.timedOutCount() > 0 { t.Fatal("expected timeout not to have happened yet") } @@ -86,12 +101,12 @@ func TestDontHaveTimeoutMgrTimeout(t *testing.T) { time.Sleep(10 * time.Millisecond) // At this stage first set of keys should have timed out - if len(tr.timedOutKs) != len(firstks) { + if tr.timedOutCount() != len(firstks) { t.Fatal("expected timeout") } // Clear the recorded timed out keys - tr.timedOutKs = nil + tr.clear() // Sleep until the second set of keys should have timed out time.Sleep(expectedTimeout) @@ -99,7 +114,7 @@ func TestDontHaveTimeoutMgrTimeout(t *testing.T) { // At this stage all keys should have timed out. The second set included // the first set of keys, but they were added before the first set timed // out, so only the remaining keys should have beed added. - if len(tr.timedOutKs) != len(secondks)-len(firstks) { + if tr.timedOutCount() != len(secondks)-len(firstks) { t.Fatal("expected second set of keys to timeout") } } @@ -130,7 +145,7 @@ func TestDontHaveTimeoutMgrCancel(t *testing.T) { time.Sleep(expectedTimeout) // At this stage all non-cancelled keys should have timed out - if len(tr.timedOutKs) != len(ks)-cancelCount { + if tr.timedOutCount() != len(ks)-cancelCount { t.Fatal("expected timeout") } } @@ -167,7 +182,7 @@ func TestDontHaveTimeoutWantCancelWant(t *testing.T) { time.Sleep(10 * time.Millisecond) // At this stage only the key that was never cancelled should have timed out - if len(tr.timedOutKs) != 1 { + if tr.timedOutCount() != 1 { t.Fatal("expected one key to timeout") } @@ -175,7 +190,7 @@ func TestDontHaveTimeoutWantCancelWant(t *testing.T) { time.Sleep(latency) // At this stage the key that was added back should also have timed out - if len(tr.timedOutKs) != 2 { + if tr.timedOutCount() != 2 { t.Fatal("expected added back key 
to timeout") } } @@ -202,7 +217,7 @@ func TestDontHaveTimeoutRepeatedAddPending(t *testing.T) { time.Sleep(latency + 5*time.Millisecond) // At this stage all keys should have timed out - if len(tr.timedOutKs) != len(ks) { + if tr.timedOutCount() != len(ks) { t.Fatal("expected timeout") } } @@ -229,7 +244,7 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { time.Sleep(expectedTimeout - 5*time.Millisecond) // At this stage no timeout should have happened yet - if len(tr.timedOutKs) > 0 { + if tr.timedOutCount() > 0 { t.Fatal("expected timeout not to have happened yet") } @@ -237,7 +252,7 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { time.Sleep(10 * time.Millisecond) // Now the keys should have timed out - if len(tr.timedOutKs) != len(ks) { + if tr.timedOutCount() != len(ks) { t.Fatal("expected timeout") } } @@ -263,7 +278,7 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { time.Sleep(defaultTimeout - 5*time.Millisecond) // At this stage no timeout should have happened yet - if len(tr.timedOutKs) > 0 { + if tr.timedOutCount() > 0 { t.Fatal("expected timeout not to have happened yet") } @@ -271,7 +286,7 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { time.Sleep(10 * time.Millisecond) // Now the keys should have timed out - if len(tr.timedOutKs) != len(ks) { + if tr.timedOutCount() != len(ks) { t.Fatal("expected timeout") } } @@ -281,17 +296,11 @@ func TestDontHaveTimeoutNoTimeoutAfterShutdown(t *testing.T) { latency := time.Millisecond * 10 latMultiplier := 1 expProcessTime := time.Duration(0) + tr := timeoutRecorder{} ctx := context.Background() pc := &mockPeerConn{latency: latency} - var lk sync.Mutex - var timedOutKs []cid.Cid - onTimeout := func(tks []cid.Cid) { - lk.Lock() - defer lk.Unlock() - timedOutKs = append(timedOutKs, tks...) 
- } - dhtm := newDontHaveTimeoutMgrWithParams(ctx, pc, onTimeout, + dhtm := newDontHaveTimeoutMgrWithParams(ctx, pc, tr.onTimeout, dontHaveTimeout, latMultiplier, expProcessTime) dhtm.Start() @@ -308,7 +317,7 @@ func TestDontHaveTimeoutNoTimeoutAfterShutdown(t *testing.T) { time.Sleep(10 * time.Millisecond) // Manager was shut down so timeout should not have fired - if len(timedOutKs) != 0 { + if tr.timedOutCount() != 0 { t.Fatal("expected no timeout after shutdown") } } diff --git a/bitswap/internal/messagequeue/messagequeue_test.go b/bitswap/internal/messagequeue/messagequeue_test.go index 96284756d..0f7cba8ac 100644 --- a/bitswap/internal/messagequeue/messagequeue_test.go +++ b/bitswap/internal/messagequeue/messagequeue_test.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "sync" "testing" "time" @@ -42,12 +43,16 @@ func (fms *fakeMessageNetwork) Ping(context.Context, peer.ID) ping.Result { } type fakeDontHaveTimeoutMgr struct { + lk sync.Mutex ks []cid.Cid } func (fp *fakeDontHaveTimeoutMgr) Start() {} func (fp *fakeDontHaveTimeoutMgr) Shutdown() {} func (fp *fakeDontHaveTimeoutMgr) AddPending(ks []cid.Cid) { + fp.lk.Lock() + defer fp.lk.Unlock() + s := cid.NewSet() for _, c := range append(fp.ks, ks...) 
{ s.Add(c) @@ -55,6 +60,9 @@ func (fp *fakeDontHaveTimeoutMgr) AddPending(ks []cid.Cid) { fp.ks = s.Keys() } func (fp *fakeDontHaveTimeoutMgr) CancelPending(ks []cid.Cid) { + fp.lk.Lock() + defer fp.lk.Unlock() + s := cid.NewSet() for _, c := range fp.ks { s.Add(c) @@ -64,8 +72,15 @@ func (fp *fakeDontHaveTimeoutMgr) CancelPending(ks []cid.Cid) { } fp.ks = s.Keys() } +func (fp *fakeDontHaveTimeoutMgr) pendingCount() int { + fp.lk.Lock() + defer fp.lk.Unlock() + + return len(fp.ks) +} type fakeMessageSender struct { + lk sync.Mutex sendError error fullClosed chan<- struct{} reset chan<- struct{} @@ -74,7 +89,23 @@ type fakeMessageSender struct { supportsHave bool } +func newFakeMessageSender(sendError error, fullClosed chan<- struct{}, reset chan<- struct{}, + messagesSent chan<- bsmsg.BitSwapMessage, sendErrors chan<- error, supportsHave bool) *fakeMessageSender { + + return &fakeMessageSender{ + sendError: sendError, + fullClosed: fullClosed, + reset: reset, + messagesSent: messagesSent, + sendErrors: sendErrors, + supportsHave: supportsHave, + } +} + func (fms *fakeMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { + fms.lk.Lock() + defer fms.lk.Unlock() + if fms.sendError != nil { fms.sendErrors <- fms.sendError return fms.sendError @@ -82,6 +113,12 @@ func (fms *fakeMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMess fms.messagesSent <- msg return nil } +func (fms *fakeMessageSender) clearSendError() { + fms.lk.Lock() + defer fms.lk.Unlock() + + fms.sendError = nil +} func (fms *fakeMessageSender) Close() error { fms.fullClosed <- struct{}{}; return nil } func (fms *fakeMessageSender) Reset() error { fms.reset <- struct{}{}; return nil } func (fms *fakeMessageSender) SupportsHave() bool { return fms.supportsHave } @@ -119,7 +156,7 @@ func TestStartupAndShutdown(t *testing.T) { sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := 
&fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -161,7 +198,7 @@ func TestSendingMessagesDeduped(t *testing.T) { sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -184,7 +221,7 @@ func TestSendingMessagesPartialDupe(t *testing.T) { sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -207,7 +244,7 @@ func TestSendingMessagesPriority(t *testing.T) { sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -276,7 +313,7 @@ func TestCancelOverridesPendingWants(t *testing.T) { sendErrors := 
make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -310,7 +347,7 @@ func TestWantOverridesPendingCancels(t *testing.T) { sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -343,7 +380,7 @@ func TestWantlistRebroadcast(t *testing.T) { sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -440,7 +477,7 @@ func TestSendingLargeMessages(t *testing.T) { sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} dhtm := &fakeDontHaveTimeoutMgr{} peerID := testutil.GeneratePeers(1)[0] @@ 
-471,7 +508,7 @@ func TestSendToPeerThatDoesntSupportHave(t *testing.T) { sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, false} + fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, false) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] @@ -527,7 +564,7 @@ func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, false} + fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, false) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] @@ -540,7 +577,7 @@ func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { collectMessages(ctx, t, messagesSent, 10*time.Millisecond) // Check want-blocks are added to DontHaveTimeoutMgr - if len(dhtm.ks) != len(wbs) { + if dhtm.pendingCount() != len(wbs) { t.Fatal("want-blocks not added to DontHaveTimeoutMgr") } @@ -549,7 +586,7 @@ func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { collectMessages(ctx, t, messagesSent, 10*time.Millisecond) // Check want-blocks are removed from DontHaveTimeoutMgr - if len(dhtm.ks) != len(wbs)-cancelCount { + if dhtm.pendingCount() != len(wbs)-cancelCount { t.Fatal("want-blocks not removed from DontHaveTimeoutMgr") } } @@ -560,7 +597,7 @@ func TestResendAfterError(t *testing.T) { sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakeSender := newFakeMessageSender(nil, fullClosedChan, 
resetChan, messagesSent, sendErrors, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} dhtm := &fakeDontHaveTimeoutMgr{} peerID := testutil.GeneratePeers(1)[0] @@ -576,7 +613,7 @@ func TestResendAfterError(t *testing.T) { // After the first error is received, clear sendError so that // subsequent sends will not error errs = append(errs, <-sendErrors) - fakeSender.sendError = nil + fakeSender.clearSendError() }() // Make the first send error out @@ -599,7 +636,7 @@ func TestResendAfterMaxRetries(t *testing.T) { sendErrors := make(chan error) resetChan := make(chan struct{}, maxRetries*2) fullClosedChan := make(chan struct{}, 1) - fakeSender := &fakeMessageSender{nil, fullClosedChan, resetChan, messagesSent, sendErrors, true} + fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} dhtm := &fakeDontHaveTimeoutMgr{} peerID := testutil.GeneratePeers(1)[0] @@ -612,8 +649,11 @@ func TestResendAfterMaxRetries(t *testing.T) { messageQueue.Startup() + var lk sync.Mutex var errs []error go func() { + lk.Lock() + defer lk.Unlock() for len(errs) < maxRetries { err := <-sendErrors errs = append(errs, err) @@ -625,7 +665,10 @@ func TestResendAfterMaxRetries(t *testing.T) { messageQueue.AddWants(wantBlocks, wantHaves) messages := collectMessages(ctx, t, messagesSent, 50*time.Millisecond) - if len(errs) != maxRetries { + lk.Lock() + errCount := len(errs) + lk.Unlock() + if errCount != maxRetries { t.Fatal("Expected maxRetries errors, got", len(errs)) } @@ -635,7 +678,7 @@ func TestResendAfterMaxRetries(t *testing.T) { } // Clear sendError so that subsequent sends will not error - fakeSender.sendError = nil + fakeSender.clearSendError() // Add a new batch of wants messageQueue.AddWants(wantBlocks2, wantHaves2) diff --git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go index ef7da73c6..c6a3f72c6 100644 --- 
a/bitswap/internal/session/sessionwantsender_test.go +++ b/bitswap/internal/session/sessionwantsender_test.go @@ -14,22 +14,55 @@ import ( ) type sentWants struct { + sync.Mutex p peer.ID wantHaves *cid.Set wantBlocks *cid.Set } +func (sw *sentWants) add(wantBlocks []cid.Cid, wantHaves []cid.Cid) { + sw.Lock() + defer sw.Unlock() + + for _, c := range wantBlocks { + sw.wantBlocks.Add(c) + } + for _, c := range wantHaves { + if !sw.wantBlocks.Has(c) { + sw.wantHaves.Add(c) + } + } + +} +func (sw *sentWants) wantHavesKeys() []cid.Cid { + sw.Lock() + defer sw.Unlock() + return sw.wantHaves.Keys() +} +func (sw *sentWants) wantBlocksKeys() []cid.Cid { + sw.Lock() + defer sw.Unlock() + return sw.wantBlocks.Keys() +} + type mockPeerManager struct { - peerSessions sync.Map - peerSends sync.Map + lk sync.Mutex + peerSessions map[peer.ID]bspm.Session + peerSends map[peer.ID]*sentWants } func newMockPeerManager() *mockPeerManager { - return &mockPeerManager{} + return &mockPeerManager{ + peerSessions: make(map[peer.ID]bspm.Session), + peerSends: make(map[peer.ID]*sentWants), + } } func (pm *mockPeerManager) RegisterSession(p peer.ID, sess bspm.Session) bool { - pm.peerSessions.Store(p, sess) + pm.lk.Lock() + defer pm.lk.Unlock() + + pm.peerSessions[p] = sess return true } @@ -37,33 +70,62 @@ func (pm *mockPeerManager) UnregisterSession(sesid uint64) { } func (pm *mockPeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { - swi, _ := pm.peerSends.LoadOrStore(p, sentWants{p, cid.NewSet(), cid.NewSet()}) - sw := swi.(sentWants) - for _, c := range wantBlocks { - sw.wantBlocks.Add(c) - } - for _, c := range wantHaves { - if !sw.wantBlocks.Has(c) { - sw.wantHaves.Add(c) - } + pm.lk.Lock() + defer pm.lk.Unlock() + + sw, ok := pm.peerSends[p] + if !ok { + sw = &sentWants{p: p, wantHaves: cid.NewSet(), wantBlocks: cid.NewSet()} + pm.peerSends[p] = sw } + sw.add(wantBlocks, wantHaves) } -func (pm *mockPeerManager) waitNextWants() 
map[peer.ID]sentWants { +func (pm *mockPeerManager) waitNextWants() map[peer.ID]*sentWants { time.Sleep(5 * time.Millisecond) - nw := make(map[peer.ID]sentWants) - pm.peerSends.Range(func(k, v interface{}) bool { - nw[k.(peer.ID)] = v.(sentWants) - return true - }) + + pm.lk.Lock() + defer pm.lk.Unlock() + nw := make(map[peer.ID]*sentWants) + for p, sentWants := range pm.peerSends { + nw[p] = sentWants + } return nw } func (pm *mockPeerManager) clearWants() { - pm.peerSends.Range(func(k, v interface{}) bool { - pm.peerSends.Delete(k) - return true - }) + pm.lk.Lock() + defer pm.lk.Unlock() + + for p := range pm.peerSends { + delete(pm.peerSends, p) + } +} + +type exhaustedPeers struct { + lk sync.Mutex + ks []cid.Cid +} + +func (ep *exhaustedPeers) onPeersExhausted(ks []cid.Cid) { + ep.lk.Lock() + defer ep.lk.Unlock() + + ep.ks = append(ep.ks, ks...) +} + +func (ep *exhaustedPeers) clear() { + ep.lk.Lock() + defer ep.lk.Unlock() + + ep.ks = nil +} + +func (ep *exhaustedPeers) exhausted() []cid.Cid { + ep.lk.Lock() + defer ep.lk.Unlock() + + return append([]cid.Cid{}, ep.ks...) 
} func TestSendWants(t *testing.T) { @@ -95,10 +157,10 @@ func TestSendWants(t *testing.T) { if !ok { t.Fatal("Nothing sent to peer") } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocks.Keys(), blkCids0) { + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), blkCids0) { t.Fatal("Wrong keys") } - if sw.wantHaves.Len() > 0 { + if len(sw.wantHavesKeys()) > 0 { t.Fatal("Expecting no want-haves") } } @@ -133,7 +195,7 @@ func TestSendsWantBlockToOnePeerOnly(t *testing.T) { if !ok { t.Fatal("Nothing sent to peer") } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocks.Keys(), blkCids0) { + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), blkCids0) { t.Fatal("Wrong keys") } @@ -156,7 +218,7 @@ func TestSendsWantBlockToOnePeerOnly(t *testing.T) { if sw.wantBlocks.Len() > 0 { t.Fatal("Expecting no want-blocks") } - if !testutil.MatchKeysIgnoreOrder(sw.wantHaves.Keys(), blkCids0) { + if !testutil.MatchKeysIgnoreOrder(sw.wantHavesKeys(), blkCids0) { t.Fatal("Wrong keys") } } @@ -190,7 +252,7 @@ func TestReceiveBlock(t *testing.T) { if !ok { t.Fatal("Nothing sent to peer") } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocks.Keys(), cids) { + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { t.Fatal("Wrong keys") } @@ -215,7 +277,7 @@ func TestReceiveBlock(t *testing.T) { if !ok { t.Fatal("Nothing sent to peer") } - wb := sw.wantBlocks.Keys() + wb := sw.wantBlocksKeys() if len(wb) != 1 || !wb[0].Equals(cids[1]) { t.Fatal("Wrong keys", wb) } @@ -250,7 +312,7 @@ func TestPeerUnavailable(t *testing.T) { if !ok { t.Fatal("Nothing sent to peer") } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocks.Keys(), cids) { + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { t.Fatal("Wrong keys") } @@ -281,7 +343,7 @@ func TestPeerUnavailable(t *testing.T) { if !ok { t.Fatal("Nothing sent to peer") } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocks.Keys(), cids) { + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { t.Fatal("Wrong keys") } } @@ 
-297,11 +359,8 @@ func TestPeersExhausted(t *testing.T) { bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} - var exhausted []cid.Cid - onPeersExhausted := func(ks []cid.Cid) { - exhausted = append(exhausted, ks...) - } - spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) + ep := exhaustedPeers{} + spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, ep.onPeersExhausted) go spm.Run() @@ -321,12 +380,12 @@ func TestPeersExhausted(t *testing.T) { // All available peers (peer A) have sent us a DONT_HAVE for cid1, // so expect that onPeersExhausted() will be called with cid1 - if !testutil.MatchKeysIgnoreOrder(exhausted, []cid.Cid{cids[1]}) { + if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1]}) { t.Fatal("Wrong keys") } // Clear exhausted cids - exhausted = []cid.Cid{} + ep.clear() // peerB: HAVE cid0 bpm.ReceiveFrom(peerB, []cid.Cid{cids[0]}, []cid.Cid{}) @@ -343,7 +402,7 @@ func TestPeersExhausted(t *testing.T) { // All available peers (peer A and peer B) have sent us a DONT_HAVE // for cid1, but we already called onPeersExhausted with cid1, so it // should not be called again - if len(exhausted) > 0 { + if len(ep.exhausted()) > 0 { t.Fatal("Wrong keys") } @@ -356,7 +415,7 @@ func TestPeersExhausted(t *testing.T) { // All available peers (peer A and peer B) have sent us a DONT_HAVE for // cid2, so expect that onPeersExhausted() will be called with cid2 - if !testutil.MatchKeysIgnoreOrder(exhausted, []cid.Cid{cids[2]}) { + if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[2]}) { t.Fatal("Wrong keys") } } @@ -376,11 +435,8 @@ func TestPeersExhaustedLastWaitingPeerUnavailable(t *testing.T) { bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} - var exhausted []cid.Cid - onPeersExhausted := func(ks []cid.Cid) { - exhausted = append(exhausted, ks...) 
- } - spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) + ep := exhaustedPeers{} + spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, ep.onPeersExhausted) go spm.Run() @@ -409,7 +465,7 @@ func TestPeersExhaustedLastWaitingPeerUnavailable(t *testing.T) { // All remaining peers (peer A) have sent us a DONT_HAVE for cid1, // so expect that onPeersExhausted() will be called with cid1 - if !testutil.MatchKeysIgnoreOrder(exhausted, []cid.Cid{cids[1]}) { + if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1]}) { t.Fatal("Wrong keys") } } @@ -427,11 +483,8 @@ func TestPeersExhaustedAllPeersUnavailable(t *testing.T) { bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} - var exhausted []cid.Cid - onPeersExhausted := func(ks []cid.Cid) { - exhausted = append(exhausted, ks...) - } - spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) + ep := exhaustedPeers{} + spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, ep.onPeersExhausted) go spm.Run() @@ -455,7 +508,7 @@ func TestPeersExhaustedAllPeersUnavailable(t *testing.T) { // Expect that onPeersExhausted() will be called with all cids for blocks // that have not been received - if !testutil.MatchKeysIgnoreOrder(exhausted, []cid.Cid{cids[1], cids[2]}) { + if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1], cids[2]}) { t.Fatal("Wrong keys") } } From 243f47916b2df7b82f2e9860795e31d9c08e0229 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 5 Mar 2020 14:22:24 -0500 Subject: [PATCH 0853/1035] fix: overly aggressive session peer removal This commit was moved from ipfs/go-bitswap@916da78a755dbd833b736d83f105b175f5fce628 --- bitswap/internal/messagequeue/messagequeue.go | 1 + bitswap/internal/session/session.go | 4 +- bitswap/internal/session/sessionwants.go | 8 ++- bitswap/internal/session/sessionwantsender.go | 72 +++++++++++++------ 
.../session/sessionwantsender_test.go | 59 ++++++++++++--- .../sessionpeermanager/sessionpeermanager.go | 5 +- 6 files changed, 114 insertions(+), 35 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index be0740000..8e2518899 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -46,6 +46,7 @@ type MessageNetwork interface { NewMessageSender(context.Context, peer.ID) (bsnet.MessageSender, error) Latency(peer.ID) time.Duration Ping(context.Context, peer.ID) ping.Result + Self() peer.ID } // MessageQueue implements queue of want messages to send to peers. diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index b92319280..45cd825fa 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -4,9 +4,9 @@ import ( "context" "time" - // lu "github.com/ipfs/go-bitswap/internal/logutil" bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" bsgetter "github.com/ipfs/go-bitswap/internal/getter" + lu "github.com/ipfs/go-bitswap/internal/logutil" notifications "github.com/ipfs/go-bitswap/internal/notifications" bspm "github.com/ipfs/go-bitswap/internal/peermanager" bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" @@ -340,7 +340,7 @@ func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { // Search for providers who have the first want in the list. // Typically if the provider has the first block they will have // the rest of the blocks also. 
- log.Warnf("Ses%d: FindMorePeers with want 0 of %d wants", s.id, len(wants)) + log.Warnf("Ses%d: FindMorePeers with want %s (1st of %d wants)", s.id, lu.C(wants[0]), len(wants)) s.findMorePeers(ctx, wants[0]) } s.resetIdleTick() diff --git a/bitswap/internal/session/sessionwants.go b/bitswap/internal/session/sessionwants.go index ad8dcd1bc..60df0df2f 100644 --- a/bitswap/internal/session/sessionwants.go +++ b/bitswap/internal/session/sessionwants.go @@ -56,7 +56,7 @@ func (sw *sessionWants) GetNextWants(limit int) []cid.Cid { func (sw *sessionWants) WantsSent(ks []cid.Cid) { now := time.Now() for _, c := range ks { - if _, ok := sw.liveWants[c]; !ok { + if _, ok := sw.liveWants[c]; !ok && sw.toFetch.Has(c) { sw.toFetch.Remove(c) sw.liveWants[c] = now } @@ -83,8 +83,7 @@ func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) totalLatency += now.Sub(sentAt) } - // Remove the CID from the live wants / toFetch queue and add it - // to the past wants + // Remove the CID from the live wants / toFetch queue delete(sw.liveWants, c) sw.toFetch.Remove(c) } @@ -96,6 +95,9 @@ func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) // PrepareBroadcast saves the current time for each live want and returns the // live want CIDs. 
func (sw *sessionWants) PrepareBroadcast() []cid.Cid { + // TODO: Change this to return wants in order so that the session will + // send out Find Providers request for the first want + // (Note that maps return keys in random order) now := time.Now() live := make([]cid.Cid, 0, len(sw.liveWants)) for c := range sw.liveWants { diff --git a/bitswap/internal/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go index cffb39bb9..ece7a14cc 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -4,6 +4,7 @@ import ( "context" bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" + lu "github.com/ipfs/go-bitswap/internal/logutil" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" @@ -298,16 +299,34 @@ func (sws *sessionWantSender) trackWant(c cid.Cid) { // processUpdates processes incoming blocks and HAVE / DONT_HAVEs. // It returns all DONT_HAVEs. func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { - prunePeers := make(map[peer.ID]struct{}) - dontHaves := cid.NewSet() + // Process received blocks keys + blkCids := cid.NewSet() for _, upd := range updates { - // TODO: If there is a timeout for the want from the peer, remove want.sentTo - // so the want can be sent to another peer (and blacklist the peer?) 
- // TODO: If a peer is no longer available, check if all providers of - // each CID have been exhausted + for _, c := range upd.ks { + blkCids.Add(c) + log.Warnf("received block %s", lu.C(c)) + // Remove the want + removed := sws.removeWant(c) + if removed != nil { + // Inform the peer tracker that this peer was the first to send + // us the block + sws.peerRspTrkr.receivedBlockFrom(upd.from) + } + delete(sws.peerConsecutiveDontHaves, upd.from) + } + } - // For each DONT_HAVE + // Process received DONT_HAVEs + dontHaves := cid.NewSet() + prunePeers := make(map[peer.ID]struct{}) + for _, upd := range updates { for _, c := range upd.dontHaves { + // If we already received a block for the want, ignore any + // DONT_HAVE for the want + if blkCids.Has(c) { + continue + } + dontHaves.Add(c) // Update the block presence for the peer @@ -330,24 +349,23 @@ func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { sws.peerConsecutiveDontHaves[upd.from]++ } } + } - // For each HAVE + // Process received HAVEs + for _, upd := range updates { for _, c := range upd.haves { + // If we already received a block for the want, ignore any HAVE for + // the want + if blkCids.Has(c) { + continue + } + // Update the block presence for the peer sws.updateWantBlockPresence(c, upd.from) - delete(sws.peerConsecutiveDontHaves, upd.from) - } - // For each received block - for _, c := range upd.ks { - // Remove the want - removed := sws.removeWant(c) - if removed != nil { - // Inform the peer tracker that this peer was the first to send - // us the block - sws.peerRspTrkr.receivedBlockFrom(upd.from) - } + // Clear the consecutive DONT_HAVE count for the peer delete(sws.peerConsecutiveDontHaves, upd.from) + delete(prunePeers, upd.from) } } @@ -356,7 +374,21 @@ func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { if len(prunePeers) > 0 { go func() { for p := range prunePeers { - sws.SignalAvailability(p, false) + // Before removing the peer from the 
session, check if the peer + // sent us a HAVE for a block that we want + peerHasWantedBlock := false + for c := range sws.wants { + if sws.bpm.PeerHasBlock(p, c) { + peerHasWantedBlock = true + break + } + } + + // Peer doesn't have anything we want, so remove it + if !peerHasWantedBlock { + log.Infof("peer %s sent too many dont haves", lu.P(p)) + sws.SignalAvailability(p, false) + } } }() } diff --git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go index c6a3f72c6..d38f0a20f 100644 --- a/bitswap/internal/session/sessionwantsender_test.go +++ b/bitswap/internal/session/sessionwantsender_test.go @@ -529,9 +529,8 @@ func TestConsecutiveDontHaveLimit(t *testing.T) { // Add all cids as wants spm.Add(cids) - // Receive a HAVE from peer (adds it to the session) - bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) - spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) + // Receive a block from peer (adds it to the session) + spm.Update(p, cids[:1], []cid.Cid{}, []cid.Cid{}) // Wait for processing to complete time.Sleep(10 * time.Millisecond) @@ -586,9 +585,8 @@ func TestConsecutiveDontHaveLimitInterrupted(t *testing.T) { // Add all cids as wants spm.Add(cids) - // Receive a HAVE from peer (adds it to the session) - bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) - spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) + // Receive a block from peer (adds it to the session) + spm.Update(p, cids[:1], []cid.Cid{}, []cid.Cid{}) // Wait for processing to complete time.Sleep(5 * time.Millisecond) @@ -642,9 +640,8 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { // Add all cids as wants spm.Add(cids) - // Receive a HAVE from peer (adds it to the session) - bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) - spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) + // Receive a block from peer (adds it to the session) + spm.Update(p, cids[:1], []cid.Cid{}, []cid.Cid{}) // Wait for processing to complete time.Sleep(5 * time.Millisecond) @@ 
-710,3 +707,47 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { t.Fatal("Expected peer not to be available") } } + +func TestConsecutiveDontHaveDontRemoveIfHasWantedBlock(t *testing.T) { + cids := testutil.GenerateCids(peerDontHaveLimit + 10) + p := testutil.GeneratePeers(1)[0] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) + + go spm.Run() + + // Add all cids as wants + spm.Add(cids) + + // Receive a HAVE from peer (adds it to the session) + bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) + spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Peer should be available + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVEs from peer that exceed limit + for _, c := range cids[1 : peerDontHaveLimit+5] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + + // Wait for processing to complete + time.Sleep(20 * time.Millisecond) + + // Peer should still be available because it has a block that we want. 
+ // (We received a HAVE for cid 0 but didn't yet receive the block) + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } +} diff --git a/bitswap/internal/sessionpeermanager/sessionpeermanager.go b/bitswap/internal/sessionpeermanager/sessionpeermanager.go index cc6e71106..90233c72c 100644 --- a/bitswap/internal/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/internal/sessionpeermanager/sessionpeermanager.go @@ -4,6 +4,7 @@ import ( "fmt" "sync" + lu "github.com/ipfs/go-bitswap/internal/logutil" logging "github.com/ipfs/go-log" peer "github.com/libp2p/go-libp2p-core/peer" @@ -61,7 +62,7 @@ func (spm *SessionPeerManager) AddPeer(p peer.ID) bool { // connection spm.tagger.TagPeer(p, spm.tag, sessionPeerTagValue) - log.Infof("Added peer %s to session: %d peers\n", p, len(spm.peers)) + log.Debugf("Added peer %s to session (%d peers)\n", p, len(spm.peers)) return true } @@ -77,6 +78,8 @@ func (spm *SessionPeerManager) RemovePeer(p peer.ID) bool { delete(spm.peers, p) spm.tagger.UntagPeer(p, spm.tag) + + log.Debugf("Removed peer %s from session (%d peers)", lu.P(p), len(spm.peers)) return true } From c52daf6f1c255ff5a15c31c04d97e70dedf712a0 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 5 Mar 2020 14:39:24 -0500 Subject: [PATCH 0854/1035] Disable flaky benchmark This commit was moved from ipfs/go-bitswap@2112d90ef66d4e7e0f0ee1f4f0a5f9048f2ea1e0 --- bitswap/benchmarks_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index 71e046298..9761a26c9 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -130,7 +130,7 @@ var mixedBenches = []mixedBench{ mixedBench{bench{"3Nodes-Overlap3-OneAtATime", 3, 10, overlap2, oneAtATime}, 1, 2}, mixedBench{bench{"3Nodes-AllToAll-OneAtATime", 3, 10, allToAll, oneAtATime}, 1, 2}, mixedBench{bench{"3Nodes-Overlap3-AllConcurrent", 3, 10, overlap2, fetchAllConcurrent}, 1, 2}, - 
mixedBench{bench{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch}, 1, 2}, + // mixedBench{bench{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch}, 1, 2}, } func BenchmarkFetchFromOldBitswap(b *testing.B) { From 7fd48bf9d6aa0866ef04e7acd38bc5e300142afd Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 6 Mar 2020 09:40:54 -0500 Subject: [PATCH 0855/1035] fix: block receive shouldn't affect DONT_HAVE count for other peers This commit was moved from ipfs/go-bitswap@33443d7779ef57a8454048be1161fc815c2ea1a9 --- bitswap/internal/session/sessionwantsender.go | 55 +++++++++---------- 1 file changed, 25 insertions(+), 30 deletions(-) diff --git a/bitswap/internal/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go index ece7a14cc..4bb65aaf5 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -321,8 +321,15 @@ func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { prunePeers := make(map[peer.ID]struct{}) for _, upd := range updates { for _, c := range upd.dontHaves { - // If we already received a block for the want, ignore any - // DONT_HAVE for the want + // Track the number of consecutive DONT_HAVEs each peer receives + if sws.peerConsecutiveDontHaves[upd.from] == peerDontHaveLimit { + prunePeers[upd.from] = struct{}{} + } else { + sws.peerConsecutiveDontHaves[upd.from]++ + } + + // If we already received a block for the want, there's no need to + // update block presence etc if blkCids.Has(c) { continue } @@ -341,28 +348,18 @@ func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { sws.setWantSentTo(c, "") } } - - // Track the number of consecutive DONT_HAVEs each peer receives - if sws.peerConsecutiveDontHaves[upd.from] == peerDontHaveLimit { - prunePeers[upd.from] = struct{}{} - } else { - sws.peerConsecutiveDontHaves[upd.from]++ - } } } // Process received HAVEs for _, upd := range updates { for _, c := 
range upd.haves { - // If we already received a block for the want, ignore any HAVE for - // the want - if blkCids.Has(c) { - continue + // If we haven't already received a block for the want + if !blkCids.Has(c) { + // Update the block presence for the peer + sws.updateWantBlockPresence(c, upd.from) } - // Update the block presence for the peer - sws.updateWantBlockPresence(c, upd.from) - // Clear the consecutive DONT_HAVE count for the peer delete(sws.peerConsecutiveDontHaves, upd.from) delete(prunePeers, upd.from) @@ -372,23 +369,21 @@ func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { // If any peers have sent us too many consecutive DONT_HAVEs, remove them // from the session if len(prunePeers) > 0 { + for p := range prunePeers { + // Before removing the peer from the session, check if the peer + // sent us a HAVE for a block that we want + for c := range sws.wants { + if sws.bpm.PeerHasBlock(p, c) { + delete(prunePeers, p) + break + } + } + } go func() { for p := range prunePeers { - // Before removing the peer from the session, check if the peer - // sent us a HAVE for a block that we want - peerHasWantedBlock := false - for c := range sws.wants { - if sws.bpm.PeerHasBlock(p, c) { - peerHasWantedBlock = true - break - } - } - // Peer doesn't have anything we want, so remove it - if !peerHasWantedBlock { - log.Infof("peer %s sent too many dont haves", lu.P(p)) - sws.SignalAvailability(p, false) - } + log.Infof("peer %s sent too many dont haves", lu.P(p)) + sws.SignalAvailability(p, false) } }() } From c688ba71489098e8a3c9d18e80ff229560f08680 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 6 Mar 2020 09:49:38 -0500 Subject: [PATCH 0856/1035] refactor: avoid unnecessary go-routine This commit was moved from ipfs/go-bitswap@22f0c797966afa4bbfa3b45fdd920a21a250b252 --- bitswap/internal/session/sessionwantsender.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git 
a/bitswap/internal/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go index 4bb65aaf5..df963f9e9 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -368,17 +368,17 @@ func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { // If any peers have sent us too many consecutive DONT_HAVEs, remove them // from the session - if len(prunePeers) > 0 { - for p := range prunePeers { - // Before removing the peer from the session, check if the peer - // sent us a HAVE for a block that we want - for c := range sws.wants { - if sws.bpm.PeerHasBlock(p, c) { - delete(prunePeers, p) - break - } + for p := range prunePeers { + // Before removing the peer from the session, check if the peer + // sent us a HAVE for a block that we want + for c := range sws.wants { + if sws.bpm.PeerHasBlock(p, c) { + delete(prunePeers, p) + break } } + } + if len(prunePeers) > 0 { go func() { for p := range prunePeers { // Peer doesn't have anything we want, so remove it From 3ef19188ede3a79ad132be1f0cc045555e59f2fd Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 6 Mar 2020 14:00:40 -0500 Subject: [PATCH 0857/1035] fix: flaky test This commit was moved from ipfs/go-bitswap@cc1224e61d287addfd7c31b1d5550bc66acda582 --- bitswap/internal/session/sessionwantsender_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go index d38f0a20f..1a35c0eab 100644 --- a/bitswap/internal/session/sessionwantsender_test.go +++ b/bitswap/internal/session/sessionwantsender_test.go @@ -658,7 +658,7 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { } // Wait for processing to complete - time.Sleep(5 * time.Millisecond) + time.Sleep(10 * time.Millisecond) // Session should remove peer if has := fpm.HasPeer(p); has { @@ -670,7 +670,7 @@ func 
TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) // Wait for processing to complete - time.Sleep(5 * time.Millisecond) + time.Sleep(10 * time.Millisecond) // Peer should be available if has := fpm.HasPeer(p); !has { @@ -686,7 +686,7 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { } // Wait for processing to complete - time.Sleep(5 * time.Millisecond) + time.Sleep(10 * time.Millisecond) // Peer should be available if has := fpm.HasPeer(p); !has { @@ -700,7 +700,7 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { } // Wait for processing to complete - time.Sleep(5 * time.Millisecond) + time.Sleep(10 * time.Millisecond) // Session should remove peer if has := fpm.HasPeer(p); has { From d1670270eceace2a35dd3bdf01f194488b3f9f6c Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 6 Mar 2020 14:07:38 -0500 Subject: [PATCH 0858/1035] test: fix another flaky test This commit was moved from ipfs/go-bitswap@2e6034247dd429677f92b17a1d338187426f4958 --- bitswap/internal/decision/engine_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index 0db51f881..89705ed03 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -1007,7 +1007,7 @@ func TestTaggingPeers(t *testing.T) { } func TestTaggingUseful(t *testing.T) { - peerSampleInterval := 2 * time.Millisecond + peerSampleInterval := 5 * time.Millisecond ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() @@ -1027,7 +1027,7 @@ func TestTaggingUseful(t *testing.T) { if me.PeerTagger.count(me.Engine.tagUseful) != 1 { t.Fatal("Peers should be tagged but weren't") } - time.Sleep(peerSampleInterval * 8) + time.Sleep(peerSampleInterval * 10) } if me.PeerTagger.count(me.Engine.tagUseful) == 0 { From d806628a1fef83d5440ad0147b965e91eedb0582 
Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 6 Mar 2020 14:24:56 -0500 Subject: [PATCH 0859/1035] fix: flaky test This commit was moved from ipfs/go-bitswap@568a984ca95c52da0f751dbf25be0fcb778272c5 --- bitswap/internal/decision/engine_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index 89705ed03..892c3057c 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -1009,7 +1009,7 @@ func TestTaggingPeers(t *testing.T) { func TestTaggingUseful(t *testing.T) { peerSampleInterval := 5 * time.Millisecond - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() me := newTestEngine(ctx, "engine", peerSampleInterval) friend := peer.ID("friend") @@ -1023,7 +1023,7 @@ func TestTaggingUseful(t *testing.T) { t.Fatal("Peers should be untagged but weren't") } me.Engine.MessageSent(friend, msg) - time.Sleep(peerSampleInterval * 2) + time.Sleep(8 * time.Millisecond) if me.PeerTagger.count(me.Engine.tagUseful) != 1 { t.Fatal("Peers should be tagged but weren't") } From f6538f40b5adba25c51ec9ae07eda499863257ba Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Tue, 10 Mar 2020 16:06:51 -0400 Subject: [PATCH 0860/1035] feat: timeout when peer doesnt respond to want-block This commit was moved from ipfs/go-bitswap@bdd4629db462166cf811c284e5a75e124282a7a9 --- bitswap/bitswap.go | 2 +- .../messagequeue/donthavetimeoutmgr.go | 5 +++-- bitswap/internal/messagequeue/messagequeue.go | 18 ++++++------------ 3 files changed, 10 insertions(+), 15 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 1b59dcd01..a2bd56ca2 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -125,7 +125,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, var wm *bswm.WantManager // 
onDontHaveTimeout is called when a want-block is sent to a peer that // has an old version of Bitswap that doesn't support DONT_HAVE messages, - // and no response is received within a timeout. + // or when no response is received within a timeout. onDontHaveTimeout := func(p peer.ID, dontHaves []cid.Cid) { // Simulate a DONT_HAVE message arriving to the WantManager wm.ReceiveFrom(ctx, p, nil, nil, dontHaves) diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr.go b/bitswap/internal/messagequeue/donthavetimeoutmgr.go index ee7941b6d..d1c6be58f 100644 --- a/bitswap/internal/messagequeue/donthavetimeoutmgr.go +++ b/bitswap/internal/messagequeue/donthavetimeoutmgr.go @@ -11,7 +11,8 @@ import ( const ( // dontHaveTimeout is used to simulate a DONT_HAVE when communicating with - // a peer whose Bitswap client doesn't support the DONT_HAVE response. + // a peer whose Bitswap client doesn't support the DONT_HAVE response, + // or when the peer takes too long to respond. // If the peer doesn't respond to a want-block within the timeout, the // local node assumes that the peer doesn't have the block. dontHaveTimeout = 5 * time.Second @@ -45,7 +46,7 @@ type pendingWant struct { // dontHaveTimeoutMgr pings the peer to measure latency. It uses the latency to // set a reasonable timeout for simulating a DONT_HAVE message for peers that -// don't support DONT_HAVE +// don't support DONT_HAVE or that take to long to respond. 
type dontHaveTimeoutMgr struct { ctx context.Context shutdown func() diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 8e2518899..922ab6339 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -392,10 +392,8 @@ func (mq *MessageQueue) sendMessage() { } // Make sure the DONT_HAVE timeout manager has started - if !mq.sender.SupportsHave() { - // Note: Start is idempotent - mq.dhTimeoutMgr.Start() - } + // Note: Start is idempotent + mq.dhTimeoutMgr.Start() // Convert want lists to a Bitswap Message message, onSent := mq.extractOutgoingMessage(mq.sender.SupportsHave()) @@ -425,15 +423,11 @@ func (mq *MessageQueue) sendMessage() { } } -// If the peer is running an older version of Bitswap that doesn't support the -// DONT_HAVE response, watch for timeouts on any want-blocks we sent the peer, -// and if there is a timeout simulate a DONT_HAVE response. +// If want-block times out, simulate a DONT_HAVE reponse. +// This is necessary when making requests to peers running an older version of +// Bitswap that doesn't support the DONT_HAVE response, and is also useful to +// mitigate getting blocked by a peer that takes a long time to respond. 
func (mq *MessageQueue) simulateDontHaveWithTimeout(msg bsmsg.BitSwapMessage) { - // If the peer supports DONT_HAVE responses, we don't need to simulate - if mq.sender.SupportsHave() { - return - } - mq.wllock.Lock() // Get the CID of each want-block that expects a DONT_HAVE response From e774269ab36d8ce08944fed09d837d70a50d8873 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Tue, 10 Mar 2020 16:50:08 -0400 Subject: [PATCH 0861/1035] docs: fix find peers log level This commit was moved from ipfs/go-bitswap@dbb73a68706ab0ea3ce24bea0fb304be3eeb55b8 --- bitswap/internal/session/session.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index 45cd825fa..a1f88e825 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -340,7 +340,7 @@ func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { // Search for providers who have the first want in the list. // Typically if the provider has the first block they will have // the rest of the blocks also. 
- log.Warnf("Ses%d: FindMorePeers with want %s (1st of %d wants)", s.id, lu.C(wants[0]), len(wants)) + log.Infof("Ses%d: FindMorePeers with want %s (1st of %d wants)", s.id, lu.C(wants[0]), len(wants)) s.findMorePeers(ctx, wants[0]) } s.resetIdleTick() From 4ffaa4076aa3a378c0203703e07ae14817af2656 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Tue, 10 Mar 2020 17:07:20 -0400 Subject: [PATCH 0862/1035] fix: flaky provider query manager (#286) This commit was moved from ipfs/go-bitswap@964888c485919bea7f05c6057f64c0d7b7c3cb7e --- .../providerquerymanager/providerquerymanager_test.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/bitswap/internal/providerquerymanager/providerquerymanager_test.go b/bitswap/internal/providerquerymanager/providerquerymanager_test.go index 8f560536b..66d158123 100644 --- a/bitswap/internal/providerquerymanager/providerquerymanager_test.go +++ b/bitswap/internal/providerquerymanager/providerquerymanager_test.go @@ -21,6 +21,7 @@ type fakeProviderNetwork struct { connectDelay time.Duration queriesMadeMutex sync.RWMutex queriesMade int + liveQueries int } func (fpn *fakeProviderNetwork) ConnectTo(context.Context, peer.ID) error { @@ -31,6 +32,7 @@ func (fpn *fakeProviderNetwork) ConnectTo(context.Context, peer.ID) error { func (fpn *fakeProviderNetwork) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { fpn.queriesMadeMutex.Lock() fpn.queriesMade++ + fpn.liveQueries++ fpn.queriesMadeMutex.Unlock() incomingPeers := make(chan peer.ID) go func() { @@ -48,7 +50,11 @@ func (fpn *fakeProviderNetwork) FindProvidersAsync(ctx context.Context, k cid.Ci return } } + fpn.queriesMadeMutex.Lock() + fpn.liveQueries-- + fpn.queriesMadeMutex.Unlock() }() + return incomingPeers } @@ -264,8 +270,8 @@ func TestRateLimitingRequests(t *testing.T) { } time.Sleep(9 * time.Millisecond) fpn.queriesMadeMutex.Lock() - if fpn.queriesMade != maxInProcessRequests { - t.Logf("Queries made: %d\n", fpn.queriesMade) + if 
fpn.liveQueries != maxInProcessRequests { + t.Logf("Queries made: %d\n", fpn.liveQueries) t.Fatal("Did not limit parallel requests to rate limit") } fpn.queriesMadeMutex.Unlock() From b2333dd87c95352083d0577007a9bb1de81c471a Mon Sep 17 00:00:00 2001 From: dirkmc Date: Tue, 10 Mar 2020 17:15:05 -0400 Subject: [PATCH 0863/1035] fix: flaky engine peer tagging test (#287) This commit was moved from ipfs/go-bitswap@f8ed752a4c0242a9946c1112ce49d1d3bde5e10f --- bitswap/internal/decision/engine_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index 892c3057c..6313ee161 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -1007,9 +1007,9 @@ func TestTaggingPeers(t *testing.T) { } func TestTaggingUseful(t *testing.T) { - peerSampleInterval := 5 * time.Millisecond + peerSampleInterval := 10 * time.Millisecond - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() me := newTestEngine(ctx, "engine", peerSampleInterval) friend := peer.ID("friend") @@ -1023,7 +1023,7 @@ func TestTaggingUseful(t *testing.T) { t.Fatal("Peers should be untagged but weren't") } me.Engine.MessageSent(friend, msg) - time.Sleep(8 * time.Millisecond) + time.Sleep(15 * time.Millisecond) if me.PeerTagger.count(me.Engine.tagUseful) != 1 { t.Fatal("Peers should be tagged but weren't") } From e572f87bb24e8f3d41d7713f77634832c3ce4034 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 10 Mar 2020 19:25:28 -0700 Subject: [PATCH 0864/1035] fix: re-export testinstance/testnet We use these outside of bitswap for testing. 
This commit was moved from ipfs/go-bitswap@b58f8fc65226488d404b88f4a58f38748ae33cdb --- bitswap/benchmarks_test.go | 4 ++-- bitswap/bitswap_test.go | 4 ++-- bitswap/bitswap_with_sessions_test.go | 2 +- bitswap/network/ipfs_impl_test.go | 2 +- bitswap/{internal => }/testinstance/testinstance.go | 2 +- bitswap/{internal => }/testnet/interface.go | 0 .../testnet/internet_latency_delay_generator.go | 0 .../testnet/internet_latency_delay_generator_test.go | 0 bitswap/{internal => }/testnet/network_test.go | 0 bitswap/{internal => }/testnet/peernet.go | 0 bitswap/{internal => }/testnet/rate_limit_generators.go | 0 bitswap/{internal => }/testnet/virtual.go | 0 12 files changed, 7 insertions(+), 7 deletions(-) rename bitswap/{internal => }/testinstance/testinstance.go (98%) rename bitswap/{internal => }/testnet/interface.go (100%) rename bitswap/{internal => }/testnet/internet_latency_delay_generator.go (100%) rename bitswap/{internal => }/testnet/internet_latency_delay_generator_test.go (100%) rename bitswap/{internal => }/testnet/network_test.go (100%) rename bitswap/{internal => }/testnet/peernet.go (100%) rename bitswap/{internal => }/testnet/rate_limit_generators.go (100%) rename bitswap/{internal => }/testnet/virtual.go (100%) diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index 9761a26c9..d3aaf04f9 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -19,8 +19,8 @@ import ( bitswap "github.com/ipfs/go-bitswap" bssession "github.com/ipfs/go-bitswap/internal/session" - testinstance "github.com/ipfs/go-bitswap/internal/testinstance" - tn "github.com/ipfs/go-bitswap/internal/testnet" + testinstance "github.com/ipfs/go-bitswap/testinstance" + tn "github.com/ipfs/go-bitswap/testnet" bsnet "github.com/ipfs/go-bitswap/network" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 428fa5be6..ba89e038d 100644 --- a/bitswap/bitswap_test.go +++ 
b/bitswap/bitswap_test.go @@ -11,8 +11,8 @@ import ( bitswap "github.com/ipfs/go-bitswap" decision "github.com/ipfs/go-bitswap/internal/decision" bssession "github.com/ipfs/go-bitswap/internal/session" - testinstance "github.com/ipfs/go-bitswap/internal/testinstance" - tn "github.com/ipfs/go-bitswap/internal/testnet" + testinstance "github.com/ipfs/go-bitswap/testinstance" + tn "github.com/ipfs/go-bitswap/testnet" "github.com/ipfs/go-bitswap/message" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/bitswap_with_sessions_test.go index 3b5b68e17..9551938c9 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -8,7 +8,7 @@ import ( bitswap "github.com/ipfs/go-bitswap" bssession "github.com/ipfs/go-bitswap/internal/session" - testinstance "github.com/ipfs/go-bitswap/internal/testinstance" + testinstance "github.com/ipfs/go-bitswap/testinstance" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index e5b2475f6..5e0f512bc 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - tn "github.com/ipfs/go-bitswap/internal/testnet" + tn "github.com/ipfs/go-bitswap/testnet" bsmsg "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" bsnet "github.com/ipfs/go-bitswap/network" diff --git a/bitswap/internal/testinstance/testinstance.go b/bitswap/testinstance/testinstance.go similarity index 98% rename from bitswap/internal/testinstance/testinstance.go rename to bitswap/testinstance/testinstance.go index b1651db11..2ee6be8bd 100644 --- a/bitswap/internal/testinstance/testinstance.go +++ b/bitswap/testinstance/testinstance.go @@ -5,7 +5,7 @@ import ( "time" bitswap "github.com/ipfs/go-bitswap" - tn 
"github.com/ipfs/go-bitswap/internal/testnet" + tn "github.com/ipfs/go-bitswap/testnet" bsnet "github.com/ipfs/go-bitswap/network" ds "github.com/ipfs/go-datastore" delayed "github.com/ipfs/go-datastore/delayed" diff --git a/bitswap/internal/testnet/interface.go b/bitswap/testnet/interface.go similarity index 100% rename from bitswap/internal/testnet/interface.go rename to bitswap/testnet/interface.go diff --git a/bitswap/internal/testnet/internet_latency_delay_generator.go b/bitswap/testnet/internet_latency_delay_generator.go similarity index 100% rename from bitswap/internal/testnet/internet_latency_delay_generator.go rename to bitswap/testnet/internet_latency_delay_generator.go diff --git a/bitswap/internal/testnet/internet_latency_delay_generator_test.go b/bitswap/testnet/internet_latency_delay_generator_test.go similarity index 100% rename from bitswap/internal/testnet/internet_latency_delay_generator_test.go rename to bitswap/testnet/internet_latency_delay_generator_test.go diff --git a/bitswap/internal/testnet/network_test.go b/bitswap/testnet/network_test.go similarity index 100% rename from bitswap/internal/testnet/network_test.go rename to bitswap/testnet/network_test.go diff --git a/bitswap/internal/testnet/peernet.go b/bitswap/testnet/peernet.go similarity index 100% rename from bitswap/internal/testnet/peernet.go rename to bitswap/testnet/peernet.go diff --git a/bitswap/internal/testnet/rate_limit_generators.go b/bitswap/testnet/rate_limit_generators.go similarity index 100% rename from bitswap/internal/testnet/rate_limit_generators.go rename to bitswap/testnet/rate_limit_generators.go diff --git a/bitswap/internal/testnet/virtual.go b/bitswap/testnet/virtual.go similarity index 100% rename from bitswap/internal/testnet/virtual.go rename to bitswap/testnet/virtual.go From 6d63e5cda81bda4036e7fb15b53556d37d2ea007 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 11 Mar 2020 18:01:35 -0400 Subject: [PATCH 0865/1035] fix: flaky 
TestDontHaveTimeoutMgrTimeout This commit was moved from ipfs/go-bitswap@5a742adbb7e3246ca3655d19b41b194c077f3811 --- bitswap/internal/messagequeue/donthavetimeoutmgr_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go index 4093f7ba6..3ac21a78c 100644 --- a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go +++ b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go @@ -109,7 +109,7 @@ func TestDontHaveTimeoutMgrTimeout(t *testing.T) { tr.clear() // Sleep until the second set of keys should have timed out - time.Sleep(expectedTimeout) + time.Sleep(expectedTimeout + 10*time.Millisecond) // At this stage all keys should have timed out. The second set included // the first set of keys, but they were added before the first set timed From 675f7d6c41813c691c0baf4b7aba69d946d3720b Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 11 Mar 2020 17:54:59 -0400 Subject: [PATCH 0866/1035] fix: order of session broadcast wants This commit was moved from ipfs/go-bitswap@b83a609c430d3a57496c4c688a3597baece8beda --- bitswap/internal/session/session.go | 4 +- bitswap/internal/session/session_test.go | 11 ++- bitswap/internal/session/sessionwants.go | 78 +++++++++++-------- bitswap/internal/session/sessionwants_test.go | 67 +++++++++++++++- 4 files changed, 118 insertions(+), 42 deletions(-) diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index a1f88e825..faf01cb7a 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -141,7 +141,7 @@ func New(ctx context.Context, periodicSearchDelay delay.D, self peer.ID) *Session { s := &Session{ - sw: newSessionWants(), + sw: newSessionWants(broadcastLiveWantsLimit), tickDelayReqs: make(chan time.Duration), ctx: ctx, wm: wm, @@ -433,7 +433,7 @@ func (s *Session) wantBlocks(ctx context.Context, newks []cid.Cid) { } // 
No peers discovered yet, broadcast some want-haves - ks := s.sw.GetNextWants(broadcastLiveWantsLimit) + ks := s.sw.GetNextWants() if len(ks) > 0 { log.Infof("Ses%d: No peers - broadcasting %d want HAVE requests\n", s.id, len(ks)) s.wm.BroadcastWantHaves(ctx, s.id, ks) diff --git a/bitswap/internal/session/session_test.go b/bitswap/internal/session/session_test.go index d40036d3d..d6f89e2dc 100644 --- a/bitswap/internal/session/session_test.go +++ b/bitswap/internal/session/session_test.go @@ -222,12 +222,19 @@ func TestSessionFindMorePeers(t *testing.T) { t.Fatal("Did not make second want request ") } - // Verify a broadcast was made + // The session should keep broadcasting periodically until it receives a response select { case receivedWantReq := <-fwm.wantReqs: - if len(receivedWantReq.cids) < broadcastLiveWantsLimit { + if len(receivedWantReq.cids) != broadcastLiveWantsLimit { t.Fatal("did not rebroadcast whole live list") } + // Make sure the first block is not included because it has already + // been received + for _, c := range receivedWantReq.cids { + if c.Equals(cids[0]) { + t.Fatal("should not braodcast block that was already received") + } + } case <-ctx.Done(): t.Fatal("Never rebroadcast want list") } diff --git a/bitswap/internal/session/sessionwants.go b/bitswap/internal/session/sessionwants.go index 60df0df2f..803e2e734 100644 --- a/bitswap/internal/session/sessionwants.go +++ b/bitswap/internal/session/sessionwants.go @@ -11,19 +11,27 @@ import ( // sessionWants keeps track of which cids are waiting to be sent out, and which // peers are "live" - ie, we've sent a request but haven't received a block yet type sessionWants struct { - toFetch *cidQueue - liveWants map[cid.Cid]time.Time + // The wants that have not yet been sent out + toFetch *cidQueue + // Wants that have been sent but have not received a response + liveWants *cidQueue + // The time at which live wants were sent + sentAt map[cid.Cid]time.Time + // The maximum number of want-haves to 
send in a broadcast + broadcastLimit int } -func newSessionWants() sessionWants { +func newSessionWants(broadcastLimit int) sessionWants { return sessionWants{ - toFetch: newCidQueue(), - liveWants: make(map[cid.Cid]time.Time), + toFetch: newCidQueue(), + liveWants: newCidQueue(), + sentAt: make(map[cid.Cid]time.Time), + broadcastLimit: broadcastLimit, } } func (sw *sessionWants) String() string { - return fmt.Sprintf("%d pending / %d live", sw.toFetch.Len(), len(sw.liveWants)) + return fmt.Sprintf("%d pending / %d live", sw.toFetch.Len(), sw.liveWants.Len()) } // BlocksRequested is called when the client makes a request for blocks @@ -33,20 +41,23 @@ func (sw *sessionWants) BlocksRequested(newWants []cid.Cid) { } } -// GetNextWants moves as many CIDs from the fetch queue to the live wants -// list as possible (given the limit). Returns the newly live wants. -func (sw *sessionWants) GetNextWants(limit int) []cid.Cid { +// GetNextWants is called when the session has not yet discovered peers with +// the blocks that it wants. It moves as many CIDs from the fetch queue to +// the live wants queue as possible (given the broadcast limit). +// Returns the newly live wants. 
+func (sw *sessionWants) GetNextWants() []cid.Cid { now := time.Now() // Move CIDs from fetch queue to the live wants queue (up to the limit) - currentLiveCount := len(sw.liveWants) - toAdd := limit - currentLiveCount + currentLiveCount := sw.liveWants.Len() + toAdd := sw.broadcastLimit - currentLiveCount var live []cid.Cid for ; toAdd > 0 && sw.toFetch.Len() > 0; toAdd-- { c := sw.toFetch.Pop() live = append(live, c) - sw.liveWants[c] = now + sw.liveWants.Push(c) + sw.sentAt[c] = now } return live @@ -56,9 +67,10 @@ func (sw *sessionWants) GetNextWants(limit int) []cid.Cid { func (sw *sessionWants) WantsSent(ks []cid.Cid) { now := time.Now() for _, c := range ks { - if _, ok := sw.liveWants[c]; !ok && sw.toFetch.Has(c) { + if _, ok := sw.sentAt[c]; !ok && sw.toFetch.Has(c) { sw.toFetch.Remove(c) - sw.liveWants[c] = now + sw.liveWants.Push(c) + sw.sentAt[c] = now } } } @@ -78,13 +90,15 @@ func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) if sw.isWanted(c) { wanted = append(wanted, c) - sentAt, ok := sw.liveWants[c] + // Measure latency + sentAt, ok := sw.sentAt[c] if ok && !sentAt.IsZero() { totalLatency += now.Sub(sentAt) } // Remove the CID from the live wants / toFetch queue - delete(sw.liveWants, c) + sw.liveWants.Remove(c) + delete(sw.sentAt, c) sw.toFetch.Remove(c) } } @@ -93,16 +107,15 @@ func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) } // PrepareBroadcast saves the current time for each live want and returns the -// live want CIDs. +// live want CIDs up to the broadcast limit. 
func (sw *sessionWants) PrepareBroadcast() []cid.Cid { - // TODO: Change this to return wants in order so that the session will - // send out Find Providers request for the first want - // (Note that maps return keys in random order) now := time.Now() - live := make([]cid.Cid, 0, len(sw.liveWants)) - for c := range sw.liveWants { - live = append(live, c) - sw.liveWants[c] = now + live := sw.liveWants.Cids() + if len(live) > sw.broadcastLimit { + live = live[:sw.broadcastLimit] + } + for _, c := range live { + sw.sentAt[c] = now } return live } @@ -116,21 +129,18 @@ func (sw *sessionWants) CancelPending(keys []cid.Cid) { // LiveWants returns a list of live wants func (sw *sessionWants) LiveWants() []cid.Cid { - live := make([]cid.Cid, 0, len(sw.liveWants)) - for c := range sw.liveWants { - live = append(live, c) - } - return live + return sw.liveWants.Cids() } +// RandomLiveWant returns a randomly selected live want func (sw *sessionWants) RandomLiveWant() cid.Cid { - if len(sw.liveWants) == 0 { + if len(sw.sentAt) == 0 { return cid.Cid{} } // picking a random live want - i := rand.Intn(len(sw.liveWants)) - for k := range sw.liveWants { + i := rand.Intn(len(sw.sentAt)) + for k := range sw.sentAt { if i == 0 { return k } @@ -141,12 +151,12 @@ func (sw *sessionWants) RandomLiveWant() cid.Cid { // Has live wants indicates if there are any live wants func (sw *sessionWants) HasLiveWants() bool { - return len(sw.liveWants) > 0 + return sw.liveWants.Len() > 0 } // Indicates whether the want is in either of the fetch or live queues func (sw *sessionWants) isWanted(c cid.Cid) bool { - _, ok := sw.liveWants[c] + ok := sw.liveWants.Has(c) if !ok { ok = sw.toFetch.Has(c) } diff --git a/bitswap/internal/session/sessionwants_test.go b/bitswap/internal/session/sessionwants_test.go index 8389faa06..07c23a13e 100644 --- a/bitswap/internal/session/sessionwants_test.go +++ b/bitswap/internal/session/sessionwants_test.go @@ -8,7 +8,7 @@ import ( ) func TestEmptySessionWants(t 
*testing.T) { - sw := newSessionWants() + sw := newSessionWants(broadcastLiveWantsLimit) // Expect these functions to return nothing on a new sessionWants lws := sw.PrepareBroadcast() @@ -29,7 +29,7 @@ func TestEmptySessionWants(t *testing.T) { } func TestSessionWants(t *testing.T) { - sw := newSessionWants() + sw := newSessionWants(5) cids := testutil.GenerateCids(10) others := testutil.GenerateCids(1) @@ -42,7 +42,7 @@ func TestSessionWants(t *testing.T) { // The first 5 cids should go move into the live queue // toFetch Live // 98765 43210 - nextw := sw.GetNextWants(5) + nextw := sw.GetNextWants() if len(nextw) != 5 { t.Fatal("expected 5 next wants") } @@ -78,7 +78,7 @@ func TestSessionWants(t *testing.T) { // Should move 2 wants from toFetch queue to live wants // toFetch Live // 987__ 65432 - nextw = sw.GetNextWants(5) + nextw = sw.GetNextWants() if len(nextw) != 2 { t.Fatal("expected 2 next wants") } @@ -108,3 +108,62 @@ func TestSessionWants(t *testing.T) { t.Fatal("expected 4 live wants") } } + +func TestPrepareBroadcast(t *testing.T) { + sw := newSessionWants(3) + cids := testutil.GenerateCids(10) + + // Add 6 new wants + // toFetch Live + // 543210 + sw.BlocksRequested(cids[0:6]) + + // Get next wants with a limit of 3 + // The first 3 cids should go move into the live queue + // toFetch Live + // 543 210 + sw.GetNextWants() + + // Broadcast should contain wants in order + for i := 0; i < 10; i++ { + ws := sw.PrepareBroadcast() + if len(ws) != 3 { + t.Fatal("should broadcast all live wants") + } + for idx, c := range ws { + if !c.Equals(cids[idx]) { + t.Fatal("broadcast should always return wants in order") + } + } + } + + // One block received + // Remove a cid from the live queue + sw.BlocksReceived(cids[0:1]) + // toFetch Live + // 543 21_ + + // Add 4 new wants + // toFetch Live + // 9876543 21 + sw.BlocksRequested(cids[6:]) + + // 2 Wants sent + // toFetch Live + // 98765 4321 + sw.WantsSent(cids[3:5]) + + // Broadcast should contain wants in order + 
cids = cids[1:] + for i := 0; i < 10; i++ { + ws := sw.PrepareBroadcast() + if len(ws) != 3 { + t.Fatal("should broadcast live wants up to limit", len(ws), len(cids)) + } + for idx, c := range ws { + if !c.Equals(cids[idx]) { + t.Fatal("broadcast should always return wants in order") + } + } + } +} From dee6ecbab0441949ab774853157f4913651e8b25 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 12 Mar 2020 10:45:48 -0400 Subject: [PATCH 0867/1035] refactor: improve sessionWants perf This commit was moved from ipfs/go-bitswap@73261ec7a72a5d67d666735fd2934d40caed226f --- bitswap/internal/session/sessionwants.go | 85 +++++++++++++------ bitswap/internal/session/sessionwants_test.go | 24 +++++- 2 files changed, 79 insertions(+), 30 deletions(-) diff --git a/bitswap/internal/session/sessionwants.go b/bitswap/internal/session/sessionwants.go index 803e2e734..0d4ded013 100644 --- a/bitswap/internal/session/sessionwants.go +++ b/bitswap/internal/session/sessionwants.go @@ -8,15 +8,20 @@ import ( cid "github.com/ipfs/go-cid" ) +// liveWantsOrder and liveWants will get out of sync as blocks are received. +// This constant is the maximum amount to allow them to be out of sync before +// cleaning up the ordering array. 
+const liveWantsOrderGCLimit = 32 + // sessionWants keeps track of which cids are waiting to be sent out, and which // peers are "live" - ie, we've sent a request but haven't received a block yet type sessionWants struct { // The wants that have not yet been sent out toFetch *cidQueue // Wants that have been sent but have not received a response - liveWants *cidQueue - // The time at which live wants were sent - sentAt map[cid.Cid]time.Time + liveWants map[cid.Cid]time.Time + // The order in which wants were requested + liveWantsOrder []cid.Cid // The maximum number of want-haves to send in a broadcast broadcastLimit int } @@ -24,14 +29,13 @@ type sessionWants struct { func newSessionWants(broadcastLimit int) sessionWants { return sessionWants{ toFetch: newCidQueue(), - liveWants: newCidQueue(), - sentAt: make(map[cid.Cid]time.Time), + liveWants: make(map[cid.Cid]time.Time), broadcastLimit: broadcastLimit, } } func (sw *sessionWants) String() string { - return fmt.Sprintf("%d pending / %d live", sw.toFetch.Len(), sw.liveWants.Len()) + return fmt.Sprintf("%d pending / %d live", sw.toFetch.Len(), len(sw.liveWants)) } // BlocksRequested is called when the client makes a request for blocks @@ -48,16 +52,17 @@ func (sw *sessionWants) BlocksRequested(newWants []cid.Cid) { func (sw *sessionWants) GetNextWants() []cid.Cid { now := time.Now() - // Move CIDs from fetch queue to the live wants queue (up to the limit) - currentLiveCount := sw.liveWants.Len() + // Move CIDs from fetch queue to the live wants queue (up to the broadcast + // limit) + currentLiveCount := len(sw.liveWants) toAdd := sw.broadcastLimit - currentLiveCount var live []cid.Cid for ; toAdd > 0 && sw.toFetch.Len() > 0; toAdd-- { c := sw.toFetch.Pop() live = append(live, c) - sw.liveWants.Push(c) - sw.sentAt[c] = now + sw.liveWantsOrder = append(sw.liveWantsOrder, c) + sw.liveWants[c] = now } return live @@ -67,10 +72,10 @@ func (sw *sessionWants) GetNextWants() []cid.Cid { func (sw *sessionWants) 
WantsSent(ks []cid.Cid) { now := time.Now() for _, c := range ks { - if _, ok := sw.sentAt[c]; !ok && sw.toFetch.Has(c) { + if _, ok := sw.liveWants[c]; !ok && sw.toFetch.Has(c) { sw.toFetch.Remove(c) - sw.liveWants.Push(c) - sw.sentAt[c] = now + sw.liveWantsOrder = append(sw.liveWantsOrder, c) + sw.liveWants[c] = now } } } @@ -85,24 +90,36 @@ func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) return wanted, totalLatency } + // Filter for blocks that were actually wanted (as opposed to duplicates) now := time.Now() for _, c := range ks { if sw.isWanted(c) { wanted = append(wanted, c) // Measure latency - sentAt, ok := sw.sentAt[c] + sentAt, ok := sw.liveWants[c] if ok && !sentAt.IsZero() { totalLatency += now.Sub(sentAt) } // Remove the CID from the live wants / toFetch queue - sw.liveWants.Remove(c) - delete(sw.sentAt, c) + delete(sw.liveWants, c) sw.toFetch.Remove(c) } } + // If the live wants ordering array is a long way out of sync with the + // live wants map, clean up the ordering array + if len(sw.liveWantsOrder)-len(sw.liveWants) > liveWantsOrderGCLimit { + cleaned := sw.liveWantsOrder[:0] + for _, c := range sw.liveWantsOrder { + if _, ok := sw.liveWants[c]; ok { + cleaned = append(cleaned, c) + } + } + sw.liveWantsOrder = cleaned + } + return wanted, totalLatency } @@ -110,13 +127,20 @@ func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) // live want CIDs up to the broadcast limit. 
func (sw *sessionWants) PrepareBroadcast() []cid.Cid { now := time.Now() - live := sw.liveWants.Cids() - if len(live) > sw.broadcastLimit { - live = live[:sw.broadcastLimit] - } - for _, c := range live { - sw.sentAt[c] = now + live := make([]cid.Cid, 0, len(sw.liveWants)) + for _, c := range sw.liveWantsOrder { + if _, ok := sw.liveWants[c]; ok { + // No response was received for the want, so reset the sent time + // to now as we're about to broadcast + sw.liveWants[c] = now + + live = append(live, c) + if len(live) == sw.broadcastLimit { + break + } + } } + return live } @@ -129,18 +153,23 @@ func (sw *sessionWants) CancelPending(keys []cid.Cid) { // LiveWants returns a list of live wants func (sw *sessionWants) LiveWants() []cid.Cid { - return sw.liveWants.Cids() + live := make([]cid.Cid, 0, len(sw.liveWants)) + for c := range sw.liveWants { + live = append(live, c) + } + + return live } // RandomLiveWant returns a randomly selected live want func (sw *sessionWants) RandomLiveWant() cid.Cid { - if len(sw.sentAt) == 0 { + if len(sw.liveWants) == 0 { return cid.Cid{} } // picking a random live want - i := rand.Intn(len(sw.sentAt)) - for k := range sw.sentAt { + i := rand.Intn(len(sw.liveWants)) + for k := range sw.liveWants { if i == 0 { return k } @@ -151,12 +180,12 @@ func (sw *sessionWants) RandomLiveWant() cid.Cid { // Has live wants indicates if there are any live wants func (sw *sessionWants) HasLiveWants() bool { - return sw.liveWants.Len() > 0 + return len(sw.liveWants) > 0 } // Indicates whether the want is in either of the fetch or live queues func (sw *sessionWants) isWanted(c cid.Cid) bool { - ok := sw.liveWants.Has(c) + _, ok := sw.liveWants[c] if !ok { ok = sw.toFetch.Has(c) } diff --git a/bitswap/internal/session/sessionwants_test.go b/bitswap/internal/session/sessionwants_test.go index 07c23a13e..b6e6c94ff 100644 --- a/bitswap/internal/session/sessionwants_test.go +++ b/bitswap/internal/session/sessionwants_test.go @@ -116,7 +116,7 @@ func 
TestPrepareBroadcast(t *testing.T) { // Add 6 new wants // toFetch Live // 543210 - sw.BlocksRequested(cids[0:6]) + sw.BlocksRequested(cids[:6]) // Get next wants with a limit of 3 // The first 3 cids should go move into the live queue @@ -139,7 +139,7 @@ func TestPrepareBroadcast(t *testing.T) { // One block received // Remove a cid from the live queue - sw.BlocksReceived(cids[0:1]) + sw.BlocksReceived(cids[:1]) // toFetch Live // 543 21_ @@ -167,3 +167,23 @@ func TestPrepareBroadcast(t *testing.T) { } } } + +// Test that even after GC broadcast returns correct wants +func TestPrepareBroadcastAfterGC(t *testing.T) { + sw := newSessionWants(5) + cids := testutil.GenerateCids(liveWantsOrderGCLimit * 2) + + sw.BlocksRequested(cids) + + // Trigger a sessionWants internal GC of the live wants + sw.BlocksReceived(cids[:liveWantsOrderGCLimit+1]) + cids = cids[:liveWantsOrderGCLimit+1] + + // Broadcast should contain wants in order + ws := sw.PrepareBroadcast() + for i, c := range ws { + if !c.Equals(cids[i]) { + t.Fatal("broadcast should always return wants in order") + } + } +} From f7520406b60252d66d1a53702dd4afe9fdd92d33 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 12 Mar 2020 17:19:26 -0400 Subject: [PATCH 0868/1035] fix: flaky TestRateLimitingRequests This commit was moved from ipfs/go-bitswap@0945c26477fda25f8ec7d285f10b23fc41f748f0 --- .../providerquerymanager/providerquerymanager_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/internal/providerquerymanager/providerquerymanager_test.go b/bitswap/internal/providerquerymanager/providerquerymanager_test.go index 66d158123..a39e9661f 100644 --- a/bitswap/internal/providerquerymanager/providerquerymanager_test.go +++ b/bitswap/internal/providerquerymanager/providerquerymanager_test.go @@ -253,7 +253,7 @@ func TestRateLimitingRequests(t *testing.T) { peers := testutil.GeneratePeers(10) fpn := &fakeProviderNetwork{ peersFound: peers, - delay: 1 * time.Millisecond, + 
delay: 5 * time.Millisecond, } ctx := context.Background() ctx, cancel := context.WithCancel(ctx) @@ -268,7 +268,7 @@ func TestRateLimitingRequests(t *testing.T) { for i := 0; i < maxInProcessRequests+1; i++ { requestChannels = append(requestChannels, providerQueryManager.FindProvidersAsync(sessionCtx, keys[i])) } - time.Sleep(9 * time.Millisecond) + time.Sleep(20 * time.Millisecond) fpn.queriesMadeMutex.Lock() if fpn.liveQueries != maxInProcessRequests { t.Logf("Queries made: %d\n", fpn.liveQueries) From e82ff754c7e06363f3baeb44c8e4d85c69bb4080 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Thu, 12 Mar 2020 19:06:10 -0400 Subject: [PATCH 0869/1035] fix: engine test TestTaggingUseful (#297) This commit was moved from ipfs/go-bitswap@5c18cf5d8c889cf84801a7f6945a09e2a855e5d5 --- bitswap/internal/decision/engine.go | 12 ++++- bitswap/internal/decision/engine_test.go | 62 ++++++++++++++++-------- 2 files changed, 52 insertions(+), 22 deletions(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 15e6ad8c2..5c7da903c 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -159,6 +159,8 @@ type Engine struct { // how frequently the engine should sample peer usefulness peerSampleInterval time.Duration + // used by the tests to detect when a sample is taken + sampleCh chan struct{} sendDontHaves bool @@ -167,12 +169,12 @@ type Engine struct { // NewEngine creates a new block sending engine for the given block store func NewEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, self peer.ID) *Engine { - return newEngine(ctx, bs, peerTagger, self, maxBlockSizeReplaceHasWithBlock, shortTerm) + return newEngine(ctx, bs, peerTagger, self, maxBlockSizeReplaceHasWithBlock, shortTerm, nil) } // This constructor is used by the tests func newEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, self peer.ID, - maxReplaceSize int, peerSampleInterval time.Duration) 
*Engine { + maxReplaceSize int, peerSampleInterval time.Duration, sampleCh chan struct{}) *Engine { e := &Engine{ ledgerMap: make(map[peer.ID]*ledger), @@ -183,6 +185,7 @@ func newEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, ticker: time.NewTicker(time.Millisecond * 100), maxBlockSizeReplaceHasWithBlock: maxReplaceSize, peerSampleInterval: peerSampleInterval, + sampleCh: sampleCh, taskWorkerCount: taskWorkerCount, sendDontHaves: true, self: self, @@ -315,6 +318,11 @@ func (e *Engine) scoreWorker(ctx context.Context) { } // Keep the memory. It's not much and it saves us from having to allocate. updates = updates[:0] + + // Used by the tests + if e.sampleCh != nil { + e.sampleCh <- struct{}{} + } } } diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index 6313ee161..0ac01107f 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -91,10 +91,14 @@ type engineSet struct { Blockstore blockstore.Blockstore } -func newTestEngine(ctx context.Context, idStr string, peerSampleInterval time.Duration) engineSet { +func newTestEngine(ctx context.Context, idStr string) engineSet { + return newTestEngineWithSampling(ctx, idStr, shortTerm, nil) +} + +func newTestEngineWithSampling(ctx context.Context, idStr string, peerSampleInterval time.Duration, sampleCh chan struct{}) engineSet { fpt := &fakePeerTagger{} bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - e := newEngine(ctx, bs, fpt, "localhost", 0, peerSampleInterval) + e := newEngine(ctx, bs, fpt, "localhost", 0, peerSampleInterval, sampleCh) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) return engineSet{ Peer: peer.ID(idStr), @@ -108,8 +112,8 @@ func newTestEngine(ctx context.Context, idStr string, peerSampleInterval time.Du func TestConsistentAccounting(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - sender := 
newTestEngine(ctx, "Ernie", shortTerm) - receiver := newTestEngine(ctx, "Bert", shortTerm) + sender := newTestEngine(ctx, "Ernie") + receiver := newTestEngine(ctx, "Bert") // Send messages from Ernie to Bert for i := 0; i < 1000; i++ { @@ -143,8 +147,8 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - sanfrancisco := newTestEngine(ctx, "sf", shortTerm) - seattle := newTestEngine(ctx, "sea", shortTerm) + sanfrancisco := newTestEngine(ctx, "sf") + seattle := newTestEngine(ctx, "sea") m := message.New(true) @@ -181,7 +185,7 @@ func peerIsPartner(p peer.ID, e *Engine) bool { func TestOutboxClosedWhenEngineClosed(t *testing.T) { ctx := context.Background() t.SkipNow() // TODO implement *Engine.Close - e := newEngine(ctx, blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}, "localhost", 0, shortTerm) + e := newEngine(ctx, blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}, "localhost", 0, shortTerm, nil) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) var wg sync.WaitGroup wg.Add(1) @@ -509,7 +513,7 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { testCases = onlyTestCases } - e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm) + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm, nil) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) for i, testCase := range testCases { t.Logf("Test case %d:", i) @@ -665,7 +669,7 @@ func TestPartnerWantHaveWantBlockActive(t *testing.T) { testCases = onlyTestCases } - e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm) + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm, nil) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) var 
next envChan @@ -850,7 +854,7 @@ func TestPartnerWantsThenCancels(t *testing.T) { ctx := context.Background() for i := 0; i < numRounds; i++ { expected := make([][]string, 0, len(testcases)) - e := newEngine(ctx, bs, &fakePeerTagger{}, "localhost", 0, shortTerm) + e := newEngine(ctx, bs, &fakePeerTagger{}, "localhost", 0, shortTerm, nil) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) for _, testcase := range testcases { set := testcase[0] @@ -875,7 +879,7 @@ func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm) + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm, nil) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) @@ -919,7 +923,7 @@ func TestSendDontHave(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm) + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm, nil) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) @@ -981,8 +985,8 @@ func TestSendDontHave(t *testing.T) { func TestTaggingPeers(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() - sanfrancisco := newTestEngine(ctx, "sf", shortTerm) - seattle := newTestEngine(ctx, "sea", shortTerm) + sanfrancisco := newTestEngine(ctx, "sf") + seattle := newTestEngine(ctx, "sea") keys := []string{"a", "b", "c", "d", "e"} for _, letter := range keys { @@ -1007,11 +1011,13 @@ func TestTaggingPeers(t *testing.T) { } func TestTaggingUseful(t *testing.T) { - peerSampleInterval := 10 * 
time.Millisecond + peerSampleInterval := 1 * time.Millisecond ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - me := newTestEngine(ctx, "engine", peerSampleInterval) + + sampleCh := make(chan struct{}) + me := newTestEngineWithSampling(ctx, "engine", peerSampleInterval, sampleCh) friend := peer.ID("friend") block := blocks.NewBlock([]byte("foobar")) @@ -1022,22 +1028,38 @@ func TestTaggingUseful(t *testing.T) { if me.PeerTagger.count(me.Engine.tagUseful) != 0 { t.Fatal("Peers should be untagged but weren't") } + me.Engine.MessageSent(friend, msg) - time.Sleep(15 * time.Millisecond) + + for j := 0; j < 3; j++ { + <-sampleCh + } + if me.PeerTagger.count(me.Engine.tagUseful) != 1 { t.Fatal("Peers should be tagged but weren't") } - time.Sleep(peerSampleInterval * 10) + + for j := 0; j < longTermRatio; j++ { + <-sampleCh + } } if me.PeerTagger.count(me.Engine.tagUseful) == 0 { t.Fatal("peers should still be tagged due to long-term usefulness") } - time.Sleep(peerSampleInterval * 2) + + for j := 0; j < longTermRatio; j++ { + <-sampleCh + } + if me.PeerTagger.count(me.Engine.tagUseful) == 0 { t.Fatal("peers should still be tagged due to long-term usefulness") } - time.Sleep(peerSampleInterval * 30) + + for j := 0; j < longTermRatio; j++ { + <-sampleCh + } + if me.PeerTagger.count(me.Engine.tagUseful) != 0 { t.Fatal("peers should finally be untagged") } From 1953a511dc79cdab2fbfbdbe2b1c99b9589d3a35 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 13 Mar 2020 15:58:44 -0400 Subject: [PATCH 0870/1035] refactor: clean up logs This commit was moved from ipfs/go-bitswap@ddf64ae29de630ec6b9af1ca4ea2c711b663c880 --- bitswap/internal/decision/engine.go | 58 +++++++------------ bitswap/internal/decision/engine_test.go | 11 ++-- bitswap/internal/logutil/logutil.go | 26 --------- bitswap/internal/messagequeue/messagequeue.go | 40 +++++++------ .../internal/peermanager/peerwantmanager.go | 8 +-- 
.../internal/session/peerresponsetracker.go | 10 ++-- bitswap/internal/session/session.go | 35 +++++------ bitswap/internal/session/sessionwantsender.go | 18 +----- .../sessionpeermanager/sessionpeermanager.go | 7 ++- bitswap/internal/wantmanager/wantmanager.go | 3 +- 10 files changed, 79 insertions(+), 137 deletions(-) delete mode 100644 bitswap/internal/logutil/logutil.go diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 5c7da903c..4b2dea497 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -418,7 +418,7 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { // Create a new message msg := bsmsg.New(true) - // log.Debugf(" %s got %d tasks", lu.P(e.self), len(nextTasks)) + log.Debugw("Bitswap process tasks", "local", e.self, "taskCount", len(nextTasks)) // Amount of data in the request queue still waiting to be popped msg.SetPendingBytes(int32(pendingBytes)) @@ -456,12 +456,11 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { if blk == nil { // If the client requested DONT_HAVE, add DONT_HAVE to the message if t.SendDontHave { - // log.Debugf(" make evlp %s->%s DONT_HAVE (expected block) %s", lu.P(e.self), lu.P(p), lu.C(c)) msg.AddDontHave(c) } } else { // Add the block to the message - // log.Debugf(" make evlp %s->%s block: %s (%d bytes)", lu.P(e.self), lu.P(p), lu.C(c), len(blk.RawData())) + // log.Debugf(" make evlp %s->%s block: %s (%d bytes)", e.self, p, c, len(blk.RawData())) msg.AddBlock(blk) } } @@ -472,7 +471,7 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { continue } - // log.Debugf(" sending message %s->%s (%d blks / %d presences / %d bytes)\n", lu.P(e.self), lu.P(p), blkCount, presenceCount, msg.Size()) + log.Debugw("Bitswap engine -> msg", "local", e.self, "to", p, "blockCount", len(msg.Blocks()), "presenceCount", len(msg.BlockPresences()), "size", msg.Size()) return &Envelope{ Peer: p, 
Message: msg, @@ -512,21 +511,21 @@ func (e *Engine) Peers() []peer.ID { func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) { entries := m.Wantlist() - // if len(entries) > 0 { - // log.Debugf("engine-%s received message from %s with %d entries\n", lu.P(e.self), lu.P(p), len(entries)) - // for _, et := range entries { - // if !et.Cancel { - // if et.WantType == pb.Message_Wantlist_Have { - // log.Debugf(" recv %s<-%s: want-have %s\n", lu.P(e.self), lu.P(p), lu.C(et.Cid)) - // } else { - // log.Debugf(" recv %s<-%s: want-block %s\n", lu.P(e.self), lu.P(p), lu.C(et.Cid)) - // } - // } - // } - // } + if len(entries) > 0 { + log.Debugw("Bitswap engine <- msg", "local", e.self, "from", p, "entryCount", len(entries)) + for _, et := range entries { + if !et.Cancel { + if et.WantType == pb.Message_Wantlist_Have { + log.Debugw("Bitswap engine <- want-have", "local", e.self, "from", p, "cid", et.Cid) + } else { + log.Debugw("Bitswap engine <- want-block", "local", e.self, "from", p, "cid", et.Cid) + } + } + } + } if m.Empty() { - log.Debugf("received empty message from %s", p) + log.Infof("received empty message from %s", p) } newWorkExists := false @@ -556,7 +555,7 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap // Record how many bytes were received in the ledger blks := m.Blocks() for _, block := range blks { - log.Debugf("got block %s %d bytes", block, len(block.RawData())) + log.Debugw("Bitswap engine <- block", "local", e.self, "from", p, "cid", block.Cid(), "size", len(block.RawData())) l.ReceivedBytes(len(block.RawData())) } @@ -569,7 +568,7 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap // Remove cancelled blocks from the queue for _, entry := range cancels { - // log.Debugf("%s<-%s cancel %s", lu.P(e.self), lu.P(p), lu.C(entry.Cid)) + log.Debugw("Bitswap engine <- cancel", "local", e.self, "from", p, "cid", entry.Cid) if l.CancelWant(entry.Cid) { 
e.peerRequestQueue.Remove(entry.Cid, p) } @@ -585,6 +584,8 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap // If the block was not found if !found { + log.Debugw("Bitswap engine: block not found", "local", e.self, "from", p, "cid", entry.Cid, "sendDontHave", entry.SendDontHave) + // Only add the task to the queue if the requester wants a DONT_HAVE if e.sendDontHaves && entry.SendDontHave { newWorkExists = true @@ -593,12 +594,6 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap isWantBlock = true } - // if isWantBlock { - // log.Debugf(" put rq %s->%s %s as want-block (not found)\n", lu.P(e.self), lu.P(p), lu.C(entry.Cid)) - // } else { - // log.Debugf(" put rq %s->%s %s as want-have (not found)\n", lu.P(e.self), lu.P(p), lu.C(entry.Cid)) - // } - activeEntries = append(activeEntries, peertask.Task{ Topic: c, Priority: entry.Priority, @@ -611,18 +606,13 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap }, }) } - // log.Debugf(" not putting rq %s->%s %s (not found, SendDontHave false)\n", lu.P(e.self), lu.P(p), lu.C(entry.Cid)) } else { // The block was found, add it to the queue newWorkExists = true isWantBlock := e.sendAsBlock(entry.WantType, blockSize) - // if isWantBlock { - // log.Debugf(" put rq %s->%s %s as want-block (%d bytes)\n", lu.P(e.self), lu.P(p), lu.C(entry.Cid), blockSize) - // } else { - // log.Debugf(" put rq %s->%s %s as want-have (%d bytes)\n", lu.P(e.self), lu.P(p), lu.C(entry.Cid), blockSize) - // } + log.Debugw("Bitswap engine: block found", "local", e.self, "from", p, "cid", entry.Cid, "isWantBlock", isWantBlock) // entrySize is the amount of space the entry takes up in the // message we send to the recipient. 
If we're sending a block, the @@ -695,12 +685,6 @@ func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block, haves []cid.Cid) blockSize := blockSizes[k] isWantBlock := e.sendAsBlock(entry.WantType, blockSize) - // if isWantBlock { - // log.Debugf(" add-block put rq %s->%s %s as want-block (%d bytes)\n", lu.P(e.self), lu.P(l.Partner), lu.C(k), blockSize) - // } else { - // log.Debugf(" add-block put rq %s->%s %s as want-have (%d bytes)\n", lu.P(e.self), lu.P(l.Partner), lu.C(k), blockSize) - // } - entrySize := blockSize if !isWantBlock { entrySize = bsmsg.BlockPresenceSize(k) diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index 0ac01107f..6f5a193b6 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -10,7 +10,6 @@ import ( "testing" "time" - lu "github.com/ipfs/go-bitswap/internal/logutil" "github.com/ipfs/go-bitswap/internal/testutil" message "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" @@ -780,12 +779,12 @@ func formatBlocksDiff(blks []blocks.Block, expBlks []string) string { var out bytes.Buffer out.WriteString(fmt.Sprintf("Blocks (%d):\n", len(blks))) for _, b := range blks { - out.WriteString(fmt.Sprintf(" %s: %s\n", lu.C(b.Cid()), b.RawData())) + out.WriteString(fmt.Sprintf(" %s: %s\n", b.Cid(), b.RawData())) } out.WriteString(fmt.Sprintf("Expected (%d):\n", len(expBlks))) for _, k := range expBlks { expected := blocks.NewBlock([]byte(k)) - out.WriteString(fmt.Sprintf(" %s: %s\n", lu.C(expected.Cid()), k)) + out.WriteString(fmt.Sprintf(" %s: %s\n", expected.Cid(), k)) } return out.String() } @@ -798,16 +797,16 @@ func formatPresencesDiff(presences []message.BlockPresence, expHaves []string, e if p.Type == pb.Message_DontHave { t = "DONT_HAVE" } - out.WriteString(fmt.Sprintf(" %s - %s\n", lu.C(p.Cid), t)) + out.WriteString(fmt.Sprintf(" %s - %s\n", p.Cid, t)) } out.WriteString(fmt.Sprintf("Expected (%d):\n", 
len(expHaves)+len(expDontHaves))) for _, k := range expHaves { expected := blocks.NewBlock([]byte(k)) - out.WriteString(fmt.Sprintf(" %s: %s - HAVE\n", lu.C(expected.Cid()), k)) + out.WriteString(fmt.Sprintf(" %s: %s - HAVE\n", expected.Cid(), k)) } for _, k := range expDontHaves { expected := blocks.NewBlock([]byte(k)) - out.WriteString(fmt.Sprintf(" %s: %s - DONT_HAVE\n", lu.C(expected.Cid()), k)) + out.WriteString(fmt.Sprintf(" %s: %s - DONT_HAVE\n", expected.Cid(), k)) } return out.String() } diff --git a/bitswap/internal/logutil/logutil.go b/bitswap/internal/logutil/logutil.go deleted file mode 100644 index 8cba2a47c..000000000 --- a/bitswap/internal/logutil/logutil.go +++ /dev/null @@ -1,26 +0,0 @@ -package logutil - -import ( - cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-core/peer" -) - -func C(c cid.Cid) string { - if c.Defined() { - str := c.String() - return str[len(str)-6:] - } - return "" -} - -func P(p peer.ID) string { - if p != "" { - str := p.String() - limit := 6 - if len(str) < limit { - limit = len(str) - } - return str[len(str)-limit:] - } - return "" -} diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 922ab6339..b3eb53844 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -149,6 +149,7 @@ type DontHaveTimeoutManager interface { // New creates a new MessageQueue. 
func New(ctx context.Context, p peer.ID, network MessageNetwork, onDontHaveTimeout OnDontHaveTimeout) *MessageQueue { onTimeout := func(ks []cid.Cid) { + log.Infow("Bitswap: timeout waiting for blocks", "cids", ks, "peer", p) onDontHaveTimeout(p, ks) } dhTimeoutMgr := newDontHaveTimeoutMgr(ctx, newPeerConnection(p, network), onTimeout) @@ -401,7 +402,7 @@ func (mq *MessageQueue) sendMessage() { return } - // mq.logOutgoingMessage(message) + mq.logOutgoingMessage(message) // Try to send this message repeatedly for i := 0; i < maxRetries; i++ { @@ -450,24 +451,25 @@ func (mq *MessageQueue) simulateDontHaveWithTimeout(msg bsmsg.BitSwapMessage) { mq.dhTimeoutMgr.AddPending(wants) } -// func (mq *MessageQueue) logOutgoingMessage(msg bsmsg.BitSwapMessage) { -// entries := msg.Wantlist() -// for _, e := range entries { -// if e.Cancel { -// if e.WantType == pb.Message_Wantlist_Have { -// log.Debugf("send %s->%s: cancel-have %s\n", lu.P(mq.network.Self()), lu.P(mq.p), lu.C(e.Cid)) -// } else { -// log.Debugf("send %s->%s: cancel-block %s\n", lu.P(mq.network.Self()), lu.P(mq.p), lu.C(e.Cid)) -// } -// } else { -// if e.WantType == pb.Message_Wantlist_Have { -// log.Debugf("send %s->%s: want-have %s\n", lu.P(mq.network.Self()), lu.P(mq.p), lu.C(e.Cid)) -// } else { -// log.Debugf("send %s->%s: want-block %s\n", lu.P(mq.network.Self()), lu.P(mq.p), lu.C(e.Cid)) -// } -// } -// } -// } +func (mq *MessageQueue) logOutgoingMessage(msg bsmsg.BitSwapMessage) { + self := mq.network.Self() + entries := msg.Wantlist() + for _, e := range entries { + if e.Cancel { + if e.WantType == pb.Message_Wantlist_Have { + log.Debugw("Bitswap -> cancel-have", "local", self, "to", mq.p, "cid", e.Cid) + } else { + log.Debugw("Bitswap -> cancel-block", "local", self, "to", mq.p, "cid", e.Cid) + } + } else { + if e.WantType == pb.Message_Wantlist_Have { + log.Debugw("Bitswap -> want-have", "local", self, "to", mq.p, "cid", e.Cid) + } else { + log.Debugw("Bitswap -> want-block", "local", self, "to", 
mq.p, "cid", e.Cid) + } + } + } +} func (mq *MessageQueue) hasPendingWork() bool { return mq.pendingWorkCount() > 0 diff --git a/bitswap/internal/peermanager/peerwantmanager.go b/bitswap/internal/peermanager/peerwantmanager.go index 9833b3e8b..2e8658bc8 100644 --- a/bitswap/internal/peermanager/peerwantmanager.go +++ b/bitswap/internal/peermanager/peerwantmanager.go @@ -4,8 +4,6 @@ import ( "bytes" "fmt" - lu "github.com/ipfs/go-bitswap/internal/logutil" - cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" ) @@ -194,12 +192,12 @@ func (pwm *peerWantManager) GetWantHaves() []cid.Cid { func (pwm *peerWantManager) String() string { var b bytes.Buffer for p, ws := range pwm.peerWants { - b.WriteString(fmt.Sprintf("Peer %s: %d want-have / %d want-block:\n", lu.P(p), ws.wantHaves.Len(), ws.wantBlocks.Len())) + b.WriteString(fmt.Sprintf("Peer %s: %d want-have / %d want-block:\n", p, ws.wantHaves.Len(), ws.wantBlocks.Len())) for _, c := range ws.wantHaves.Keys() { - b.WriteString(fmt.Sprintf(" want-have %s\n", lu.C(c))) + b.WriteString(fmt.Sprintf(" want-have %s\n", c)) } for _, c := range ws.wantBlocks.Keys() { - b.WriteString(fmt.Sprintf(" want-block %s\n", lu.C(c))) + b.WriteString(fmt.Sprintf(" want-block %s\n", c)) } } return b.String() diff --git a/bitswap/internal/session/peerresponsetracker.go b/bitswap/internal/session/peerresponsetracker.go index fb3c111bf..63e904614 100644 --- a/bitswap/internal/session/peerresponsetracker.go +++ b/bitswap/internal/session/peerresponsetracker.go @@ -18,10 +18,14 @@ func newPeerResponseTracker() *peerResponseTracker { } } +// receivedBlockFrom is called when a block is received from a peer +// (only called first time block is received) func (prt *peerResponseTracker) receivedBlockFrom(from peer.ID) { prt.firstResponder[from]++ } +// choose picks a peer from the list of candidate peers, favouring those peers +// that were first to send us previous blocks func (prt *peerResponseTracker) choose(peers 
[]peer.ID) peer.ID { if len(peers) == 0 { return "" @@ -41,8 +45,6 @@ func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID { for _, p := range peers { counted += float64(prt.getPeerCount(p)) / float64(total) if counted > rnd { - // log.Warnf(" chose %s from %s (%d) / %s (%d) with pivot %.2f", - // lu.P(p), lu.P(peers[0]), prt.firstResponder[peers[0]], lu.P(peers[1]), prt.firstResponder[peers[1]], rnd) return p } } @@ -51,11 +53,11 @@ func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID { // math that doesn't quite cover the whole range of peers in the for loop // so just choose the last peer. index := len(peers) - 1 - // log.Warnf(" chose last (indx %d) %s from %s (%d) / %s (%d) with pivot %.2f", - // index, lu.P(peers[index]), lu.P(peers[0]), prt.firstResponder[peers[0]], lu.P(peers[1]), prt.firstResponder[peers[1]], rnd) return peers[index] } +// getPeerCount returns the number of times the peer was first to send us a +// block func (prt *peerResponseTracker) getPeerCount(p peer.ID) int { count, ok := prt.firstResponder[p] if ok { diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index faf01cb7a..079a4f195 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -6,7 +6,6 @@ import ( bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" bsgetter "github.com/ipfs/go-bitswap/internal/getter" - lu "github.com/ipfs/go-bitswap/internal/logutil" notifications "github.com/ipfs/go-bitswap/internal/notifications" bspm "github.com/ipfs/go-bitswap/internal/peermanager" bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" @@ -178,7 +177,7 @@ func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontH ks = interestedRes[0] haves = interestedRes[1] dontHaves = interestedRes[2] - // s.logReceiveFrom(from, ks, haves, dontHaves) + s.logReceiveFrom(from, ks, haves, dontHaves) // Inform the session want sender that a message has been received 
s.sws.Update(from, ks, haves, dontHaves) @@ -194,19 +193,19 @@ func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontH } } -// func (s *Session) logReceiveFrom(from peer.ID, interestedKs []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { -// // log.Infof("Ses%d<-%s: %d blocks, %d haves, %d dont haves\n", -// // s.id, from, len(interestedKs), len(wantedHaves), len(wantedDontHaves)) -// for _, c := range interestedKs { -// log.Warnf("Ses%d %s<-%s: block %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c)) -// } -// for _, c := range haves { -// log.Warnf("Ses%d %s<-%s: HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c)) -// } -// for _, c := range dontHaves { -// log.Warnf("Ses%d %s<-%s: DONT_HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c)) -// } -// } +func (s *Session) logReceiveFrom(from peer.ID, interestedKs []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { + // log.Debugf("Ses%d<-%s: %d blocks, %d haves, %d dont haves\n", + // s.id, from, len(interestedKs), len(wantedHaves), len(wantedDontHaves)) + for _, c := range interestedKs { + log.Debugw("Bitswap <- block", "local", s.self, "from", from, "cid", c, "session", s.id) + } + for _, c := range haves { + log.Debugw("Bitswap <- HAVE", "local", s.self, "from", from, "cid", c, "session", s.id) + } + for _, c := range dontHaves { + log.Debugw("Bitswap <- DONT_HAVE", "local", s.self, "from", from, "cid", c, "session", s.id) + } +} // GetBlock fetches a single block. 
func (s *Session) GetBlock(parent context.Context, k cid.Cid) (blocks.Block, error) { @@ -328,9 +327,6 @@ func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { wants = s.sw.PrepareBroadcast() } - // log.Warnf("\n\n\n\n\nSes%d: broadcast %d keys\n\n\n\n\n", s.id, len(live)) - // log.Infof("Ses%d: broadcast %d keys\n", s.id, len(live)) - // Broadcast a want-have for the live wants to everyone we're connected to s.wm.BroadcastWantHaves(ctx, s.id, wants) @@ -340,7 +336,7 @@ func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { // Search for providers who have the first want in the list. // Typically if the provider has the first block they will have // the rest of the blocks also. - log.Infof("Ses%d: FindMorePeers with want %s (1st of %d wants)", s.id, lu.C(wants[0]), len(wants)) + log.Infof("Ses%d: FindMorePeers with want %s (1st of %d wants)", s.id, wants[0], len(wants)) s.findMorePeers(ctx, wants[0]) } s.resetIdleTick() @@ -453,7 +449,6 @@ func (s *Session) resetIdleTick() { tickDelay = s.initialSearchDelay } else { avLat := s.latencyTrkr.averageLatency() - // log.Warnf("averageLatency %s", avLat) tickDelay = s.baseTickDelay + (3 * avLat) } tickDelay = tickDelay * time.Duration(1+s.consecutiveTicks) diff --git a/bitswap/internal/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go index df963f9e9..7af7b32a4 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -4,7 +4,6 @@ import ( "context" bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" - lu "github.com/ipfs/go-bitswap/internal/logutil" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" @@ -135,7 +134,6 @@ func (sws *sessionWantSender) Add(ks []cid.Cid) { // Update is called when the session receives a message with incoming blocks // or HAVE / DONT_HAVE func (sws *sessionWantSender) Update(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves 
[]cid.Cid) { - // fmt.Printf("Update(%s, %d, %d, %d, %t)\n", lu.P(from), len(ks), len(haves), len(dontHaves)) hasUpdate := len(ks) > 0 || len(haves) > 0 || len(dontHaves) > 0 if !hasUpdate { return @@ -149,7 +147,6 @@ func (sws *sessionWantSender) Update(from peer.ID, ks []cid.Cid, haves []cid.Cid // SignalAvailability is called by the PeerManager to signal that a peer has // connected / disconnected func (sws *sessionWantSender) SignalAvailability(p peer.ID, isAvailable bool) { - // fmt.Printf("SignalAvailability(%s, %t)\n", lu.P(p), isAvailable) availability := peerAvailability{p, isAvailable} sws.addChange(change{availability: availability}) } @@ -236,9 +233,7 @@ func (sws *sessionWantSender) onChange(changes []change) { // If there are some connected peers, send any pending wants if sws.spm.HasPeers() { - // fmt.Printf("sendNextWants()\n") sws.sendNextWants(newlyAvailable) - // fmt.Println(sws) } } @@ -280,7 +275,6 @@ func (sws *sessionWantSender) processAvailability(availability map[peer.ID]bool) // trackWant creates a new entry in the map of CID -> want info func (sws *sessionWantSender) trackWant(c cid.Cid) { - // fmt.Printf("trackWant %s\n", lu.C(c)) if _, ok := sws.wants[c]; ok { return } @@ -304,7 +298,7 @@ func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { for _, upd := range updates { for _, c := range upd.ks { blkCids.Add(c) - log.Warnf("received block %s", lu.C(c)) + // Remove the want removed := sws.removeWant(c) if removed != nil { @@ -382,7 +376,7 @@ func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { go func() { for p := range prunePeers { // Peer doesn't have anything we want, so remove it - log.Infof("peer %s sent too many dont haves", lu.P(p)) + log.Infof("peer %s sent too many dont haves, removing from session %d", p, sws.ID()) sws.SignalAvailability(p, false) } }() @@ -469,7 +463,6 @@ func (sws *sessionWantSender) sendNextWants(newlyAvailable []peer.ID) { // We already sent a want-block to a peer 
and haven't yet received a // response yet if wi.sentTo != "" { - // fmt.Printf(" q - already sent want-block %s to %s\n", lu.C(c), lu.P(wi.sentTo)) continue } @@ -477,12 +470,9 @@ func (sws *sessionWantSender) sendNextWants(newlyAvailable []peer.ID) { // corresponding to this want, so we must wait to discover more peers if wi.bestPeer == "" { // TODO: work this out in real time instead of using bestP? - // fmt.Printf(" q - no best peer for %s\n", lu.C(c)) continue } - // fmt.Printf(" q - send best: %s: %s\n", lu.C(c), lu.P(wi.bestPeer)) - // Record that we are sending a want-block for this want to the peer sws.setWantSentTo(c, wi.bestPeer) @@ -503,12 +493,8 @@ func (sws *sessionWantSender) sendNextWants(newlyAvailable []peer.ID) { // sendWants sends want-have and want-blocks to the appropriate peers func (sws *sessionWantSender) sendWants(sends allWants) { - // fmt.Printf(" send wants to %d peers\n", len(sends)) - // For each peer we're sending a request to for p, snd := range sends { - // fmt.Printf(" send %d wants to %s\n", snd.wantBlocks.Len(), lu.P(p)) - // Piggyback some other want-haves onto the request to the peer for _, c := range sws.getPiggybackWantHaves(p, snd.wantBlocks) { snd.wantHaves.Add(c) diff --git a/bitswap/internal/sessionpeermanager/sessionpeermanager.go b/bitswap/internal/sessionpeermanager/sessionpeermanager.go index 90233c72c..499aa830b 100644 --- a/bitswap/internal/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/internal/sessionpeermanager/sessionpeermanager.go @@ -4,7 +4,6 @@ import ( "fmt" "sync" - lu "github.com/ipfs/go-bitswap/internal/logutil" logging "github.com/ipfs/go-log" peer "github.com/libp2p/go-libp2p-core/peer" @@ -30,6 +29,7 @@ type SessionPeerManager struct { tagger PeerTagger tag string + id uint64 plk sync.RWMutex peers map[peer.ID]struct{} peersDiscovered bool @@ -38,6 +38,7 @@ type SessionPeerManager struct { // New creates a new SessionPeerManager func New(id uint64, tagger PeerTagger) *SessionPeerManager { 
return &SessionPeerManager{ + id: id, tag: fmt.Sprint("bs-ses-", id), tagger: tagger, peers: make(map[peer.ID]struct{}), @@ -62,7 +63,7 @@ func (spm *SessionPeerManager) AddPeer(p peer.ID) bool { // connection spm.tagger.TagPeer(p, spm.tag, sessionPeerTagValue) - log.Debugf("Added peer %s to session (%d peers)\n", p, len(spm.peers)) + log.Debugw("Bitswap: Added peer to session", "session", spm.id, "peer", p, "peerCount", len(spm.peers)) return true } @@ -79,7 +80,7 @@ func (spm *SessionPeerManager) RemovePeer(p peer.ID) bool { delete(spm.peers, p) spm.tagger.UntagPeer(p, spm.tag) - log.Debugf("Removed peer %s from session (%d peers)", lu.P(p), len(spm.peers)) + log.Debugw("Bitswap: removed peer from session", "session", spm.id, "peer", p, "peerCount", len(spm.peers)) return true } diff --git a/bitswap/internal/wantmanager/wantmanager.go b/bitswap/internal/wantmanager/wantmanager.go index 254ea9796..0301356dc 100644 --- a/bitswap/internal/wantmanager/wantmanager.go +++ b/bitswap/internal/wantmanager/wantmanager.go @@ -7,6 +7,7 @@ import ( bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" "github.com/ipfs/go-bitswap/internal/sessionmanager" bsswl "github.com/ipfs/go-bitswap/internal/sessionwantlist" + "gopkg.in/src-d/go-log.v1" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" @@ -75,7 +76,7 @@ func (wm *WantManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid.Ci // BroadcastWantHaves is called when want-haves should be broadcast to all // connected peers (as part of session discovery) func (wm *WantManager) BroadcastWantHaves(ctx context.Context, ses uint64, wantHaves []cid.Cid) { - // log.Warnf("BroadcastWantHaves session%d: %s", ses, wantHaves) + log.Infof("BroadcastWantHaves session%d: %s", ses, wantHaves) // Record broadcast wants wm.bcwl.Add(wantHaves, ses) From daa60b16df425e40663e1c1693204d3671fbd79e Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Mon, 16 Mar 2020 12:20:44 -0400 Subject: [PATCH 
0871/1035] refactor: adjust log levels This commit was moved from ipfs/go-bitswap@cee7d2d18708ad41de47ba346d7756774f5419fe --- bitswap/internal/messagequeue/messagequeue.go | 7 +++++++ bitswap/internal/session/session.go | 11 ++++++++--- bitswap/internal/wantmanager/wantmanager.go | 6 ++++-- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index b3eb53844..d87c03f7a 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -14,9 +14,11 @@ import ( logging "github.com/ipfs/go-log" peer "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p/p2p/protocol/ping" + "go.uber.org/zap" ) var log = logging.Logger("bitswap") +var sflog = log.Desugar() const ( defaultRebroadcastInterval = 30 * time.Second @@ -452,6 +454,11 @@ func (mq *MessageQueue) simulateDontHaveWithTimeout(msg bsmsg.BitSwapMessage) { } func (mq *MessageQueue) logOutgoingMessage(msg bsmsg.BitSwapMessage) { + // Save some CPU cycles and allocations if log level is higher than debug + if ce := sflog.Check(zap.DebugLevel, "Bitswap -> send wants"); ce == nil { + return + } + self := mq.network.Self() entries := msg.Wantlist() for _, e := range entries { diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index 079a4f195..412faba52 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -15,9 +15,11 @@ import ( logging "github.com/ipfs/go-log" peer "github.com/libp2p/go-libp2p-core/peer" loggables "github.com/libp2p/go-libp2p-loggables" + "go.uber.org/zap" ) var log = logging.Logger("bs:sess") +var sflog = log.Desugar() const ( broadcastLiveWantsLimit = 64 @@ -194,8 +196,11 @@ func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontH } func (s *Session) logReceiveFrom(from peer.ID, interestedKs []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) 
{ - // log.Debugf("Ses%d<-%s: %d blocks, %d haves, %d dont haves\n", - // s.id, from, len(interestedKs), len(wantedHaves), len(wantedDontHaves)) + // Save some CPU cycles if log level is higher than debug + if ce := sflog.Check(zap.DebugLevel, "Bitswap <- rcv message"); ce == nil { + return + } + for _, c := range interestedKs { log.Debugw("Bitswap <- block", "local", s.self, "from", from, "cid", c, "session", s.id) } @@ -336,7 +341,7 @@ func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { // Search for providers who have the first want in the list. // Typically if the provider has the first block they will have // the rest of the blocks also. - log.Infof("Ses%d: FindMorePeers with want %s (1st of %d wants)", s.id, wants[0], len(wants)) + log.Debugf("Ses%d: FindMorePeers with want %s (1st of %d wants)", s.id, wants[0], len(wants)) s.findMorePeers(ctx, wants[0]) } s.resetIdleTick() diff --git a/bitswap/internal/wantmanager/wantmanager.go b/bitswap/internal/wantmanager/wantmanager.go index 0301356dc..b34056b14 100644 --- a/bitswap/internal/wantmanager/wantmanager.go +++ b/bitswap/internal/wantmanager/wantmanager.go @@ -7,12 +7,14 @@ import ( bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" "github.com/ipfs/go-bitswap/internal/sessionmanager" bsswl "github.com/ipfs/go-bitswap/internal/sessionwantlist" - "gopkg.in/src-d/go-log.v1" + logging "github.com/ipfs/go-log" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" ) +var log = logging.Logger("bitswap") + // PeerHandler sends wants / cancels to other peers type PeerHandler interface { // Connected is called when a peer connects, with any initial want-haves @@ -76,7 +78,7 @@ func (wm *WantManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid.Ci // BroadcastWantHaves is called when want-haves should be broadcast to all // connected peers (as part of session discovery) func (wm *WantManager) BroadcastWantHaves(ctx context.Context, ses uint64, 
wantHaves []cid.Cid) { - log.Infof("BroadcastWantHaves session%d: %s", ses, wantHaves) + log.Debugf("BroadcastWantHaves session%d: %s", ses, wantHaves) // Record broadcast wants wm.bcwl.Add(wantHaves, ses) From 5b22e719a2a56efae8121b9057808f5e9df73f43 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 13 Mar 2020 18:15:15 -0700 Subject: [PATCH 0872/1035] feat: expose the full wantlist through GetWantlist And expose a separate function for _just_ getting want-blocks. When the user runs `ipfs bitswap wantlist`, they expect to see everything the node is currently looking for. Co-Authored-By: dirkmc This commit was moved from ipfs/go-bitswap@808f5a08d2bb86ed98b303f9ec3f9058a83196c5 --- bitswap/bitswap.go | 8 ++++++- bitswap/internal/peermanager/peermanager.go | 10 ++++++++- .../internal/peermanager/peerwantmanager.go | 22 +++++++++++++++++++ 3 files changed, 38 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index a2bd56ca2..f2217b85c 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -503,11 +503,17 @@ func (bs *Bitswap) Close() error { return bs.process.Close() } -// GetWantlist returns the current local wantlist. +// GetWantlist returns the current local wantlist (both want-blocks and +// want-haves). func (bs *Bitswap) GetWantlist() []cid.Cid { return bs.pm.CurrentWants() } +// GetWantBlocks returns the current list of want-blocks. +func (bs *Bitswap) GetWantBlocks() []cid.Cid { + return bs.pm.CurrentWantBlocks() +} + // GetWanthaves returns the current list of want-haves. 
func (bs *Bitswap) GetWantHaves() []cid.Cid { return bs.pm.CurrentWantHaves() diff --git a/bitswap/internal/peermanager/peermanager.go b/bitswap/internal/peermanager/peermanager.go index ab73fd965..726d4be77 100644 --- a/bitswap/internal/peermanager/peermanager.go +++ b/bitswap/internal/peermanager/peermanager.go @@ -170,11 +170,19 @@ func (pm *PeerManager) SendCancels(ctx context.Context, cancelKs []cid.Cid) { } } -// CurrentWants returns the list of pending want-blocks +// CurrentWants returns the list of pending wants (both want-haves and want-blocks). func (pm *PeerManager) CurrentWants() []cid.Cid { pm.pqLk.RLock() defer pm.pqLk.RUnlock() + return pm.pwm.GetWants() +} + +// CurrentWantBlocks returns the list of pending want-blocks +func (pm *PeerManager) CurrentWantBlocks() []cid.Cid { + pm.pqLk.RLock() + defer pm.pqLk.RUnlock() + return pm.pwm.GetWantBlocks() } diff --git a/bitswap/internal/peermanager/peerwantmanager.go b/bitswap/internal/peermanager/peerwantmanager.go index 2e8658bc8..27e37ccd9 100644 --- a/bitswap/internal/peermanager/peerwantmanager.go +++ b/bitswap/internal/peermanager/peerwantmanager.go @@ -189,6 +189,28 @@ func (pwm *peerWantManager) GetWantHaves() []cid.Cid { return res.Keys() } +// GetWants returns the set of all wants (both want-blocks and want-haves). 
+func (pwm *peerWantManager) GetWants() []cid.Cid { + res := cid.NewSet() + + // Iterate over all known peers + for _, pws := range pwm.peerWants { + // Iterate over all want-blocks + for _, c := range pws.wantBlocks.Keys() { + // Add the CID to the results + res.Add(c) + } + + // Iterate over all want-haves + for _, c := range pws.wantHaves.Keys() { + // Add the CID to the results + res.Add(c) + } + } + + return res.Keys() +} + func (pwm *peerWantManager) String() string { var b bytes.Buffer for p, ws := range pwm.peerWants { From b04d12dc436d9a91b98cb8b3f6b991e61f7be81d Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 17 Mar 2020 13:02:00 -0700 Subject: [PATCH 0873/1035] feat: remove the context from the donthavetimeoutmanager (#303) This removes one goroutine per peer which tends to be a pretty big deal. This brings go-ipfs down from 5.5 to 4.5 goroutines per peer. This commit was moved from ipfs/go-bitswap@5a278ff0045cd48b53d24a485336ccf0d3413318 --- .../messagequeue/donthavetimeoutmgr.go | 20 +++---------- .../messagequeue/donthavetimeoutmgr_test.go | 28 +++++++++---------- bitswap/internal/messagequeue/messagequeue.go | 2 +- 3 files changed, 19 insertions(+), 31 deletions(-) diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr.go b/bitswap/internal/messagequeue/donthavetimeoutmgr.go index d1c6be58f..e5ce0b287 100644 --- a/bitswap/internal/messagequeue/donthavetimeoutmgr.go +++ b/bitswap/internal/messagequeue/donthavetimeoutmgr.go @@ -72,17 +72,17 @@ type dontHaveTimeoutMgr struct { // newDontHaveTimeoutMgr creates a new dontHaveTimeoutMgr // onDontHaveTimeout is called when pending keys expire (not cancelled before timeout) -func newDontHaveTimeoutMgr(ctx context.Context, pc PeerConnection, onDontHaveTimeout func([]cid.Cid)) *dontHaveTimeoutMgr { - return newDontHaveTimeoutMgrWithParams(ctx, pc, onDontHaveTimeout, dontHaveTimeout, +func newDontHaveTimeoutMgr(pc PeerConnection, onDontHaveTimeout func([]cid.Cid)) *dontHaveTimeoutMgr { + return 
newDontHaveTimeoutMgrWithParams(pc, onDontHaveTimeout, dontHaveTimeout, latencyMultiplier, maxExpectedWantProcessTime) } // newDontHaveTimeoutMgrWithParams is used by the tests -func newDontHaveTimeoutMgrWithParams(ctx context.Context, pc PeerConnection, onDontHaveTimeout func([]cid.Cid), +func newDontHaveTimeoutMgrWithParams(pc PeerConnection, onDontHaveTimeout func([]cid.Cid), defaultTimeout time.Duration, latencyMultiplier int, maxExpectedWantProcessTime time.Duration) *dontHaveTimeoutMgr { - ctx, shutdown := context.WithCancel(ctx) + ctx, shutdown := context.WithCancel(context.Background()) mqp := &dontHaveTimeoutMgr{ ctx: ctx, shutdown: shutdown, @@ -101,10 +101,7 @@ func newDontHaveTimeoutMgrWithParams(ctx context.Context, pc PeerConnection, onD // Shutdown the dontHaveTimeoutMgr. Any subsequent call to Start() will be ignored func (dhtm *dontHaveTimeoutMgr) Shutdown() { dhtm.shutdown() -} -// onShutdown is called when the dontHaveTimeoutMgr shuts down -func (dhtm *dontHaveTimeoutMgr) onShutdown() { dhtm.lk.Lock() defer dhtm.lk.Unlock() @@ -114,13 +111,6 @@ func (dhtm *dontHaveTimeoutMgr) onShutdown() { } } -// closeAfterContext is called when the dontHaveTimeoutMgr starts. -// It monitors for the context being cancelled. -func (dhtm *dontHaveTimeoutMgr) closeAfterContext() { - <-dhtm.ctx.Done() - dhtm.onShutdown() -} - // Start the dontHaveTimeoutMgr. 
This method is idempotent func (dhtm *dontHaveTimeoutMgr) Start() { dhtm.lk.Lock() @@ -132,8 +122,6 @@ func (dhtm *dontHaveTimeoutMgr) Start() { } dhtm.started = true - go dhtm.closeAfterContext() - // If we already have a measure of latency to the peer, use it to // calculate a reasonable timeout latency := dhtm.peerConn.Latency() diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go index 3ac21a78c..5c0de884f 100644 --- a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go +++ b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go @@ -75,13 +75,13 @@ func TestDontHaveTimeoutMgrTimeout(t *testing.T) { latMultiplier := 2 expProcessTime := 5 * time.Millisecond expectedTimeout := expProcessTime + latency*time.Duration(latMultiplier) - ctx := context.Background() pc := &mockPeerConn{latency: latency} tr := timeoutRecorder{} - dhtm := newDontHaveTimeoutMgrWithParams(ctx, pc, tr.onTimeout, + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, dontHaveTimeout, latMultiplier, expProcessTime) dhtm.Start() + defer dhtm.Shutdown() // Add first set of keys dhtm.AddPending(firstks) @@ -125,13 +125,13 @@ func TestDontHaveTimeoutMgrCancel(t *testing.T) { latMultiplier := 1 expProcessTime := time.Duration(0) expectedTimeout := latency - ctx := context.Background() pc := &mockPeerConn{latency: latency} tr := timeoutRecorder{} - dhtm := newDontHaveTimeoutMgrWithParams(ctx, pc, tr.onTimeout, + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, dontHaveTimeout, latMultiplier, expProcessTime) dhtm.Start() + defer dhtm.Shutdown() // Add keys dhtm.AddPending(ks) @@ -156,13 +156,13 @@ func TestDontHaveTimeoutWantCancelWant(t *testing.T) { latMultiplier := 1 expProcessTime := time.Duration(0) expectedTimeout := latency - ctx := context.Background() pc := &mockPeerConn{latency: latency} tr := timeoutRecorder{} - dhtm := newDontHaveTimeoutMgrWithParams(ctx, pc, tr.onTimeout, + dhtm := 
newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, dontHaveTimeout, latMultiplier, expProcessTime) dhtm.Start() + defer dhtm.Shutdown() // Add keys dhtm.AddPending(ks) @@ -200,13 +200,13 @@ func TestDontHaveTimeoutRepeatedAddPending(t *testing.T) { latency := time.Millisecond * 5 latMultiplier := 1 expProcessTime := time.Duration(0) - ctx := context.Background() pc := &mockPeerConn{latency: latency} tr := timeoutRecorder{} - dhtm := newDontHaveTimeoutMgrWithParams(ctx, pc, tr.onTimeout, + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, dontHaveTimeout, latMultiplier, expProcessTime) dhtm.Start() + defer dhtm.Shutdown() // Add keys repeatedly for _, c := range ks { @@ -230,12 +230,12 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { defaultTimeout := 10 * time.Millisecond expectedTimeout := expProcessTime + defaultTimeout tr := timeoutRecorder{} - ctx := context.Background() pc := &mockPeerConn{latency: latency, err: fmt.Errorf("ping error")} - dhtm := newDontHaveTimeoutMgrWithParams(ctx, pc, tr.onTimeout, + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, defaultTimeout, latMultiplier, expProcessTime) dhtm.Start() + defer dhtm.Shutdown() // Add keys dhtm.AddPending(ks) @@ -264,12 +264,12 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { expProcessTime := time.Duration(0) defaultTimeout := 10 * time.Millisecond tr := timeoutRecorder{} - ctx := context.Background() pc := &mockPeerConn{latency: latency} - dhtm := newDontHaveTimeoutMgrWithParams(ctx, pc, tr.onTimeout, + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, defaultTimeout, latMultiplier, expProcessTime) dhtm.Start() + defer dhtm.Shutdown() // Add keys dhtm.AddPending(ks) @@ -297,12 +297,12 @@ func TestDontHaveTimeoutNoTimeoutAfterShutdown(t *testing.T) { latMultiplier := 1 expProcessTime := time.Duration(0) tr := timeoutRecorder{} - ctx := context.Background() pc := &mockPeerConn{latency: latency} - dhtm := 
newDontHaveTimeoutMgrWithParams(ctx, pc, tr.onTimeout, + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, dontHaveTimeout, latMultiplier, expProcessTime) dhtm.Start() + defer dhtm.Shutdown() // Add keys dhtm.AddPending(ks) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index d87c03f7a..8fccc0b53 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -154,7 +154,7 @@ func New(ctx context.Context, p peer.ID, network MessageNetwork, onDontHaveTimeo log.Infow("Bitswap: timeout waiting for blocks", "cids", ks, "peer", p) onDontHaveTimeout(p, ks) } - dhTimeoutMgr := newDontHaveTimeoutMgr(ctx, newPeerConnection(p, network), onTimeout) + dhTimeoutMgr := newDontHaveTimeoutMgr(newPeerConnection(p, network), onTimeout) return newMessageQueue(ctx, p, network, maxMessageSize, sendErrorBackoff, dhTimeoutMgr) } From 99cb8fa5cca275734aec770ad974f42b1be0de98 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 17 Mar 2020 13:38:40 -0700 Subject: [PATCH 0874/1035] fix: 64bit align stats (#305) fixes #302 This commit was moved from ipfs/go-bitswap@a32feca5e059d0589cbc86b7b7bf9bd45614cf56 --- bitswap/network/ipfs_impl.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 67159d53c..b5661408d 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -67,6 +67,10 @@ func processSettings(opts ...NetOpt) Settings { // impl transforms the ipfs network interface, which sends and receives // NetMessage objects, into the bitswap network interface. type impl struct { + // NOTE: Stats must be at the top of the heap allocation to ensure 64bit + // alignment. 
+ stats Stats + host host.Host routing routing.ContentRouting @@ -79,8 +83,6 @@ type impl struct { // inbound messages from the network are forwarded to the receiver receiver Receiver - - stats Stats } type streamMessageSender struct { From 1fd7bd7d55de3c3175c6ea6b228e52a9be6a3056 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 17 Mar 2020 15:36:48 -0700 Subject: [PATCH 0875/1035] feat: micro-optimize priority (#304) This commit was moved from ipfs/go-bitswap@f6db5f77fc1724e29937439eb5bd15b8b79d510a --- bitswap/internal/decision/engine.go | 6 +++--- bitswap/internal/decision/engine_test.go | 4 ++-- bitswap/internal/decision/ledger.go | 2 +- bitswap/internal/messagequeue/messagequeue.go | 4 ++-- bitswap/internal/testutil/testutil.go | 2 +- bitswap/message/message.go | 8 ++++---- bitswap/wantlist/wantlist.go | 6 +++--- 7 files changed, 16 insertions(+), 16 deletions(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 4b2dea497..6fe8875cd 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -596,7 +596,7 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap activeEntries = append(activeEntries, peertask.Task{ Topic: c, - Priority: entry.Priority, + Priority: int(entry.Priority), Work: bsmsg.BlockPresenceSize(c), Data: &taskData{ BlockSize: 0, @@ -624,7 +624,7 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap } activeEntries = append(activeEntries, peertask.Task{ Topic: c, - Priority: entry.Priority, + Priority: int(entry.Priority), Work: entrySize, Data: &taskData{ BlockSize: blockSize, @@ -692,7 +692,7 @@ func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block, haves []cid.Cid) e.peerRequestQueue.PushTasks(l.Partner, peertask.Task{ Topic: entry.Cid, - Priority: entry.Priority, + Priority: int(entry.Priority), Work: entrySize, Data: &taskData{ BlockSize: blockSize, diff --git 
a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index 6f5a193b6..7dac95063 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -1068,14 +1068,14 @@ func partnerWantBlocks(e *Engine, keys []string, partner peer.ID) { add := message.New(false) for i, letter := range keys { block := blocks.NewBlock([]byte(letter)) - add.AddEntry(block.Cid(), len(keys)-i, pb.Message_Wantlist_Block, true) + add.AddEntry(block.Cid(), int32(len(keys)-i), pb.Message_Wantlist_Block, true) } e.MessageReceived(context.Background(), partner, add) } func partnerWantBlocksHaves(e *Engine, keys []string, wantHaves []string, sendDontHave bool, partner peer.ID) { add := message.New(false) - priority := len(wantHaves) + len(keys) + priority := int32(len(wantHaves) + len(keys)) for _, letter := range wantHaves { block := blocks.NewBlock([]byte(letter)) add.AddEntry(block.Cid(), priority, pb.Message_Wantlist_Have, sendDontHave) diff --git a/bitswap/internal/decision/ledger.go b/bitswap/internal/decision/ledger.go index a607834a8..8f103bd46 100644 --- a/bitswap/internal/decision/ledger.go +++ b/bitswap/internal/decision/ledger.go @@ -91,7 +91,7 @@ func (l *ledger) ReceivedBytes(n int) { l.Accounting.BytesRecv += uint64(n) } -func (l *ledger) Wants(k cid.Cid, priority int, wantType pb.Message_Wantlist_WantType) { +func (l *ledger) Wants(k cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType) { log.Debugf("peer %s wants %s", l.Partner, k) l.wantList.Add(k, priority, wantType) } diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 8fccc0b53..aed5fbf1c 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -68,7 +68,7 @@ type MessageQueue struct { bcstWants recallWantlist peerWants recallWantlist cancels *cid.Set - priority int + priority int32 // Dont touch any of these variables outside of 
run loop sender bsnet.MessageSender @@ -95,7 +95,7 @@ func newRecallWantList() recallWantlist { } // Add want to both the pending list and the list of all wants -func (r *recallWantlist) Add(c cid.Cid, priority int, wtype pb.Message_Wantlist_WantType) { +func (r *recallWantlist) Add(c cid.Cid, priority int32, wtype pb.Message_Wantlist_WantType) { r.allWants.Add(c, priority, wtype) r.pending.Add(c, priority, wtype) } diff --git a/bitswap/internal/testutil/testutil.go b/bitswap/internal/testutil/testutil.go index 54706dca6..086035a0d 100644 --- a/bitswap/internal/testutil/testutil.go +++ b/bitswap/internal/testutil/testutil.go @@ -13,7 +13,7 @@ import ( ) var blockGenerator = blocksutil.NewBlockGenerator() -var prioritySeq int +var prioritySeq int32 // GenerateBlocksOfSize generates a series of blocks of the given byte size func GenerateBlocksOfSize(n int, size int64) []blocks.Block { diff --git a/bitswap/message/message.go b/bitswap/message/message.go index c4ea0fd12..6668e7cfe 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -37,7 +37,7 @@ type BitSwapMessage interface { PendingBytes() int32 // AddEntry adds an entry to the Wantlist. 
- AddEntry(key cid.Cid, priority int, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int + AddEntry(key cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int // Cancel adds a CANCEL for the given CID to the message // Returns the size of the CANCEL entry in the protobuf @@ -124,7 +124,7 @@ func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) { if err != nil { return nil, fmt.Errorf("incorrectly formatted cid in wantlist: %s", err) } - m.addEntry(c, int(e.Priority), e.Cancel, e.WantType, e.SendDontHave) + m.addEntry(c, e.Priority, e.Cancel, e.WantType, e.SendDontHave) } // deprecated @@ -231,11 +231,11 @@ func (m *impl) Cancel(k cid.Cid) int { return m.addEntry(k, 0, true, pb.Message_Wantlist_Block, false) } -func (m *impl) AddEntry(k cid.Cid, priority int, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { +func (m *impl) AddEntry(k cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { return m.addEntry(k, priority, false, wantType, sendDontHave) } -func (m *impl) addEntry(c cid.Cid, priority int, cancel bool, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { +func (m *impl) addEntry(c cid.Cid, priority int32, cancel bool, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { e, exists := m.wantlist[c] if exists { // Only change priority if want is of the same type diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index d891ad0ba..e18567dbf 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -18,12 +18,12 @@ type Wantlist struct { // Entry is an entry in a want list, consisting of a cid and its priority type Entry struct { Cid cid.Cid - Priority int + Priority int32 WantType pb.Message_Wantlist_WantType } // NewRefEntry creates a new reference tracked wantlist entry. 
-func NewRefEntry(c cid.Cid, p int) Entry { +func NewRefEntry(c cid.Cid, p int32) Entry { return Entry{ Cid: c, Priority: p, @@ -50,7 +50,7 @@ func (w *Wantlist) Len() int { } // Add adds an entry in a wantlist from CID & Priority, if not already present. -func (w *Wantlist) Add(c cid.Cid, priority int, wantType pb.Message_Wantlist_WantType) bool { +func (w *Wantlist) Add(c cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType) bool { e, ok := w.set[c] // Adding want-have should not override want-block From 91bfa847e9a688d21454f056aff65383ca67fe4d Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 18 Mar 2020 16:25:50 -0700 Subject: [PATCH 0876/1035] feat: add a custom CID type This allows us to marshal/unmarshal/size protobufs without copying CID around. This commit was moved from ipfs/go-bitswap@4b91e9bee358b41fe586afc54436c4f33f1b71b8 --- bitswap/message/message.go | 26 ++--- bitswap/message/message_test.go | 4 +- bitswap/message/pb/cid.go | 43 +++++++ bitswap/message/pb/message.pb.go | 195 +++++++++++++------------------ bitswap/message/pb/message.proto | 4 +- 5 files changed, 137 insertions(+), 135 deletions(-) create mode 100644 bitswap/message/pb/cid.go diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 6668e7cfe..7c531488c 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -2,7 +2,7 @@ package message import ( "encoding/binary" - "fmt" + "errors" "io" pb "github.com/ipfs/go-bitswap/message/pb" @@ -117,14 +117,15 @@ type Entry struct { SendDontHave bool } +var errCidMissing = errors.New("missing cid") + func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) { m := newMsg(pbm.Wantlist.Full) for _, e := range pbm.Wantlist.Entries { - c, err := cid.Cast([]byte(e.Block)) - if err != nil { - return nil, fmt.Errorf("incorrectly formatted cid in wantlist: %s", err) + if !e.Block.Cid.Defined() { + return nil, errCidMissing } - m.addEntry(c, e.Priority, e.Cancel, e.WantType, e.SendDontHave) + 
m.addEntry(e.Block.Cid, e.Priority, e.Cancel, e.WantType, e.SendDontHave) } // deprecated @@ -155,13 +156,10 @@ func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) { } for _, bi := range pbm.GetBlockPresences() { - c, err := cid.Cast(bi.GetCid()) - if err != nil { - return nil, err + if !bi.Cid.Cid.Defined() { + return nil, errCidMissing } - - t := bi.GetType() - m.AddBlockPresence(c, t) + m.AddBlockPresence(bi.Cid.Cid, bi.Type) } m.pendingBytes = pbm.PendingBytes @@ -311,7 +309,7 @@ func (m *impl) Size() int { func BlockPresenceSize(c cid.Cid) int { return (&pb.Message_BlockPresence{ - Cid: c.Bytes(), + Cid: pb.Cid{Cid: c}, Type: pb.Message_Have, }).Size() } @@ -341,7 +339,7 @@ func FromMsgReader(r msgio.Reader) (BitSwapMessage, error) { func entryToPB(e *Entry) pb.Message_Wantlist_Entry { return pb.Message_Wantlist_Entry{ - Block: e.Cid.Bytes(), + Block: pb.Cid{Cid: e.Cid}, Priority: int32(e.Priority), Cancel: e.Cancel, WantType: e.WantType, @@ -385,7 +383,7 @@ func (m *impl) ToProtoV1() *pb.Message { pbm.BlockPresences = make([]pb.Message_BlockPresence, 0, len(m.blockPresences)) for c, t := range m.blockPresences { pbm.BlockPresences = append(pbm.BlockPresences, pb.Message_BlockPresence{ - Cid: c.Bytes(), + Cid: pb.Cid{Cid: c}, Type: t, }) } diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 4b51a3cc2..aa58fa0f2 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -29,7 +29,7 @@ func TestNewMessageFromProto(t *testing.T) { str := mkFakeCid("a_key") protoMessage := new(pb.Message) protoMessage.Wantlist.Entries = []pb.Message_Wantlist_Entry{ - {Block: str.Bytes()}, + {Block: pb.Cid{Cid: str}}, } if !wantlistContains(&protoMessage.Wantlist, str) { t.Fail() @@ -164,7 +164,7 @@ func TestToAndFromNetMessage(t *testing.T) { func wantlistContains(wantlist *pb.Message_Wantlist, c cid.Cid) bool { for _, e := range wantlist.GetEntries() { - if bytes.Equal(e.GetBlock(), c.Bytes()) { + if 
e.Block.Cid.Defined() && c.Equals(e.Block.Cid) { return true } } diff --git a/bitswap/message/pb/cid.go b/bitswap/message/pb/cid.go new file mode 100644 index 000000000..59e32bb27 --- /dev/null +++ b/bitswap/message/pb/cid.go @@ -0,0 +1,43 @@ +package bitswap_message_pb + +import ( + "github.com/ipfs/go-cid" +) + +// NOTE: Don't "embed" the cid, wrap it like we're doing here. Otherwise, gogo +// will try to use the Bytes() function. + +// Cid is a custom type for CIDs in protobufs, that allows us to avoid +// reallocating. +type Cid struct { + Cid cid.Cid +} + +func (c Cid) Marshal() ([]byte, error) { + return c.Cid.Bytes(), nil +} + +func (c *Cid) MarshalTo(data []byte) (int, error) { + return copy(data[:c.Size()], c.Cid.Bytes()), nil +} + +func (c *Cid) Unmarshal(data []byte) (err error) { + c.Cid, err = cid.Cast(data) + return err +} + +func (c *Cid) Size() int { + return len(c.Cid.KeyString()) +} + +func (c Cid) MarshalJSON() ([]byte, error) { + return c.Cid.MarshalJSON() +} + +func (c *Cid) UnmarshalJSON(data []byte) error { + return c.Cid.UnmarshalJSON(data) +} + +func (c Cid) Equal(other Cid) bool { + return c.Cid.Equals(c.Cid) +} diff --git a/bitswap/message/pb/message.pb.go b/bitswap/message/pb/message.pb.go index b64e30825..c1effb8ea 100644 --- a/bitswap/message/pb/message.pb.go +++ b/bitswap/message/pb/message.pb.go @@ -21,7 +21,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type Message_BlockPresenceType int32 @@ -202,7 +202,7 @@ func (m *Message_Wantlist) GetFull() bool { } type Message_Wantlist_Entry struct { - Block []byte `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + Block Cid `protobuf:"bytes,1,opt,name=block,proto3,customtype=Cid" json:"block"` Priority int32 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` Cancel bool `protobuf:"varint,3,opt,name=cancel,proto3" json:"cancel,omitempty"` WantType Message_Wantlist_WantType `protobuf:"varint,4,opt,name=wantType,proto3,enum=bitswap.message.pb.Message_Wantlist_WantType" json:"wantType,omitempty"` @@ -242,13 +242,6 @@ func (m *Message_Wantlist_Entry) XXX_DiscardUnknown() { var xxx_messageInfo_Message_Wantlist_Entry proto.InternalMessageInfo -func (m *Message_Wantlist_Entry) GetBlock() []byte { - if m != nil { - return m.Block - } - return nil -} - func (m *Message_Wantlist_Entry) GetPriority() int32 { if m != nil { return m.Priority @@ -330,7 +323,7 @@ func (m *Message_Block) GetData() []byte { } type Message_BlockPresence struct { - Cid []byte `protobuf:"bytes,1,opt,name=cid,proto3" json:"cid,omitempty"` + Cid Cid `protobuf:"bytes,1,opt,name=cid,proto3,customtype=Cid" json:"cid"` Type Message_BlockPresenceType `protobuf:"varint,2,opt,name=type,proto3,enum=bitswap.message.pb.Message_BlockPresenceType" json:"type,omitempty"` } @@ -367,13 +360,6 @@ func (m *Message_BlockPresence) XXX_DiscardUnknown() { var xxx_messageInfo_Message_BlockPresence proto.InternalMessageInfo -func (m *Message_BlockPresence) GetCid() []byte { - if m != nil { - return m.Cid - } - return nil -} - func (m *Message_BlockPresence) GetType() Message_BlockPresenceType { if m != nil { return m.Type @@ -394,38 +380,39 @@ func init() { func init() { proto.RegisterFile("message.proto", fileDescriptor_33c57e4bae7b9afd) 
} var fileDescriptor_33c57e4bae7b9afd = []byte{ - // 483 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x6b, 0xd4, 0x50, - 0x14, 0xcd, 0x9b, 0x24, 0x9d, 0x78, 0x9b, 0x96, 0xf1, 0x21, 0xf2, 0xc8, 0x22, 0x8d, 0x83, 0x8b, - 0xa8, 0x34, 0x85, 0xe9, 0x2f, 0xe8, 0xa0, 0xa2, 0x82, 0x20, 0x41, 0x98, 0x75, 0x3e, 0xde, 0xc4, - 0x60, 0x9a, 0x84, 0xbc, 0x37, 0xd6, 0xfc, 0x0b, 0x7f, 0x92, 0xb8, 0xea, 0x4a, 0xba, 0x74, 0x25, - 0x32, 0xf3, 0x47, 0x24, 0x37, 0x2f, 0x81, 0xb1, 0x60, 0xbb, 0xbb, 0xe7, 0xbe, 0x7b, 0x4e, 0xee, - 0xb9, 0x87, 0xc0, 0xd1, 0x25, 0x17, 0x22, 0xca, 0x78, 0x50, 0x37, 0x95, 0xac, 0x28, 0x8d, 0x73, - 0x29, 0xae, 0xa2, 0x3a, 0x18, 0xdb, 0xb1, 0x73, 0x9a, 0xe5, 0xf2, 0xd3, 0x26, 0x0e, 0x92, 0xea, - 0xf2, 0x2c, 0xab, 0xb2, 0xea, 0x0c, 0x47, 0xe3, 0xcd, 0x1a, 0x11, 0x02, 0xac, 0x7a, 0x89, 0xf9, - 0x8f, 0x03, 0x98, 0xbe, 0xef, 0xd9, 0xf4, 0x35, 0x58, 0x57, 0x51, 0x29, 0x8b, 0x5c, 0x48, 0x46, - 0x3c, 0xe2, 0x1f, 0x2e, 0x9e, 0x06, 0xb7, 0xbf, 0x10, 0xa8, 0xf1, 0x60, 0xa5, 0x66, 0x97, 0xc6, - 0xf5, 0xef, 0x13, 0x2d, 0x1c, 0xb9, 0xf4, 0x31, 0x1c, 0xc4, 0x45, 0x95, 0x7c, 0x16, 0x6c, 0xe2, - 0xe9, 0xbe, 0x1d, 0x2a, 0x44, 0x2f, 0x60, 0x5a, 0x47, 0x6d, 0x51, 0x45, 0x29, 0xd3, 0x3d, 0xdd, - 0x3f, 0x5c, 0x3c, 0xf9, 0x9f, 0xfc, 0xb2, 0x23, 0x29, 0xed, 0x81, 0x47, 0x57, 0x70, 0x8c, 0x62, - 0x1f, 0x1a, 0x2e, 0x78, 0x99, 0x70, 0xc1, 0x0c, 0x54, 0x7a, 0x76, 0xa7, 0xd2, 0xc0, 0x50, 0x8a, - 0xff, 0xc8, 0xd0, 0x39, 0xd8, 0x35, 0x2f, 0xd3, 0xbc, 0xcc, 0x96, 0xad, 0xe4, 0x82, 0x99, 0x1e, - 0xf1, 0xcd, 0x70, 0xaf, 0xe7, 0xfc, 0x9c, 0x80, 0x35, 0x98, 0xa6, 0xef, 0x60, 0xca, 0x4b, 0xd9, - 0xe4, 0x5c, 0x30, 0x82, 0x2b, 0x3c, 0xbf, 0xcf, 0xad, 0x82, 0x57, 0xa5, 0x6c, 0xda, 0xc1, 0x95, - 0x12, 0xa0, 0x14, 0x8c, 0xf5, 0xa6, 0x28, 0xd8, 0xc4, 0x23, 0xbe, 0x15, 0x62, 0xed, 0x7c, 0x27, - 0x60, 0xe2, 0x30, 0x7d, 0x04, 0x26, 0x2e, 0x8b, 0x99, 0xd8, 0x61, 0x0f, 0xa8, 0x03, 0x56, 0xdd, - 0xe4, 0x55, 0x93, 0xcb, 0x16, 0x79, 
0x66, 0x38, 0xe2, 0x2e, 0x80, 0x24, 0x2a, 0x13, 0x5e, 0x30, - 0x1d, 0x15, 0x15, 0xa2, 0x6f, 0xfb, 0x80, 0x3f, 0xb6, 0x35, 0x67, 0x86, 0x47, 0xfc, 0xe3, 0xc5, - 0xe9, 0xbd, 0x96, 0x5e, 0x29, 0x52, 0x38, 0xd2, 0xbb, 0x7b, 0x09, 0x5e, 0xa6, 0x2f, 0xab, 0x52, - 0xbe, 0x89, 0xbe, 0x70, 0xbc, 0x97, 0x15, 0xee, 0xf5, 0xe6, 0x27, 0xfd, 0xb9, 0x70, 0xfe, 0x01, - 0x98, 0x18, 0xc3, 0x4c, 0xa3, 0x16, 0x18, 0xdd, 0xf3, 0x8c, 0x38, 0xe7, 0xaa, 0xd9, 0x2d, 0x5c, - 0x37, 0x7c, 0x9d, 0x7f, 0x55, 0x1e, 0x15, 0xea, 0x0e, 0x93, 0x46, 0x32, 0x42, 0x83, 0x76, 0x88, - 0xb5, 0x93, 0xc2, 0xd1, 0x5e, 0xa0, 0x74, 0x06, 0x7a, 0x92, 0xa7, 0x8a, 0xd9, 0x95, 0xf4, 0x02, - 0x0c, 0xd9, 0x79, 0x9c, 0xdc, 0xed, 0x71, 0x4f, 0x0a, 0x3d, 0x22, 0x75, 0xfe, 0x02, 0x1e, 0xde, - 0x7a, 0x1a, 0x37, 0xd7, 0xa8, 0x0d, 0xd6, 0x60, 0x73, 0x46, 0x96, 0xec, 0x7a, 0xeb, 0x92, 0x9b, - 0xad, 0x4b, 0xfe, 0x6c, 0x5d, 0xf2, 0x6d, 0xe7, 0x6a, 0x37, 0x3b, 0x57, 0xfb, 0xb5, 0x73, 0xb5, - 0xf8, 0x00, 0xff, 0xb2, 0xf3, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xac, 0xa9, 0xf7, 0xab, 0xb9, - 0x03, 0x00, 0x00, + // 497 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xdf, 0x8a, 0xd3, 0x40, + 0x14, 0xc6, 0x33, 0x4d, 0xd2, 0xc6, 0xd3, 0xee, 0x52, 0xe7, 0x42, 0x42, 0xc0, 0x34, 0x5b, 0xbc, + 0x88, 0xca, 0x66, 0xa1, 0xfb, 0x04, 0x5b, 0xff, 0xa0, 0x82, 0x20, 0x83, 0xd0, 0xeb, 0xfc, 0x99, + 0xd6, 0xc1, 0x6c, 0x12, 0x33, 0x53, 0xd7, 0xbe, 0x85, 0x8f, 0xb5, 0x37, 0xc2, 0x5e, 0x8a, 0xca, + 0x22, 0xed, 0x8b, 0x48, 0x4e, 0xa6, 0x85, 0xba, 0xe2, 0xee, 0xdd, 0x9c, 0x33, 0xe7, 0xfb, 0x65, + 0xbe, 0xef, 0x10, 0x38, 0x38, 0xe7, 0x52, 0xc6, 0x0b, 0x1e, 0x55, 0x75, 0xa9, 0x4a, 0x4a, 0x13, + 0xa1, 0xe4, 0x45, 0x5c, 0x45, 0xbb, 0x76, 0xe2, 0x1d, 0x2f, 0x84, 0xfa, 0xb0, 0x4c, 0xa2, 0xb4, + 0x3c, 0x3f, 0x59, 0x94, 0x8b, 0xf2, 0x04, 0x47, 0x93, 0xe5, 0x1c, 0x2b, 0x2c, 0xf0, 0xd4, 0x22, + 0xc6, 0xbf, 0xba, 0xd0, 0x7b, 0xdb, 0xaa, 0xe9, 0x4b, 0x70, 0x2e, 0xe2, 0x42, 0xe5, 0x42, 0x2a, + 0x97, 
0x04, 0x24, 0xec, 0x4f, 0x1e, 0x45, 0x37, 0xbf, 0x10, 0xe9, 0xf1, 0x68, 0xa6, 0x67, 0xa7, + 0xd6, 0xe5, 0xf5, 0xc8, 0x60, 0x3b, 0x2d, 0x7d, 0x00, 0xdd, 0x24, 0x2f, 0xd3, 0x8f, 0xd2, 0xed, + 0x04, 0x66, 0x38, 0x60, 0xba, 0xa2, 0x67, 0xd0, 0xab, 0xe2, 0x55, 0x5e, 0xc6, 0x99, 0x6b, 0x06, + 0x66, 0xd8, 0x9f, 0x1c, 0xfd, 0x0f, 0x3f, 0x6d, 0x44, 0x9a, 0xbd, 0xd5, 0xd1, 0x19, 0x1c, 0x22, + 0xec, 0x5d, 0xcd, 0x25, 0x2f, 0x52, 0x2e, 0x5d, 0x0b, 0x49, 0x8f, 0x6f, 0x25, 0x6d, 0x15, 0x9a, + 0xf8, 0x17, 0x86, 0x8e, 0x61, 0x50, 0xf1, 0x22, 0x13, 0xc5, 0x62, 0xba, 0x52, 0x5c, 0xba, 0x76, + 0x40, 0x42, 0x9b, 0xed, 0xf5, 0xbc, 0x9f, 0x1d, 0x70, 0xb6, 0xa6, 0xe9, 0x1b, 0xe8, 0xf1, 0x42, + 0xd5, 0x82, 0x4b, 0x97, 0xe0, 0x13, 0x9e, 0xdc, 0x25, 0xab, 0xe8, 0x45, 0xa1, 0xea, 0xd5, 0xd6, + 0x95, 0x06, 0x50, 0x0a, 0xd6, 0x7c, 0x99, 0xe7, 0x6e, 0x27, 0x20, 0xa1, 0xc3, 0xf0, 0xec, 0x7d, + 0x23, 0x60, 0xe3, 0x30, 0x3d, 0x02, 0x1b, 0x1f, 0x8b, 0x3b, 0x19, 0x4c, 0xfb, 0x8d, 0xf6, 0xc7, + 0xf5, 0xc8, 0x7c, 0x26, 0x32, 0xd6, 0xde, 0x50, 0x0f, 0x9c, 0xaa, 0x16, 0x65, 0x2d, 0xd4, 0x0a, + 0x21, 0x36, 0xdb, 0xd5, 0xcd, 0x36, 0xd2, 0xb8, 0x48, 0x79, 0xee, 0x9a, 0x88, 0xd7, 0x15, 0x7d, + 0xdd, 0x6e, 0xfb, 0xfd, 0xaa, 0xe2, 0xae, 0x15, 0x90, 0xf0, 0x70, 0x72, 0x7c, 0x27, 0x07, 0x33, + 0x2d, 0x62, 0x3b, 0x79, 0x13, 0x9e, 0xe4, 0x45, 0xf6, 0xbc, 0x2c, 0xd4, 0xab, 0xf8, 0x33, 0xc7, + 0xf0, 0x1c, 0xb6, 0xd7, 0x1b, 0x8f, 0xda, 0xec, 0x70, 0xfe, 0x1e, 0xd8, 0xb8, 0x93, 0xa1, 0x41, + 0x1d, 0xb0, 0x9a, 0xeb, 0x21, 0xf1, 0x4e, 0x75, 0xb3, 0x79, 0x70, 0x55, 0xf3, 0xb9, 0xf8, 0xd2, + 0x1a, 0x66, 0xba, 0x6a, 0x52, 0xca, 0x62, 0x15, 0xa3, 0xc1, 0x01, 0xc3, 0xb3, 0xf7, 0x09, 0x0e, + 0xf6, 0xb6, 0x4b, 0x1f, 0x82, 0x99, 0x8a, 0xec, 0x5f, 0x51, 0x35, 0x7d, 0x7a, 0x06, 0x96, 0x6a, + 0x0c, 0x77, 0x6e, 0x37, 0xbc, 0xc7, 0x45, 0xc3, 0x28, 0x1d, 0x3f, 0x85, 0xfb, 0x37, 0xae, 0x76, + 0x36, 0x0c, 0x3a, 0x00, 0x67, 0xeb, 0x79, 0x48, 0xa6, 0xee, 0xe5, 0xda, 0x27, 0x57, 0x6b, 0x9f, + 0xfc, 0x5e, 0xfb, 0xe4, 0xeb, 0xc6, 0x37, 
0xae, 0x36, 0xbe, 0xf1, 0x7d, 0xe3, 0x1b, 0x49, 0x17, + 0xff, 0xbf, 0xd3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x8a, 0xaf, 0x83, 0xd3, 0x03, 0x00, + 0x00, } func (m *Message) Marshal() (dAtA []byte, err error) { @@ -600,13 +587,16 @@ func (m *Message_Wantlist_Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) i-- dAtA[i] = 0x10 } - if len(m.Block) > 0 { - i -= len(m.Block) - copy(dAtA[i:], m.Block) - i = encodeVarintMessage(dAtA, i, uint64(len(m.Block))) - i-- - dAtA[i] = 0xa + { + size := m.Block.Size() + i -= size + if _, err := m.Block.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintMessage(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } @@ -672,13 +662,16 @@ func (m *Message_BlockPresence) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x10 } - if len(m.Cid) > 0 { - i -= len(m.Cid) - copy(dAtA[i:], m.Cid) - i = encodeVarintMessage(dAtA, i, uint64(len(m.Cid))) - i-- - dAtA[i] = 0xa + { + size := m.Cid.Size() + i -= size + if _, err := m.Cid.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintMessage(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } @@ -749,10 +742,8 @@ func (m *Message_Wantlist_Entry) Size() (n int) { } var l int _ = l - l = len(m.Block) - if l > 0 { - n += 1 + l + sovMessage(uint64(l)) - } + l = m.Block.Size() + n += 1 + l + sovMessage(uint64(l)) if m.Priority != 0 { n += 1 + sovMessage(uint64(m.Priority)) } @@ -791,10 +782,8 @@ func (m *Message_BlockPresence) Size() (n int) { } var l int _ = l - l = len(m.Cid) - if l > 0 { - n += 1 + l + sovMessage(uint64(l)) - } + l = m.Cid.Size() + n += 1 + l + sovMessage(uint64(l)) if m.Type != 0 { n += 1 + sovMessage(uint64(m.Type)) } @@ -1177,9 +1166,8 @@ func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Block = append(m.Block[:0], dAtA[iNdEx:postIndex]...) 
- if m.Block == nil { - m.Block = []byte{} + if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex case 2: @@ -1463,9 +1451,8 @@ func (m *Message_BlockPresence) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cid = append(m.Cid[:0], dAtA[iNdEx:postIndex]...) - if m.Cid == nil { - m.Cid = []byte{} + if err := m.Cid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex case 2: @@ -1514,6 +1501,7 @@ func (m *Message_BlockPresence) Unmarshal(dAtA []byte) error { func skipMessage(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1545,10 +1533,8 @@ func skipMessage(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1569,55 +1555,30 @@ func skipMessage(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthMessage } iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthMessage - } - return iNdEx, nil case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMessage - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipMessage(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthMessage - } - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMessage + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthMessage + } + if 
depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthMessage = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMessage = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthMessage = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMessage = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMessage = fmt.Errorf("proto: unexpected end of group") ) diff --git a/bitswap/message/pb/message.proto b/bitswap/message/pb/message.proto index f7afdb1fe..e6c271cc2 100644 --- a/bitswap/message/pb/message.proto +++ b/bitswap/message/pb/message.proto @@ -13,7 +13,7 @@ message Message { } message Entry { - bytes block = 1; // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0) + bytes block = 1 [(gogoproto.customtype) = "Cid", (gogoproto.nullable) = false]; // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0) int32 priority = 2; // the priority (normalized). default to 1 bool cancel = 3; // whether this revokes an entry WantType wantType = 4; // Note: defaults to enum 0, ie Block @@ -34,7 +34,7 @@ message Message { DontHave = 1; } message BlockPresence { - bytes cid = 1; + bytes cid = 1 [(gogoproto.customtype) = "Cid", (gogoproto.nullable) = false]; BlockPresenceType type = 2; } From 8de88f9e28e9cfaa564a526aa3781af5f81fbbe5 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 19 Mar 2020 06:52:03 -0700 Subject: [PATCH 0877/1035] test(message): test custom CID type (#309) This got dropped from my other patch. 
This commit was moved from ipfs/go-bitswap@03e6d1f0f23d5178390d945b8a481c1abb49e783 --- bitswap/message/pb/cid_test.go | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 bitswap/message/pb/cid_test.go diff --git a/bitswap/message/pb/cid_test.go b/bitswap/message/pb/cid_test.go new file mode 100644 index 000000000..3d4b87a78 --- /dev/null +++ b/bitswap/message/pb/cid_test.go @@ -0,0 +1,32 @@ +package bitswap_message_pb_test + +import ( + "bytes" + "testing" + + "github.com/ipfs/go-cid" + u "github.com/ipfs/go-ipfs-util" + + pb "github.com/ipfs/go-bitswap/message/pb" +) + +func TestCID(t *testing.T) { + var expected = [...]byte{ + 10, 34, 18, 32, 195, 171, + 143, 241, 55, 32, 232, 173, + 144, 71, 221, 57, 70, 107, + 60, 137, 116, 229, 146, 194, + 250, 56, 61, 74, 57, 96, + 113, 76, 174, 240, 196, 242, + } + + c := cid.NewCidV0(u.Hash([]byte("foobar"))) + msg := pb.Message_BlockPresence{Cid: pb.Cid{Cid: c}} + actual, err := msg.Marshal() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(actual, expected[:]) { + t.Fatal("failed to correctly encode custom CID type") + } +} From 3d61280ba337ab08d2f58e9bc3b9d1d3df5984be Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 18 Mar 2020 18:13:17 -0400 Subject: [PATCH 0878/1035] perf: add message queue benchmark This commit was moved from ipfs/go-bitswap@cac64200c37189813acfba4ad964da5538c6def7 --- .../messagequeue/messagequeue_test.go | 61 ++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) diff --git a/bitswap/internal/messagequeue/messagequeue_test.go b/bitswap/internal/messagequeue/messagequeue_test.go index 0f7cba8ac..de843d2aa 100644 --- a/bitswap/internal/messagequeue/messagequeue_test.go +++ b/bitswap/internal/messagequeue/messagequeue_test.go @@ -4,16 +4,18 @@ import ( "context" "errors" "fmt" + "math" + "math/rand" "sync" "testing" "time" "github.com/ipfs/go-bitswap/internal/testutil" "github.com/ipfs/go-bitswap/message" + pb 
"github.com/ipfs/go-bitswap/message/pb" cid "github.com/ipfs/go-cid" bsmsg "github.com/ipfs/go-bitswap/message" - pb "github.com/ipfs/go-bitswap/message/pb" bsnet "github.com/ipfs/go-bitswap/network" peer "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p/p2p/protocol/ping" @@ -705,3 +707,60 @@ func filterWantTypes(wantlist []bsmsg.Entry) ([]cid.Cid, []cid.Cid, []cid.Cid) { } return wbs, whs, cls } + +// Simplistic benchmark to allow us to simulate conditions on the gateways +func BenchmarkMessageQueue(b *testing.B) { + ctx := context.Background() + + createQueue := func() *MessageQueue { + messagesSent := make(chan bsmsg.BitSwapMessage) + sendErrors := make(chan error) + resetChan := make(chan struct{}, 1) + fullClosedChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + dhtm := &fakeDontHaveTimeoutMgr{} + peerID := testutil.GeneratePeers(1)[0] + + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, dhtm) + messageQueue.Startup() + + go func() { + for { + <-messagesSent + time.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond) + } + }() + + return messageQueue + } + + // Create a handful of message queues to start with + var qs []*MessageQueue + for i := 0; i < 5; i++ { + qs = append(qs, createQueue()) + } + + for n := 0; n < b.N; n++ { + // Create a new message queue every 10 ticks + if n%10 == 0 { + qs = append(qs, createQueue()) + } + + // Pick a random message queue, favoring those created later + qn := len(qs) + i := int(math.Floor(float64(qn) * float64(1-rand.Float32()*rand.Float32()))) + if i >= qn { // because of floating point math + i = qn - 1 + } + + // Alternately add either a few wants or a lot of broadcast wants + if rand.Intn(2) == 0 { + wants := testutil.GenerateCids(10) + qs[i].AddWants(wants[:2], wants[2:]) + } else { + wants := testutil.GenerateCids(60) 
+ qs[i].AddBroadcastWantHaves(wants) + } + } +} From 9e2ddc996712d8aa157df14fe2cc48a433a1dc14 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 18 Mar 2020 18:24:00 -0400 Subject: [PATCH 0879/1035] perf: improve extractOutgoingMessage() performance This commit was moved from ipfs/go-bitswap@e98629476eb28c768714d59a36a689dd6ec7bcec --- bitswap/internal/messagequeue/messagequeue.go | 34 ++++++--- bitswap/message/message.go | 71 ++++++++++++------- bitswap/message/message_test.go | 23 ++++++ 3 files changed, 93 insertions(+), 35 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index aed5fbf1c..61af02af3 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -75,6 +75,9 @@ type MessageQueue struct { rebroadcastIntervalLk sync.RWMutex rebroadcastInterval time.Duration rebroadcastTimer *time.Timer + // For performance reasons we just clear out the fields of the message + // instead of creating a new one every time. + msg bsmsg.BitSwapMessage } // recallWantlist keeps a list of pending wants, and a list of all wants that @@ -410,9 +413,10 @@ func (mq *MessageQueue) sendMessage() { for i := 0; i < maxRetries; i++ { if mq.attemptSendAndRecovery(message) { // We were able to send successfully. - onSent() + wantlist := message.Wantlist() + onSent(wantlist) - mq.simulateDontHaveWithTimeout(message) + mq.simulateDontHaveWithTimeout(wantlist) // If the message was too big and only a subset of wants could be // sent, schedule sending the rest of the wants in the next @@ -430,12 +434,12 @@ func (mq *MessageQueue) sendMessage() { // This is necessary when making requests to peers running an older version of // Bitswap that doesn't support the DONT_HAVE response, and is also useful to // mitigate getting blocked by a peer that takes a long time to respond. 
-func (mq *MessageQueue) simulateDontHaveWithTimeout(msg bsmsg.BitSwapMessage) { - mq.wllock.Lock() - +func (mq *MessageQueue) simulateDontHaveWithTimeout(wantlist []bsmsg.Entry) { // Get the CID of each want-block that expects a DONT_HAVE response - wantlist := msg.Wantlist() wants := make([]cid.Cid, 0, len(wantlist)) + + mq.wllock.Lock() + for _, entry := range wantlist { if entry.WantType == pb.Message_Wantlist_Block && entry.SendDontHave { // Unlikely, but just in case check that the block hasn't been @@ -489,9 +493,17 @@ func (mq *MessageQueue) pendingWorkCount() int { return mq.bcstWants.pending.Len() + mq.peerWants.pending.Len() + mq.cancels.Len() } -func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwapMessage, func()) { - // Create a new message - msg := bsmsg.New(false) +func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwapMessage, func([]bsmsg.Entry)) { + // For performance reasons we just clear out the fields of the message + // instead of creating a new one every time. + if mq.msg == nil { + // Create a new message + mq.msg = bsmsg.New(false) + } else { + // If there's already a message, reset it + mq.msg.Reset(false) + } + msg := mq.msg mq.wllock.Lock() defer mq.wllock.Unlock() @@ -544,11 +556,11 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap // Called when the message has been successfully sent. // Remove the sent keys from the broadcast and regular wantlists. 
- onSent := func() { + onSent := func(wantlist []bsmsg.Entry) { mq.wllock.Lock() defer mq.wllock.Unlock() - for _, e := range msg.Wantlist() { + for _, e := range wantlist { mq.bcstWants.pending.Remove(e.Cid) mq.peerWants.pending.RemoveType(e.Cid, e.WantType) } diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 7c531488c..6b2fe533b 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -65,6 +65,9 @@ type BitSwapMessage interface { Exportable Loggable() map[string]interface{} + + // Reset the values in the message back to defaults, so it can be reused + Reset(bool) } // Exportable is an interface for structures than can be @@ -85,6 +88,33 @@ type BlockPresence struct { Type pb.Message_BlockPresenceType } +// Entry is a wantlist entry in a Bitswap message, with flags indicating +// - whether message is a cancel +// - whether requester wants a DONT_HAVE message +// - whether requester wants a HAVE message (instead of the block) +type Entry struct { + wantlist.Entry + Cancel bool + SendDontHave bool +} + +// Get the size of the entry on the wire +func (e *Entry) Size() int { + epb := e.ToPB() + return epb.Size() +} + +// Get the entry in protobuf form +func (e *Entry) ToPB() pb.Message_Wantlist_Entry { + return pb.Message_Wantlist_Entry{ + Block: pb.Cid{Cid: e.Cid}, + Priority: int32(e.Priority), + Cancel: e.Cancel, + WantType: e.WantType, + SendDontHave: e.SendDontHave, + } +} + type impl struct { full bool wantlist map[cid.Cid]*Entry @@ -107,14 +137,19 @@ func newMsg(full bool) *impl { } } -// Entry is a wantlist entry in a Bitswap message, with flags indicating -// - whether message is a cancel -// - whether requester wants a DONT_HAVE message -// - whether requester wants a HAVE message (instead of the block) -type Entry struct { - wantlist.Entry - Cancel bool - SendDontHave bool +// Reset the values in the message back to defaults, so it can be reused +func (m *impl) Reset(full bool) { + m.full = full + for k := range 
m.wantlist { + delete(m.wantlist, k) + } + for k := range m.blocks { + delete(m.blocks, k) + } + for k := range m.blockPresences { + delete(m.blockPresences, k) + } + m.pendingBytes = 0 } var errCidMissing = errors.New("missing cid") @@ -267,8 +302,7 @@ func (m *impl) addEntry(c cid.Cid, priority int32, cancel bool, wantType pb.Mess } m.wantlist[c] = e - aspb := entryToPB(e) - return aspb.Size() + return e.Size() } func (m *impl) AddBlock(b blocks.Block) { @@ -300,8 +334,7 @@ func (m *impl) Size() int { size += BlockPresenceSize(c) } for _, e := range m.wantlist { - epb := entryToPB(e) - size += epb.Size() + size += e.Size() } return size @@ -337,21 +370,11 @@ func FromMsgReader(r msgio.Reader) (BitSwapMessage, error) { return newMessageFromProto(pb) } -func entryToPB(e *Entry) pb.Message_Wantlist_Entry { - return pb.Message_Wantlist_Entry{ - Block: pb.Cid{Cid: e.Cid}, - Priority: int32(e.Priority), - Cancel: e.Cancel, - WantType: e.WantType, - SendDontHave: e.SendDontHave, - } -} - func (m *impl) ToProtoV0() *pb.Message { pbm := new(pb.Message) pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist)) for _, e := range m.wantlist { - pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, entryToPB(e)) + pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, e.ToPB()) } pbm.Wantlist.Full = m.full @@ -367,7 +390,7 @@ func (m *impl) ToProtoV1() *pb.Message { pbm := new(pb.Message) pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist)) for _, e := range m.wantlist { - pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, entryToPB(e)) + pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, e.ToPB()) } pbm.Wantlist.Full = m.full diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index aa58fa0f2..0d4b80108 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -2,9 +2,12 @@ package message import ( "bytes" + "fmt" "testing" pb "github.com/ipfs/go-bitswap/message/pb" + 
"github.com/ipfs/go-bitswap/wantlist" + blocksutil "github.com/ipfs/go-ipfs-blocksutil" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" @@ -289,3 +292,23 @@ func TestAddWantlistEntry(t *testing.T) { t.Fatal("want should not override cancel") } } + +func TestEntrySize(t *testing.T) { + blockGenerator := blocksutil.NewBlockGenerator() + c := blockGenerator.Next().Cid() + e := Entry{ + Entry: wantlist.Entry{ + Cid: c, + Priority: 10, + WantType: pb.Message_Wantlist_Have, + }, + SendDontHave: true, + Cancel: false, + } + fmt.Println(len(c.Bytes())) + fmt.Println(len(c.KeyString())) + epb := e.ToPB() + if e.Size() != epb.Size() { + t.Fatal("entry size calculation incorrect", e.Size(), epb.Size()) + } +} From ae45ed2a55cf99d7752f415a14f6ebaaff3687d1 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 19 Mar 2020 10:22:39 -0400 Subject: [PATCH 0880/1035] fix: race in tests This commit was moved from ipfs/go-bitswap@2b8391646d58c36f362f8e3f11d58cc3af39524c --- .../messagequeue/messagequeue_test.go | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue_test.go b/bitswap/internal/messagequeue/messagequeue_test.go index de843d2aa..059534057 100644 --- a/bitswap/internal/messagequeue/messagequeue_test.go +++ b/bitswap/internal/messagequeue/messagequeue_test.go @@ -86,13 +86,13 @@ type fakeMessageSender struct { sendError error fullClosed chan<- struct{} reset chan<- struct{} - messagesSent chan<- bsmsg.BitSwapMessage + messagesSent chan<- []bsmsg.Entry sendErrors chan<- error supportsHave bool } func newFakeMessageSender(sendError error, fullClosed chan<- struct{}, reset chan<- struct{}, - messagesSent chan<- bsmsg.BitSwapMessage, sendErrors chan<- error, supportsHave bool) *fakeMessageSender { + messagesSent chan<- []bsmsg.Entry, sendErrors chan<- error, supportsHave bool) *fakeMessageSender { return &fakeMessageSender{ sendError: sendError, @@ -112,7 +112,7 @@ func (fms 
*fakeMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMess fms.sendErrors <- fms.sendError return fms.sendError } - fms.messagesSent <- msg + fms.messagesSent <- msg.Wantlist() return nil } func (fms *fakeMessageSender) clearSendError() { @@ -129,9 +129,9 @@ func mockTimeoutCb(peer.ID, []cid.Cid) {} func collectMessages(ctx context.Context, t *testing.T, - messagesSent <-chan bsmsg.BitSwapMessage, - timeout time.Duration) []bsmsg.BitSwapMessage { - var messagesReceived []bsmsg.BitSwapMessage + messagesSent <-chan []bsmsg.Entry, + timeout time.Duration) [][]bsmsg.Entry { + var messagesReceived [][]bsmsg.Entry timeoutctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() for { @@ -144,17 +144,17 @@ func collectMessages(ctx context.Context, } } -func totalEntriesLength(messages []bsmsg.BitSwapMessage) int { +func totalEntriesLength(messages [][]bsmsg.Entry) int { totalLength := 0 - for _, messages := range messages { - totalLength += len(messages.Wantlist()) + for _, m := range messages { + totalLength += len(m) } return totalLength } func TestStartupAndShutdown(t *testing.T) { ctx := context.Background() - messagesSent := make(chan bsmsg.BitSwapMessage) + messagesSent := make(chan []bsmsg.Entry) sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) @@ -172,10 +172,10 @@ func TestStartupAndShutdown(t *testing.T) { } firstMessage := messages[0] - if len(firstMessage.Wantlist()) != len(bcstwh) { + if len(firstMessage) != len(bcstwh) { t.Fatal("did not add all wants to want list") } - for _, entry := range firstMessage.Wantlist() { + for _, entry := range firstMessage { if entry.Cancel { t.Fatal("initial add sent cancel entry when it should not have") } @@ -196,7 +196,7 @@ func TestStartupAndShutdown(t *testing.T) { func TestSendingMessagesDeduped(t *testing.T) { ctx := context.Background() - messagesSent := make(chan bsmsg.BitSwapMessage) + messagesSent := make(chan []bsmsg.Entry) sendErrors := 
make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) @@ -219,7 +219,7 @@ func TestSendingMessagesDeduped(t *testing.T) { func TestSendingMessagesPartialDupe(t *testing.T) { ctx := context.Background() - messagesSent := make(chan bsmsg.BitSwapMessage) + messagesSent := make(chan []bsmsg.Entry) sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) @@ -242,7 +242,7 @@ func TestSendingMessagesPartialDupe(t *testing.T) { func TestSendingMessagesPriority(t *testing.T) { ctx := context.Background() - messagesSent := make(chan bsmsg.BitSwapMessage) + messagesSent := make(chan []bsmsg.Entry) sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) @@ -266,7 +266,7 @@ func TestSendingMessagesPriority(t *testing.T) { t.Fatal("wrong number of wants") } byCid := make(map[cid.Cid]message.Entry) - for _, entry := range messages[0].Wantlist() { + for _, entry := range messages[0] { byCid[entry.Cid] = entry } @@ -311,7 +311,7 @@ func TestSendingMessagesPriority(t *testing.T) { func TestCancelOverridesPendingWants(t *testing.T) { ctx := context.Background() - messagesSent := make(chan bsmsg.BitSwapMessage) + messagesSent := make(chan []bsmsg.Entry) sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) @@ -331,7 +331,7 @@ func TestCancelOverridesPendingWants(t *testing.T) { t.Fatal("Wrong message count") } - wb, wh, cl := filterWantTypes(messages[0].Wantlist()) + wb, wh, cl := filterWantTypes(messages[0]) if len(wb) != 1 || !wb[0].Equals(wantBlocks[1]) { t.Fatal("Expected 1 want-block") } @@ -345,7 +345,7 @@ func TestCancelOverridesPendingWants(t *testing.T) { func TestWantOverridesPendingCancels(t *testing.T) { ctx := context.Background() - messagesSent := make(chan bsmsg.BitSwapMessage) + messagesSent := make(chan []bsmsg.Entry) sendErrors := make(chan error) resetChan := make(chan 
struct{}, 1) fullClosedChan := make(chan struct{}, 1) @@ -364,7 +364,7 @@ func TestWantOverridesPendingCancels(t *testing.T) { t.Fatal("Wrong message count") } - wb, wh, cl := filterWantTypes(messages[0].Wantlist()) + wb, wh, cl := filterWantTypes(messages[0]) if len(wb) != 1 || !wb[0].Equals(cancels[0]) { t.Fatal("Expected 1 want-block") } @@ -378,7 +378,7 @@ func TestWantOverridesPendingCancels(t *testing.T) { func TestWantlistRebroadcast(t *testing.T) { ctx := context.Background() - messagesSent := make(chan bsmsg.BitSwapMessage) + messagesSent := make(chan []bsmsg.Entry) sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) @@ -400,7 +400,7 @@ func TestWantlistRebroadcast(t *testing.T) { // All broadcast want-haves should have been sent firstMessage := messages[0] - if len(firstMessage.Wantlist()) != len(bcstwh) { + if len(firstMessage) != len(bcstwh) { t.Fatal("wrong number of wants") } @@ -413,7 +413,7 @@ func TestWantlistRebroadcast(t *testing.T) { // All the want-haves should have been rebroadcast firstMessage = messages[0] - if len(firstMessage.Wantlist()) != len(bcstwh) { + if len(firstMessage) != len(bcstwh) { t.Fatal("did not rebroadcast all wants") } @@ -429,7 +429,7 @@ func TestWantlistRebroadcast(t *testing.T) { // All new wants should have been sent firstMessage = messages[0] - if len(firstMessage.Wantlist()) != len(wantHaves)+len(wantBlocks) { + if len(firstMessage) != len(wantHaves)+len(wantBlocks) { t.Fatal("wrong number of wants") } @@ -440,7 +440,7 @@ func TestWantlistRebroadcast(t *testing.T) { // Both original and new wants should have been rebroadcast totalWants := len(bcstwh) + len(wantHaves) + len(wantBlocks) - if len(firstMessage.Wantlist()) != totalWants { + if len(firstMessage) != totalWants { t.Fatal("did not rebroadcast all wants") } @@ -455,10 +455,10 @@ func TestWantlistRebroadcast(t *testing.T) { // Cancels for each want should have been sent firstMessage = messages[0] - if 
len(firstMessage.Wantlist()) != len(cancels) { + if len(firstMessage) != len(cancels) { t.Fatal("wrong number of cancels") } - for _, entry := range firstMessage.Wantlist() { + for _, entry := range firstMessage { if !entry.Cancel { t.Fatal("expected cancels") } @@ -468,14 +468,14 @@ func TestWantlistRebroadcast(t *testing.T) { messageQueue.SetRebroadcastInterval(10 * time.Millisecond) messages = collectMessages(ctx, t, messagesSent, 15*time.Millisecond) firstMessage = messages[0] - if len(firstMessage.Wantlist()) != totalWants-len(cancels) { + if len(firstMessage) != totalWants-len(cancels) { t.Fatal("did not rebroadcast all wants") } } func TestSendingLargeMessages(t *testing.T) { ctx := context.Background() - messagesSent := make(chan bsmsg.BitSwapMessage) + messagesSent := make(chan []bsmsg.Entry) sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) @@ -506,7 +506,7 @@ func TestSendingLargeMessages(t *testing.T) { func TestSendToPeerThatDoesntSupportHave(t *testing.T) { ctx := context.Background() - messagesSent := make(chan bsmsg.BitSwapMessage) + messagesSent := make(chan []bsmsg.Entry) sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) @@ -530,7 +530,7 @@ func TestSendToPeerThatDoesntSupportHave(t *testing.T) { if len(messages) != 1 { t.Fatal("wrong number of messages were sent", len(messages)) } - wl := messages[0].Wantlist() + wl := messages[0] if len(wl) != len(bcwh) { t.Fatal("wrong number of entries in wantlist", len(wl)) } @@ -549,7 +549,7 @@ func TestSendToPeerThatDoesntSupportHave(t *testing.T) { if len(messages) != 1 { t.Fatal("wrong number of messages were sent", len(messages)) } - wl = messages[0].Wantlist() + wl = messages[0] if len(wl) != len(wbs) { t.Fatal("should only send want-blocks (no want-haves)", len(wl)) } @@ -562,7 +562,7 @@ func TestSendToPeerThatDoesntSupportHave(t *testing.T) { func 
TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { ctx := context.Background() - messagesSent := make(chan bsmsg.BitSwapMessage) + messagesSent := make(chan []bsmsg.Entry) sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) @@ -595,7 +595,7 @@ func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { func TestResendAfterError(t *testing.T) { ctx := context.Background() - messagesSent := make(chan bsmsg.BitSwapMessage) + messagesSent := make(chan []bsmsg.Entry) sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) @@ -634,7 +634,7 @@ func TestResendAfterError(t *testing.T) { func TestResendAfterMaxRetries(t *testing.T) { ctx := context.Background() - messagesSent := make(chan bsmsg.BitSwapMessage) + messagesSent := make(chan []bsmsg.Entry) sendErrors := make(chan error) resetChan := make(chan struct{}, maxRetries*2) fullClosedChan := make(chan struct{}, 1) @@ -713,7 +713,7 @@ func BenchmarkMessageQueue(b *testing.B) { ctx := context.Background() createQueue := func() *MessageQueue { - messagesSent := make(chan bsmsg.BitSwapMessage) + messagesSent := make(chan []bsmsg.Entry) sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) From 79064fb48a4fc3bd5a03575e5d1cca98bed79ce0 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 19 Mar 2020 11:27:44 -0400 Subject: [PATCH 0881/1035] refactor: reuse message queue message for perf This commit was moved from ipfs/go-bitswap@d2cb0fee4aec95ea3978fe76238aadcabed34089 --- bitswap/internal/messagequeue/messagequeue.go | 53 +++++++++---------- 1 file changed, 25 insertions(+), 28 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 61af02af3..b0b1efe49 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go 
@@ -179,6 +179,9 @@ func newMessageQueue(ctx context.Context, p peer.ID, network MessageNetwork, rebroadcastInterval: defaultRebroadcastInterval, sendErrorBackoff: sendErrorBackoff, priority: maxPriority, + // For performance reasons we just clear out the fields of the message + // after using it, instead of creating a new one every time. + msg: bsmsg.New(false), } return mq @@ -402,19 +405,23 @@ func (mq *MessageQueue) sendMessage() { mq.dhTimeoutMgr.Start() // Convert want lists to a Bitswap Message - message, onSent := mq.extractOutgoingMessage(mq.sender.SupportsHave()) - if message == nil || message.Empty() { + message := mq.extractOutgoingMessage(mq.sender.SupportsHave()) + + // After processing the message, clear out its fields to save memory + defer mq.msg.Reset(false) + + if message.Empty() { return } - mq.logOutgoingMessage(message) + wantlist := message.Wantlist() + mq.logOutgoingMessage(wantlist) // Try to send this message repeatedly for i := 0; i < maxRetries; i++ { if mq.attemptSendAndRecovery(message) { // We were able to send successfully. 
- wantlist := message.Wantlist() - onSent(wantlist) + mq.onMessageSent(wantlist) mq.simulateDontHaveWithTimeout(wantlist) @@ -457,15 +464,14 @@ func (mq *MessageQueue) simulateDontHaveWithTimeout(wantlist []bsmsg.Entry) { mq.dhTimeoutMgr.AddPending(wants) } -func (mq *MessageQueue) logOutgoingMessage(msg bsmsg.BitSwapMessage) { +func (mq *MessageQueue) logOutgoingMessage(wantlist []bsmsg.Entry) { // Save some CPU cycles and allocations if log level is higher than debug if ce := sflog.Check(zap.DebugLevel, "Bitswap -> send wants"); ce == nil { return } self := mq.network.Self() - entries := msg.Wantlist() - for _, e := range entries { + for _, e := range wantlist { if e.Cancel { if e.WantType == pb.Message_Wantlist_Have { log.Debugw("Bitswap -> cancel-have", "local", self, "to", mq.p, "cid", e.Cid) @@ -493,16 +499,7 @@ func (mq *MessageQueue) pendingWorkCount() int { return mq.bcstWants.pending.Len() + mq.peerWants.pending.Len() + mq.cancels.Len() } -func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwapMessage, func([]bsmsg.Entry)) { - // For performance reasons we just clear out the fields of the message - // instead of creating a new one every time. - if mq.msg == nil { - // Create a new message - mq.msg = bsmsg.New(false) - } else { - // If there's already a message, reset it - mq.msg.Reset(false) - } +func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) bsmsg.BitSwapMessage { msg := mq.msg mq.wllock.Lock() @@ -554,19 +551,19 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap mq.cancels.Remove(c) } - // Called when the message has been successfully sent. + return msg +} + +// Called when the message has been successfully sent. +func (mq *MessageQueue) onMessageSent(wantlist []bsmsg.Entry) { // Remove the sent keys from the broadcast and regular wantlists. 
- onSent := func(wantlist []bsmsg.Entry) { - mq.wllock.Lock() - defer mq.wllock.Unlock() + mq.wllock.Lock() + defer mq.wllock.Unlock() - for _, e := range wantlist { - mq.bcstWants.pending.Remove(e.Cid) - mq.peerWants.pending.RemoveType(e.Cid, e.WantType) - } + for _, e := range wantlist { + mq.bcstWants.pending.Remove(e.Cid) + mq.peerWants.pending.RemoveType(e.Cid, e.WantType) } - - return msg, onSent } func (mq *MessageQueue) initializeSender() error { From 1a4cd05bf9e76c2ec70802519d410cf9b8280fae Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 19 Mar 2020 11:28:24 -0400 Subject: [PATCH 0882/1035] fix: virtual net race This commit was moved from ipfs/go-bitswap@c5a6db7bf9d01441ed5f6ef9470230727e8104f5 --- bitswap/message/message.go | 23 +++++++++++++++++++++-- bitswap/message/message_test.go | 3 --- bitswap/testnet/virtual.go | 2 ++ 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 6b2fe533b..8377ea733 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -68,6 +68,9 @@ type BitSwapMessage interface { // Reset the values in the message back to defaults, so it can be reused Reset(bool) + + // Clone the message fields + Clone() BitSwapMessage } // Exportable is an interface for structures than can be @@ -130,13 +133,29 @@ func New(full bool) BitSwapMessage { func newMsg(full bool) *impl { return &impl{ + full: full, + wantlist: make(map[cid.Cid]*Entry), blocks: make(map[cid.Cid]blocks.Block), blockPresences: make(map[cid.Cid]pb.Message_BlockPresenceType), - wantlist: make(map[cid.Cid]*Entry), - full: full, } } +// Clone the message fields +func (m *impl) Clone() BitSwapMessage { + msg := newMsg(m.full) + for k := range m.wantlist { + msg.wantlist[k] = m.wantlist[k] + } + for k := range m.blocks { + msg.blocks[k] = m.blocks[k] + } + for k := range m.blockPresences { + msg.blockPresences[k] = m.blockPresences[k] + } + msg.pendingBytes = m.pendingBytes + return 
msg +} + // Reset the values in the message back to defaults, so it can be reused func (m *impl) Reset(full bool) { m.full = full diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 0d4b80108..caddc6c26 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -2,7 +2,6 @@ package message import ( "bytes" - "fmt" "testing" pb "github.com/ipfs/go-bitswap/message/pb" @@ -305,8 +304,6 @@ func TestEntrySize(t *testing.T) { SendDontHave: true, Cancel: false, } - fmt.Println(len(c.Bytes())) - fmt.Println(len(c.KeyString())) epb := e.ToPB() if e.Size() != epb.Size() { t.Fatal("entry size calculation incorrect", e.Size(), epb.Size()) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 1d1c7b796..1e472110f 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -128,6 +128,8 @@ func (n *network) SendMessage( to peer.ID, mes bsmsg.BitSwapMessage) error { + mes = mes.Clone() + n.mu.Lock() defer n.mu.Unlock() From 9528702d5286e15e8f060306c0dfa1e1628f5439 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 19 Mar 2020 11:38:54 -0400 Subject: [PATCH 0883/1035] refactor: small changes to message queue This commit was moved from ipfs/go-bitswap@b4763e2641ffbe8de611f8a3451d9f6943a79494 --- bitswap/internal/messagequeue/messagequeue.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index b0b1efe49..5debcd303 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -488,10 +488,12 @@ func (mq *MessageQueue) logOutgoingMessage(wantlist []bsmsg.Entry) { } } +// Whether there is work to be processed func (mq *MessageQueue) hasPendingWork() bool { return mq.pendingWorkCount() > 0 } +// The amount of work that is waiting to be processed func (mq *MessageQueue) pendingWorkCount() int { mq.wllock.Lock() 
defer mq.wllock.Unlock() @@ -499,9 +501,8 @@ func (mq *MessageQueue) pendingWorkCount() int { return mq.bcstWants.pending.Len() + mq.peerWants.pending.Len() + mq.cancels.Len() } +// Convert the lists of wants into a Bitswap message func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) bsmsg.BitSwapMessage { - msg := mq.msg - mq.wllock.Lock() defer mq.wllock.Unlock() @@ -524,7 +525,7 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) bsmsg.BitSwapM } e := bcstEntries[i] - msgSize += msg.AddEntry(e.Cid, e.Priority, wantType, false) + msgSize += mq.msg.AddEntry(e.Cid, e.Priority, wantType, false) } // Add each regular want-have / want-block to the message @@ -535,7 +536,7 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) bsmsg.BitSwapM if !supportsHave && e.WantType == pb.Message_Wantlist_Have { mq.peerWants.RemoveType(e.Cid, pb.Message_Wantlist_Have) } else { - msgSize += msg.AddEntry(e.Cid, e.Priority, e.WantType, true) + msgSize += mq.msg.AddEntry(e.Cid, e.Priority, e.WantType, true) } } @@ -544,14 +545,14 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) bsmsg.BitSwapM for i := 0; i < len(cancels) && msgSize < mq.maxMessageSize; i++ { c := cancels[i] - msgSize += msg.Cancel(c) + msgSize += mq.msg.Cancel(c) // Clear the cancel - we make a best effort to let peers know about // cancels but won't save them to resend if there's a failure. mq.cancels.Remove(c) } - return msg + return mq.msg } // Called when the message has been successfully sent. 
From 9b7a4e030fb83c0493bffb2057863467bb1bf29f Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Tue, 24 Mar 2020 12:03:36 -0400 Subject: [PATCH 0884/1035] fix: wait for sessionWantSender to shutdown before completing session shutdown This commit was moved from ipfs/go-bitswap@c3c0ad1b574c6bd3bba4546def4bd350c8db52fe --- bitswap/internal/session/session.go | 3 ++ bitswap/internal/session/sessionwantsender.go | 30 +++++++++++++------ 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index 412faba52..8646cfd70 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -387,6 +387,9 @@ func (s *Session) handleShutdown() { s.idleTick.Stop() // Shut down the session peer manager s.sprm.Shutdown() + // Shut down the sessionWantSender (blocks until sessionWantSender stops + // sending) + s.sws.Shutdown() // Remove the session from the want manager s.wm.RemoveSession(s.ctx, s.id) } diff --git a/bitswap/internal/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go index 7af7b32a4..c14ccd854 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -71,8 +71,11 @@ type onPeersExhaustedFn func([]cid.Cid) // consults the peer response tracker (records which peers sent us blocks). 
// type sessionWantSender struct { - // When the context is cancelled, sessionWantSender shuts down + // The context is used when sending wants ctx context.Context + // The sessionWantSender uses these channels when it's shutting down + closing chan struct{} + closed chan struct{} // The session ID sessionID uint64 // A channel that collects incoming changes (events) @@ -102,6 +105,8 @@ func newSessionWantSender(ctx context.Context, sid uint64, pm PeerManager, spm S sws := sessionWantSender{ ctx: ctx, + closing: make(chan struct{}), + closed: make(chan struct{}), sessionID: sid, changes: make(chan change, changesBufferSize), wants: make(map[cid.Cid]*wantInfo), @@ -157,26 +162,33 @@ func (sws *sessionWantSender) Run() { select { case ch := <-sws.changes: sws.onChange([]change{ch}) - case <-sws.ctx.Done(): - sws.shutdown() + case <-sws.closing: + // Close the 'closed' channel to signal to Shutdown() that the run + // loop has exited + close(sws.closed) return } } } +// Shutdown the sessionWantSender +func (sws *sessionWantSender) Shutdown() { + // Signal to the run loop to stop processing + close(sws.closing) + // Unregister the session with the PeerManager + sws.pm.UnregisterSession(sws.sessionID) + // Wait for run loop to complete + <-sws.closed +} + // addChange adds a new change to the queue func (sws *sessionWantSender) addChange(c change) { select { case sws.changes <- c: - case <-sws.ctx.Done(): + case <-sws.closing: } } -// shutdown unregisters the session with the PeerManager -func (sws *sessionWantSender) shutdown() { - sws.pm.UnregisterSession(sws.sessionID) -} - // collectChanges collects all the changes that have occurred since the last // invocation of onChange func (sws *sessionWantSender) collectChanges(changes []change) []change { From d44df4d2472e8ab49f4478e2abcd3ff44ffbcdee Mon Sep 17 00:00:00 2001 From: dirkmc Date: Tue, 24 Mar 2020 12:14:45 -0400 Subject: [PATCH 0885/1035] fix: flaky TestDontHaveTimeoutMgrTimeout (#320) This commit was moved from 
ipfs/go-bitswap@9bf0f256bb258d0ae575bd41a8f876d3421cc030 --- bitswap/internal/messagequeue/donthavetimeoutmgr_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go index 5c0de884f..03ceb4816 100644 --- a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go +++ b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go @@ -71,7 +71,7 @@ func (tr *timeoutRecorder) clear() { func TestDontHaveTimeoutMgrTimeout(t *testing.T) { firstks := testutil.GenerateCids(2) secondks := append(firstks, testutil.GenerateCids(3)...) - latency := time.Millisecond * 10 + latency := time.Millisecond * 20 latMultiplier := 2 expProcessTime := 5 * time.Millisecond expectedTimeout := expProcessTime + latency*time.Duration(latMultiplier) @@ -87,7 +87,7 @@ func TestDontHaveTimeoutMgrTimeout(t *testing.T) { dhtm.AddPending(firstks) // Wait for less than the expected timeout - time.Sleep(expectedTimeout - 5*time.Millisecond) + time.Sleep(expectedTimeout - 10*time.Millisecond) // At this stage no keys should have timed out if tr.timedOutCount() > 0 { @@ -98,7 +98,7 @@ func TestDontHaveTimeoutMgrTimeout(t *testing.T) { dhtm.AddPending(secondks) // Wait until after the expected timeout - time.Sleep(10 * time.Millisecond) + time.Sleep(20 * time.Millisecond) // At this stage first set of keys should have timed out if tr.timedOutCount() != len(firstks) { From 7bc480c9e64434d706742c972f64e24d16548840 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Tue, 24 Mar 2020 12:21:23 -0400 Subject: [PATCH 0886/1035] fix: flaky TestSendDontHave (#321) This commit was moved from ipfs/go-bitswap@128729834fdad77cd9da46f921ae8da4e0f7b012 --- bitswap/internal/decision/engine_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index 7dac95063..bdfa93623 100644 --- 
a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -935,7 +935,7 @@ func TestSendDontHave(t *testing.T) { // Nothing in blockstore, should get DONT_HAVE for entries that wanted it var next envChan - next, env := getNextEnvelope(e, next, 5*time.Millisecond) + next, env := getNextEnvelope(e, next, 10*time.Millisecond) if env == nil { t.Fatal("expected envelope") } @@ -965,7 +965,7 @@ func TestSendDontHave(t *testing.T) { e.ReceiveFrom(otherPeer, blks, []cid.Cid{}) // Envelope should contain 2 HAVEs / 2 blocks - _, env = getNextEnvelope(e, next, 5*time.Millisecond) + _, env = getNextEnvelope(e, next, 10*time.Millisecond) if env == nil { t.Fatal("expected envelope") } From eba8407b192066760f70d033c78721179fd7517d Mon Sep 17 00:00:00 2001 From: dirkmc Date: Tue, 24 Mar 2020 12:24:16 -0400 Subject: [PATCH 0887/1035] fix: flaky TestSendsWantBlockToOnePeerOnly (#323) This commit was moved from ipfs/go-bitswap@ae75342a08a3a1931643034578f0f5182015560a --- bitswap/internal/session/sessionwantsender_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go index 1a35c0eab..a791c6c6c 100644 --- a/bitswap/internal/session/sessionwantsender_test.go +++ b/bitswap/internal/session/sessionwantsender_test.go @@ -82,7 +82,7 @@ func (pm *mockPeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks } func (pm *mockPeerManager) waitNextWants() map[peer.ID]*sentWants { - time.Sleep(5 * time.Millisecond) + time.Sleep(10 * time.Millisecond) pm.lk.Lock() defer pm.lk.Unlock() From d49bbd2fb2738c94ab7b549ba5c5160456b31f36 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Tue, 24 Mar 2020 13:06:41 -0400 Subject: [PATCH 0888/1035] refactor: simplify sessionWantSender shutdown This commit was moved from ipfs/go-bitswap@ac258abca9bfb30aedaea2604046f04e7a976b5d --- bitswap/internal/session/sessionwantsender.go | 22 
+++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/bitswap/internal/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go index c14ccd854..ff31ca0ac 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -73,9 +73,11 @@ type onPeersExhaustedFn func([]cid.Cid) type sessionWantSender struct { // The context is used when sending wants ctx context.Context - // The sessionWantSender uses these channels when it's shutting down - closing chan struct{} - closed chan struct{} + // Called to shutdown the sessionWantSender + shutdown func() + // The sessionWantSender uses the close channel to signal when it's + // finished shutting down + closed chan struct{} // The session ID sessionID uint64 // A channel that collects incoming changes (events) @@ -103,9 +105,10 @@ type sessionWantSender struct { func newSessionWantSender(ctx context.Context, sid uint64, pm PeerManager, spm SessionPeerManager, bpm *bsbpm.BlockPresenceManager, onSend onSendFn, onPeersExhausted onPeersExhaustedFn) sessionWantSender { + ctx, cancel := context.WithCancel(ctx) sws := sessionWantSender{ ctx: ctx, - closing: make(chan struct{}), + shutdown: cancel, closed: make(chan struct{}), sessionID: sid, changes: make(chan change, changesBufferSize), @@ -162,7 +165,10 @@ func (sws *sessionWantSender) Run() { select { case ch := <-sws.changes: sws.onChange([]change{ch}) - case <-sws.closing: + case <-sws.ctx.Done(): + // Unregister the session with the PeerManager + sws.pm.UnregisterSession(sws.sessionID) + // Close the 'closed' channel to signal to Shutdown() that the run // loop has exited close(sws.closed) @@ -174,9 +180,7 @@ func (sws *sessionWantSender) Run() { // Shutdown the sessionWantSender func (sws *sessionWantSender) Shutdown() { // Signal to the run loop to stop processing - close(sws.closing) - // Unregister the session with the PeerManager - sws.pm.UnregisterSession(sws.sessionID) + 
sws.shutdown() // Wait for run loop to complete <-sws.closed } @@ -185,7 +189,7 @@ func (sws *sessionWantSender) Shutdown() { func (sws *sessionWantSender) addChange(c change) { select { case sws.changes <- c: - case <-sws.closing: + case <-sws.ctx.Done(): } } From 22154ff6509d752e21dbb1050ab641a0272cd9a4 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Tue, 24 Mar 2020 13:11:00 -0400 Subject: [PATCH 0889/1035] refactor: use internal context in sessionWantSender This commit was moved from ipfs/go-bitswap@70c3111e884d8aad2953ab8d96fe9d5d8e775567 --- bitswap/internal/session/session.go | 2 +- bitswap/internal/session/sessionwantsender.go | 6 ++--- .../session/sessionwantsender_test.go | 22 +++++++++---------- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index 8646cfd70..34a7375c2 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -159,7 +159,7 @@ func New(ctx context.Context, periodicSearchDelay: periodicSearchDelay, self: self, } - s.sws = newSessionWantSender(ctx, id, pm, sprm, bpm, s.onWantsSent, s.onPeersExhausted) + s.sws = newSessionWantSender(id, pm, sprm, bpm, s.onWantsSent, s.onPeersExhausted) go s.run(ctx) diff --git a/bitswap/internal/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go index ff31ca0ac..8ccba8f80 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -75,7 +75,7 @@ type sessionWantSender struct { ctx context.Context // Called to shutdown the sessionWantSender shutdown func() - // The sessionWantSender uses the close channel to signal when it's + // The sessionWantSender uses the closed channel to signal when it's // finished shutting down closed chan struct{} // The session ID @@ -102,10 +102,10 @@ type sessionWantSender struct { onPeersExhausted onPeersExhaustedFn } -func newSessionWantSender(ctx context.Context, sid 
uint64, pm PeerManager, spm SessionPeerManager, +func newSessionWantSender(sid uint64, pm PeerManager, spm SessionPeerManager, bpm *bsbpm.BlockPresenceManager, onSend onSendFn, onPeersExhausted onPeersExhaustedFn) sessionWantSender { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(context.Background()) sws := sessionWantSender{ ctx: ctx, shutdown: cancel, diff --git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go index 1a35c0eab..821751ae0 100644 --- a/bitswap/internal/session/sessionwantsender_test.go +++ b/bitswap/internal/session/sessionwantsender_test.go @@ -138,7 +138,7 @@ func TestSendWants(t *testing.T) { bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() @@ -176,7 +176,7 @@ func TestSendsWantBlockToOnePeerOnly(t *testing.T) { bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() @@ -234,7 +234,7 @@ func TestReceiveBlock(t *testing.T) { bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() @@ -294,7 +294,7 @@ func TestPeerUnavailable(t *testing.T) { bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) + spm := 
newSessionWantSender(sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() @@ -360,7 +360,7 @@ func TestPeersExhausted(t *testing.T) { onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} ep := exhaustedPeers{} - spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, ep.onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, ep.onPeersExhausted) go spm.Run() @@ -436,7 +436,7 @@ func TestPeersExhaustedLastWaitingPeerUnavailable(t *testing.T) { onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} ep := exhaustedPeers{} - spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, ep.onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, ep.onPeersExhausted) go spm.Run() @@ -484,7 +484,7 @@ func TestPeersExhaustedAllPeersUnavailable(t *testing.T) { onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} ep := exhaustedPeers{} - spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, ep.onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, ep.onPeersExhausted) go spm.Run() @@ -522,7 +522,7 @@ func TestConsecutiveDontHaveLimit(t *testing.T) { bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() @@ -578,7 +578,7 @@ func TestConsecutiveDontHaveLimitInterrupted(t *testing.T) { bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() @@ -633,7 +633,7 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted 
:= func([]cid.Cid) {} - spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() @@ -717,7 +717,7 @@ func TestConsecutiveDontHaveDontRemoveIfHasWantedBlock(t *testing.T) { bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(context.Background(), sid, pm, fpm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, onPeersExhausted) go spm.Run() From d3b72b4be61297a3e0eeaa56b5239c4619ef0f1c Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 24 Mar 2020 10:40:55 -0700 Subject: [PATCH 0890/1035] chore: make pwm internals private (#315) This makes it easier to tell where module boundaries are. This commit was moved from ipfs/go-bitswap@7348b26c710261d2cd7e9871b85e934a69e1cd7e --- bitswap/internal/peermanager/peermanager.go | 20 +++--- .../internal/peermanager/peerwantmanager.go | 16 ++--- .../peermanager/peerwantmanager_test.go | 70 +++++++++---------- bitswap/internal/wantmanager/wantmanager.go | 2 +- 4 files changed, 54 insertions(+), 54 deletions(-) diff --git a/bitswap/internal/peermanager/peermanager.go b/bitswap/internal/peermanager/peermanager.go index 726d4be77..5af98875c 100644 --- a/bitswap/internal/peermanager/peermanager.go +++ b/bitswap/internal/peermanager/peermanager.go @@ -94,11 +94,11 @@ func (pm *PeerManager) Connected(p peer.ID, initialWantHaves []cid.Cid) { // If this is the first connection to the peer if pq.refcnt == 1 { // Inform the peer want manager that there's a new peer - pm.pwm.AddPeer(p) + pm.pwm.addPeer(p) // Record that the want-haves are being sent to the peer - pm.pwm.PrepareSendWants(p, nil, initialWantHaves) + _, wantHaves := pm.pwm.prepareSendWants(p, nil, initialWantHaves) // Broadcast any live want-haves to the newly connected peers - pq.pq.AddBroadcastWantHaves(initialWantHaves) + 
pq.pq.AddBroadcastWantHaves(wantHaves) // Inform the sessions that the peer has connected pm.signalAvailability(p, true) } @@ -126,7 +126,7 @@ func (pm *PeerManager) Disconnected(p peer.ID) { // Clean up the peer delete(pm.peerQueues, p) pq.pq.Shutdown() - pm.pwm.RemovePeer(p) + pm.pwm.removePeer(p) } // BroadcastWantHaves broadcasts want-haves to all peers (used by the session @@ -137,7 +137,7 @@ func (pm *PeerManager) BroadcastWantHaves(ctx context.Context, wantHaves []cid.C pm.pqLk.Lock() defer pm.pqLk.Unlock() - for p, ks := range pm.pwm.PrepareBroadcastWantHaves(wantHaves) { + for p, ks := range pm.pwm.prepareBroadcastWantHaves(wantHaves) { if pqi, ok := pm.peerQueues[p]; ok { pqi.pq.AddBroadcastWantHaves(ks) } @@ -151,7 +151,7 @@ func (pm *PeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []ci defer pm.pqLk.Unlock() if pqi, ok := pm.peerQueues[p]; ok { - wblks, whvs := pm.pwm.PrepareSendWants(p, wantBlocks, wantHaves) + wblks, whvs := pm.pwm.prepareSendWants(p, wantBlocks, wantHaves) pqi.pq.AddWants(wblks, whvs) } } @@ -163,7 +163,7 @@ func (pm *PeerManager) SendCancels(ctx context.Context, cancelKs []cid.Cid) { defer pm.pqLk.Unlock() // Send a CANCEL to each peer that has been sent a want-block or want-have - for p, ks := range pm.pwm.PrepareSendCancels(cancelKs) { + for p, ks := range pm.pwm.prepareSendCancels(cancelKs) { if pqi, ok := pm.peerQueues[p]; ok { pqi.pq.AddCancels(ks) } @@ -175,7 +175,7 @@ func (pm *PeerManager) CurrentWants() []cid.Cid { pm.pqLk.RLock() defer pm.pqLk.RUnlock() - return pm.pwm.GetWants() + return pm.pwm.getWants() } // CurrentWantBlocks returns the list of pending want-blocks @@ -183,7 +183,7 @@ func (pm *PeerManager) CurrentWantBlocks() []cid.Cid { pm.pqLk.RLock() defer pm.pqLk.RUnlock() - return pm.pwm.GetWantBlocks() + return pm.pwm.getWantBlocks() } // CurrentWantHaves returns the list of pending want-haves @@ -191,7 +191,7 @@ func (pm *PeerManager) CurrentWantHaves() []cid.Cid { pm.pqLk.RLock() defer 
pm.pqLk.RUnlock() - return pm.pwm.GetWantHaves() + return pm.pwm.getWantHaves() } func (pm *PeerManager) getOrCreate(p peer.ID) *peerQueueInstance { diff --git a/bitswap/internal/peermanager/peerwantmanager.go b/bitswap/internal/peermanager/peerwantmanager.go index 27e37ccd9..b4b87482b 100644 --- a/bitswap/internal/peermanager/peerwantmanager.go +++ b/bitswap/internal/peermanager/peerwantmanager.go @@ -39,7 +39,7 @@ func newPeerWantManager(wantBlockGauge Gauge) *peerWantManager { } // AddPeer adds a peer whose wants we need to keep track of -func (pwm *peerWantManager) AddPeer(p peer.ID) { +func (pwm *peerWantManager) addPeer(p peer.ID) { if _, ok := pwm.peerWants[p]; !ok { pwm.peerWants[p] = &peerWant{ wantBlocks: cid.NewSet(), @@ -49,13 +49,13 @@ func (pwm *peerWantManager) AddPeer(p peer.ID) { } // RemovePeer removes a peer and its associated wants from tracking -func (pwm *peerWantManager) RemovePeer(p peer.ID) { +func (pwm *peerWantManager) removePeer(p peer.ID) { delete(pwm.peerWants, p) } // PrepareBroadcastWantHaves filters the list of want-haves for each peer, // returning a map of peers to the want-haves they have not yet been sent. -func (pwm *peerWantManager) PrepareBroadcastWantHaves(wantHaves []cid.Cid) map[peer.ID][]cid.Cid { +func (pwm *peerWantManager) prepareBroadcastWantHaves(wantHaves []cid.Cid) map[peer.ID][]cid.Cid { res := make(map[peer.ID][]cid.Cid) // Iterate over all known peers @@ -81,7 +81,7 @@ func (pwm *peerWantManager) PrepareBroadcastWantHaves(wantHaves []cid.Cid) map[p // PrepareSendWants filters the list of want-blocks and want-haves such that // it only contains wants that have not already been sent to the peer. 
-func (pwm *peerWantManager) PrepareSendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) ([]cid.Cid, []cid.Cid) { +func (pwm *peerWantManager) prepareSendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) ([]cid.Cid, []cid.Cid) { resWantBlks := make([]cid.Cid, 0) resWantHvs := make([]cid.Cid, 0) @@ -124,7 +124,7 @@ func (pwm *peerWantManager) PrepareSendWants(p peer.ID, wantBlocks []cid.Cid, wa // PrepareSendCancels filters the list of cancels for each peer, // returning a map of peers which only contains cancels for wants that have // been sent to the peer. -func (pwm *peerWantManager) PrepareSendCancels(cancelKs []cid.Cid) map[peer.ID][]cid.Cid { +func (pwm *peerWantManager) prepareSendCancels(cancelKs []cid.Cid) map[peer.ID][]cid.Cid { res := make(map[peer.ID][]cid.Cid) // Iterate over all known peers @@ -158,7 +158,7 @@ func (pwm *peerWantManager) PrepareSendCancels(cancelKs []cid.Cid) map[peer.ID][ } // GetWantBlocks returns the set of all want-blocks sent to all peers -func (pwm *peerWantManager) GetWantBlocks() []cid.Cid { +func (pwm *peerWantManager) getWantBlocks() []cid.Cid { res := cid.NewSet() // Iterate over all known peers @@ -174,7 +174,7 @@ func (pwm *peerWantManager) GetWantBlocks() []cid.Cid { } // GetWantHaves returns the set of all want-haves sent to all peers -func (pwm *peerWantManager) GetWantHaves() []cid.Cid { +func (pwm *peerWantManager) getWantHaves() []cid.Cid { res := cid.NewSet() // Iterate over all known peers @@ -190,7 +190,7 @@ func (pwm *peerWantManager) GetWantHaves() []cid.Cid { } // GetWants returns the set of all wants (both want-blocks and want-haves). 
-func (pwm *peerWantManager) GetWants() []cid.Cid { +func (pwm *peerWantManager) getWants() []cid.Cid { res := cid.NewSet() // Iterate over all known peers diff --git a/bitswap/internal/peermanager/peerwantmanager_test.go b/bitswap/internal/peermanager/peerwantmanager_test.go index 0172a6816..9cfa9410f 100644 --- a/bitswap/internal/peermanager/peerwantmanager_test.go +++ b/bitswap/internal/peermanager/peerwantmanager_test.go @@ -22,10 +22,10 @@ func (g *gauge) Dec() { func TestEmpty(t *testing.T) { pwm := newPeerWantManager(&gauge{}) - if len(pwm.GetWantBlocks()) > 0 { + if len(pwm.getWantBlocks()) > 0 { t.Fatal("Expected GetWantBlocks() to have length 0") } - if len(pwm.GetWantHaves()) > 0 { + if len(pwm.getWantHaves()) > 0 { t.Fatal("Expected GetWantHaves() to have length 0") } } @@ -38,11 +38,11 @@ func TestPrepareBroadcastWantHaves(t *testing.T) { cids2 := testutil.GenerateCids(2) cids3 := testutil.GenerateCids(2) - pwm.AddPeer(peers[0]) - pwm.AddPeer(peers[1]) + pwm.addPeer(peers[0]) + pwm.addPeer(peers[1]) // Broadcast 2 cids to 2 peers - bcst := pwm.PrepareBroadcastWantHaves(cids) + bcst := pwm.prepareBroadcastWantHaves(cids) if len(bcst) != 2 { t.Fatal("Expected 2 peers") } @@ -53,13 +53,13 @@ func TestPrepareBroadcastWantHaves(t *testing.T) { } // Broadcasting same cids should have no effect - bcst2 := pwm.PrepareBroadcastWantHaves(cids) + bcst2 := pwm.prepareBroadcastWantHaves(cids) if len(bcst2) != 0 { t.Fatal("Expected 0 peers") } // Broadcast 2 other cids - bcst3 := pwm.PrepareBroadcastWantHaves(cids2) + bcst3 := pwm.prepareBroadcastWantHaves(cids2) if len(bcst3) != 2 { t.Fatal("Expected 2 peers") } @@ -70,7 +70,7 @@ func TestPrepareBroadcastWantHaves(t *testing.T) { } // Broadcast mix of old and new cids - bcst4 := pwm.PrepareBroadcastWantHaves(append(cids, cids3...)) + bcst4 := pwm.prepareBroadcastWantHaves(append(cids, cids3...)) if len(bcst4) != 2 { t.Fatal("Expected 2 peers") } @@ -84,9 +84,9 @@ func TestPrepareBroadcastWantHaves(t *testing.T) { 
// Sending want-block for a cid should prevent broadcast to that peer cids4 := testutil.GenerateCids(4) wantBlocks := []cid.Cid{cids4[0], cids4[2]} - pwm.PrepareSendWants(peers[0], wantBlocks, []cid.Cid{}) + pwm.prepareSendWants(peers[0], wantBlocks, []cid.Cid{}) - bcst5 := pwm.PrepareBroadcastWantHaves(cids4) + bcst5 := pwm.prepareBroadcastWantHaves(cids4) if len(bcst4) != 2 { t.Fatal("Expected 2 peers") } @@ -105,8 +105,8 @@ func TestPrepareBroadcastWantHaves(t *testing.T) { } // Add another peer - pwm.AddPeer(peers[2]) - bcst6 := pwm.PrepareBroadcastWantHaves(cids) + pwm.addPeer(peers[2]) + bcst6 := pwm.prepareBroadcastWantHaves(cids) if len(bcst6) != 1 { t.Fatal("Expected 1 peer") } @@ -126,11 +126,11 @@ func TestPrepareSendWants(t *testing.T) { cids := testutil.GenerateCids(2) cids2 := testutil.GenerateCids(2) - pwm.AddPeer(p0) - pwm.AddPeer(p1) + pwm.addPeer(p0) + pwm.addPeer(p1) // Send 2 want-blocks and 2 want-haves to p0 - wb, wh := pwm.PrepareSendWants(p0, cids, cids2) + wb, wh := pwm.prepareSendWants(p0, cids, cids2) if !testutil.MatchKeysIgnoreOrder(wb, cids) { t.Fatal("Expected 2 want-blocks") } @@ -143,7 +143,7 @@ func TestPrepareSendWants(t *testing.T) { // - 1 old want-have and 2 new want-haves cids3 := testutil.GenerateCids(2) cids4 := testutil.GenerateCids(2) - wb2, wh2 := pwm.PrepareSendWants(p0, append(cids3, cids[0]), append(cids4, cids2[0])) + wb2, wh2 := pwm.prepareSendWants(p0, append(cids3, cids[0]), append(cids4, cids2[0])) if !testutil.MatchKeysIgnoreOrder(wb2, cids3) { t.Fatal("Expected 2 want-blocks") } @@ -154,7 +154,7 @@ func TestPrepareSendWants(t *testing.T) { // Send to p0 as want-blocks: 1 new want-block, 1 old want-have cids5 := testutil.GenerateCids(1) newWantBlockOldWantHave := append(cids5, cids2[0]) - wb3, wh3 := pwm.PrepareSendWants(p0, newWantBlockOldWantHave, []cid.Cid{}) + wb3, wh3 := pwm.prepareSendWants(p0, newWantBlockOldWantHave, []cid.Cid{}) // If a want was sent as a want-have, it should be ok to now send it as a // 
want-block if !testutil.MatchKeysIgnoreOrder(wb3, newWantBlockOldWantHave) { @@ -167,7 +167,7 @@ func TestPrepareSendWants(t *testing.T) { // Send to p0 as want-haves: 1 new want-have, 1 old want-block cids6 := testutil.GenerateCids(1) newWantHaveOldWantBlock := append(cids6, cids[0]) - wb4, wh4 := pwm.PrepareSendWants(p0, []cid.Cid{}, newWantHaveOldWantBlock) + wb4, wh4 := pwm.prepareSendWants(p0, []cid.Cid{}, newWantHaveOldWantBlock) // If a want was previously sent as a want-block, it should not be // possible to now send it as a want-have if !testutil.MatchKeysIgnoreOrder(wh4, cids6) { @@ -178,7 +178,7 @@ func TestPrepareSendWants(t *testing.T) { } // Send 2 want-blocks and 2 want-haves to p1 - wb5, wh5 := pwm.PrepareSendWants(p1, cids, cids2) + wb5, wh5 := pwm.prepareSendWants(p1, cids, cids2) if !testutil.MatchKeysIgnoreOrder(wb5, cids) { t.Fatal("Expected 2 want-blocks") } @@ -200,24 +200,24 @@ func TestPrepareSendCancels(t *testing.T) { allwb := append(wb1, wb2...) allwh := append(wh1, wh2...) 
- pwm.AddPeer(p0) - pwm.AddPeer(p1) + pwm.addPeer(p0) + pwm.addPeer(p1) // Send 2 want-blocks and 2 want-haves to p0 - pwm.PrepareSendWants(p0, wb1, wh1) + pwm.prepareSendWants(p0, wb1, wh1) // Send 3 want-blocks and 3 want-haves to p1 // (1 overlapping want-block / want-have with p0) - pwm.PrepareSendWants(p1, append(wb2, wb1[1]), append(wh2, wh1[1])) + pwm.prepareSendWants(p1, append(wb2, wb1[1]), append(wh2, wh1[1])) - if !testutil.MatchKeysIgnoreOrder(pwm.GetWantBlocks(), allwb) { + if !testutil.MatchKeysIgnoreOrder(pwm.getWantBlocks(), allwb) { t.Fatal("Expected 4 cids to be wanted") } - if !testutil.MatchKeysIgnoreOrder(pwm.GetWantHaves(), allwh) { + if !testutil.MatchKeysIgnoreOrder(pwm.getWantHaves(), allwh) { t.Fatal("Expected 4 cids to be wanted") } // Cancel 1 want-block and 1 want-have that were sent to p0 - res := pwm.PrepareSendCancels([]cid.Cid{wb1[0], wh1[0]}) + res := pwm.prepareSendCancels([]cid.Cid{wb1[0], wh1[0]}) // Should cancel the want-block and want-have if len(res) != 1 { t.Fatal("Expected 1 peer") @@ -225,16 +225,16 @@ func TestPrepareSendCancels(t *testing.T) { if !testutil.MatchKeysIgnoreOrder(res[p0], []cid.Cid{wb1[0], wh1[0]}) { t.Fatal("Expected 2 cids to be cancelled") } - if !testutil.MatchKeysIgnoreOrder(pwm.GetWantBlocks(), append(wb2, wb1[1])) { + if !testutil.MatchKeysIgnoreOrder(pwm.getWantBlocks(), append(wb2, wb1[1])) { t.Fatal("Expected 3 want-blocks") } - if !testutil.MatchKeysIgnoreOrder(pwm.GetWantHaves(), append(wh2, wh1[1])) { + if !testutil.MatchKeysIgnoreOrder(pwm.getWantHaves(), append(wh2, wh1[1])) { t.Fatal("Expected 3 want-haves") } // Cancel everything allCids := append(allwb, allwh...) 
- res2 := pwm.PrepareSendCancels(allCids) + res2 := pwm.prepareSendCancels(allCids) // Should cancel the remaining want-blocks and want-haves if len(res2) != 2 { t.Fatal("Expected 2 peers", len(res2)) @@ -247,10 +247,10 @@ func TestPrepareSendCancels(t *testing.T) { if !testutil.MatchKeysIgnoreOrder(res2[p1], remainingP2) { t.Fatal("Expected un-cancelled cids to be cancelled") } - if len(pwm.GetWantBlocks()) != 0 { + if len(pwm.getWantBlocks()) != 0 { t.Fatal("Expected 0 want-blocks") } - if len(pwm.GetWantHaves()) != 0 { + if len(pwm.getWantHaves()) != 0 { t.Fatal("Expected 0 want-haves") } } @@ -264,10 +264,10 @@ func TestStats(t *testing.T) { cids := testutil.GenerateCids(2) cids2 := testutil.GenerateCids(2) - pwm.AddPeer(p0) + pwm.addPeer(p0) // Send 2 want-blocks and 2 want-haves to p0 - pwm.PrepareSendWants(p0, cids, cids2) + pwm.prepareSendWants(p0, cids, cids2) if g.count != 2 { t.Fatal("Expected 2 want-blocks") @@ -275,7 +275,7 @@ func TestStats(t *testing.T) { // Send 1 old want-block and 2 new want-blocks to p0 cids3 := testutil.GenerateCids(2) - pwm.PrepareSendWants(p0, append(cids3, cids[0]), []cid.Cid{}) + pwm.prepareSendWants(p0, append(cids3, cids[0]), []cid.Cid{}) if g.count != 4 { t.Fatal("Expected 4 want-blocks") @@ -284,7 +284,7 @@ func TestStats(t *testing.T) { // Cancel 1 want-block that was sent to p0 // and 1 want-block that was not sent cids4 := testutil.GenerateCids(1) - pwm.PrepareSendCancels(append(cids4, cids[0])) + pwm.prepareSendCancels(append(cids4, cids[0])) if g.count != 3 { t.Fatal("Expected 3 want-blocks", g.count) diff --git a/bitswap/internal/wantmanager/wantmanager.go b/bitswap/internal/wantmanager/wantmanager.go index b34056b14..908f9dca3 100644 --- a/bitswap/internal/wantmanager/wantmanager.go +++ b/bitswap/internal/wantmanager/wantmanager.go @@ -89,7 +89,7 @@ func (wm *WantManager) BroadcastWantHaves(ctx context.Context, ses uint64, wantH // RemoveSession is called when the session is shut down func (wm *WantManager) 
RemoveSession(ctx context.Context, ses uint64) { - // Remove session's interest in the given blocks + // Remove session's interest in the given blocks. cancelKs := wm.sim.RemoveSessionInterest(ses) // Remove broadcast want-haves for session From 06ec90146b2b09ed4e0238a5fe97c75acc6d62e7 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Tue, 24 Mar 2020 13:52:48 -0400 Subject: [PATCH 0891/1035] fix: log unexpected condition in peerWantManager.prepareSendWants() This commit was moved from ipfs/go-bitswap@fd0e1ff627933ce4e1d52ea24544c8871fa15dae --- bitswap/internal/peermanager/peermanager.go | 3 + .../internal/peermanager/peerwantmanager.go | 55 +++++++++++-------- 2 files changed, 35 insertions(+), 23 deletions(-) diff --git a/bitswap/internal/peermanager/peermanager.go b/bitswap/internal/peermanager/peermanager.go index 5af98875c..c2159b198 100644 --- a/bitswap/internal/peermanager/peermanager.go +++ b/bitswap/internal/peermanager/peermanager.go @@ -4,12 +4,15 @@ import ( "context" "sync" + logging "github.com/ipfs/go-log" "github.com/ipfs/go-metrics-interface" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" ) +var log = logging.Logger("bs:peermgr") + // PeerQueue provides a queue of messages to be sent for a single peer. 
type PeerQueue interface { AddBroadcastWantHaves([]cid.Cid) diff --git a/bitswap/internal/peermanager/peerwantmanager.go b/bitswap/internal/peermanager/peerwantmanager.go index b4b87482b..b0c843a2e 100644 --- a/bitswap/internal/peermanager/peerwantmanager.go +++ b/bitswap/internal/peermanager/peerwantmanager.go @@ -86,35 +86,44 @@ func (pwm *peerWantManager) prepareSendWants(p peer.ID, wantBlocks []cid.Cid, wa resWantHvs := make([]cid.Cid, 0) // Get the existing want-blocks and want-haves for the peer - if pws, ok := pwm.peerWants[p]; ok { - // Iterate over the requested want-blocks - for _, c := range wantBlocks { - // If the want-block hasn't been sent to the peer - if !pws.wantBlocks.Has(c) { - // Record that the CID was sent as a want-block - pws.wantBlocks.Add(c) + pws, ok := pwm.peerWants[p] + + if !ok { + // In practice this should never happen: + // - PeerManager calls addPeer() as soon as the peer connects + // - PeerManager calls removePeer() as soon as the peer disconnects + // - All calls to PeerWantManager are locked + log.Errorf("prepareSendWants() called with peer %s but peer not found in peerWantManager", string(p)) + return resWantBlks, resWantHvs + } - // Add the CID to the results - resWantBlks = append(resWantBlks, c) + // Iterate over the requested want-blocks + for _, c := range wantBlocks { + // If the want-block hasn't been sent to the peer + if !pws.wantBlocks.Has(c) { + // Record that the CID was sent as a want-block + pws.wantBlocks.Add(c) - // Make sure the CID is no longer recorded as a want-have - pws.wantHaves.Remove(c) + // Add the CID to the results + resWantBlks = append(resWantBlks, c) - // Increment the count of want-blocks - pwm.wantBlockGauge.Inc() - } + // Make sure the CID is no longer recorded as a want-have + pws.wantHaves.Remove(c) + + // Increment the count of want-blocks + pwm.wantBlockGauge.Inc() } + } - // Iterate over the requested want-haves - for _, c := range wantHaves { - // If the CID has not been sent as a 
want-block or want-have - if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { - // Record that the CID was sent as a want-have - pws.wantHaves.Add(c) + // Iterate over the requested want-haves + for _, c := range wantHaves { + // If the CID has not been sent as a want-block or want-have + if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { + // Record that the CID was sent as a want-have + pws.wantHaves.Add(c) - // Add the CID to the results - resWantHvs = append(resWantHvs, c) - } + // Add the CID to the results + resWantHvs = append(resWantHvs, c) } } From cc36c771e27b8d8b85a38ca711c0063b2fa51830 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Tue, 24 Mar 2020 14:16:24 -0400 Subject: [PATCH 0892/1035] fix: race in SessionInterestManager (#324) This commit was moved from ipfs/go-bitswap@288ceffbe3bf47307fe41f9ccfdc532aeab6228b --- .../sessioninterestmanager.go | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/bitswap/internal/sessioninterestmanager/sessioninterestmanager.go b/bitswap/internal/sessioninterestmanager/sessioninterestmanager.go index e85a645b9..46888c9ad 100644 --- a/bitswap/internal/sessioninterestmanager/sessioninterestmanager.go +++ b/bitswap/internal/sessioninterestmanager/sessioninterestmanager.go @@ -1,13 +1,17 @@ package sessioninterestmanager import ( + "sync" + bsswl "github.com/ipfs/go-bitswap/internal/sessionwantlist" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" ) +// SessionInterestManager records the CIDs that each session is interested in. type SessionInterestManager struct { + lk sync.RWMutex interested *bsswl.SessionWantlist wanted *bsswl.SessionWantlist } @@ -20,21 +24,39 @@ func New() *SessionInterestManager { } } +// When the client asks the session for blocks, the session calls +// RecordSessionInterest() with those cids. 
func (sim *SessionInterestManager) RecordSessionInterest(ses uint64, ks []cid.Cid) { + sim.lk.Lock() + defer sim.lk.Unlock() + sim.interested.Add(ks, ses) sim.wanted.Add(ks, ses) } +// When the session shuts down it calls RemoveSessionInterest(). func (sim *SessionInterestManager) RemoveSessionInterest(ses uint64) []cid.Cid { + sim.lk.Lock() + defer sim.lk.Unlock() + sim.wanted.RemoveSession(ses) return sim.interested.RemoveSession(ses) } +// When the session receives blocks, it calls RemoveSessionWants(). func (sim *SessionInterestManager) RemoveSessionWants(ses uint64, wants []cid.Cid) { + sim.lk.Lock() + defer sim.lk.Unlock() + sim.wanted.RemoveSessionKeys(ses, wants) } +// The session calls FilterSessionInterested() to filter the sets of keys for +// those that the session is interested in func (sim *SessionInterestManager) FilterSessionInterested(ses uint64, ksets ...[]cid.Cid) [][]cid.Cid { + sim.lk.RLock() + defer sim.lk.RUnlock() + kres := make([][]cid.Cid, len(ksets)) for i, ks := range ksets { kres[i] = sim.interested.SessionHas(ses, ks).Keys() @@ -42,7 +64,12 @@ func (sim *SessionInterestManager) FilterSessionInterested(ses uint64, ksets ... return kres } +// When bitswap receives blocks it calls SplitWantedUnwanted() to discard +// unwanted blocks func (sim *SessionInterestManager) SplitWantedUnwanted(blks []blocks.Block) ([]blocks.Block, []blocks.Block) { + sim.lk.RLock() + defer sim.lk.RUnlock() + // Get the wanted block keys ks := make([]cid.Cid, len(blks)) for _, b := range blks { @@ -63,7 +90,12 @@ func (sim *SessionInterestManager) SplitWantedUnwanted(blks []blocks.Block) ([]b return wantedBlks, notWantedBlks } +// When the WantManager receives a message is calls InterestedSessions() to +// find out which sessions are interested in the message. 
func (sim *SessionInterestManager) InterestedSessions(blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) []uint64 { + sim.lk.RLock() + defer sim.lk.RUnlock() + ks := make([]cid.Cid, 0, len(blks)+len(haves)+len(dontHaves)) ks = append(ks, blks...) ks = append(ks, haves...) From ad5127ab2b1854b4ecf408d9f378a1917b0d259c Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 25 Mar 2020 17:26:24 -0700 Subject: [PATCH 0893/1035] chore: address todo in engine.go This commit was moved from ipfs/go-bitswap@3895cc0a4ebf765d69b9a7c9068a6a567425ab11 --- bitswap/internal/decision/engine.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 6fe8875cd..b744cb543 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -733,8 +733,7 @@ func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) { // Remove sent block presences from the want list for the peer for _, bp := range m.BlockPresences() { - // TODO: record block presence bytes as well? - // l.SentBytes(?) + // Don't record sent data. We reserve that for data blocks. if bp.Type == pb.Message_Have { l.wantList.RemoveType(bp.Cid, pb.Message_Wantlist_Have) } From 780ca219c7772ffd6c65208aa6d66d97f63d0373 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 25 Mar 2020 17:35:23 -0700 Subject: [PATCH 0894/1035] fix: avoid copying messages multiple times on send Also, reduce the overhead from logging. 
This commit was moved from ipfs/go-bitswap@484399b464a28b75281c40ff7ccc33ddd54a54ad --- bitswap/bitswap.go | 1 + bitswap/workers.go | 90 +++++++++++++++++++++++++--------------------- 2 files changed, 50 insertions(+), 41 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index f2217b85c..aab1429fa 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -37,6 +37,7 @@ import ( ) var log = logging.Logger("bitswap") +var sflog = log.Desugar() var _ exchange.SessionExchange = (*Bitswap)(nil) diff --git a/bitswap/workers.go b/bitswap/workers.go index 04dc2757b..8018c8458 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -5,11 +5,11 @@ import ( "fmt" engine "github.com/ipfs/go-bitswap/internal/decision" - bsmsg "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" cid "github.com/ipfs/go-cid" process "github.com/jbenet/goprocess" procctx "github.com/jbenet/goprocess/context" + "go.uber.org/zap" ) // TaskWorkerCount is the total number of simultaneous threads sending @@ -52,29 +52,11 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { continue } - // update the BS ledger to reflect sent message - // TODO: Should only track *useful* messages in ledger - outgoing := bsmsg.New(false) - for _, block := range envelope.Message.Blocks() { - log.Debugw("Bitswap.TaskWorker.Work", - "Target", envelope.Peer, - "Block", block.Cid(), - ) - outgoing.AddBlock(block) - } - for _, blockPresence := range envelope.Message.BlockPresences() { - outgoing.AddBlockPresence(blockPresence.Cid, blockPresence.Type) - } // TODO: Only record message as sent if there was no error? - bs.engine.MessageSent(envelope.Peer, outgoing) - + // Ideally, yes. But we'd need some way to trigger a retry and/or drop + // the peer. 
+ bs.engine.MessageSent(envelope.Peer, envelope.Message) bs.sendBlocks(ctx, envelope) - bs.counterLk.Lock() - for _, block := range envelope.Message.Blocks() { - bs.counters.blocksSent++ - bs.counters.dataSent += uint64(len(block.RawData())) - } - bs.counterLk.Unlock() case <-ctx.Done(): return } @@ -84,41 +66,67 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { } } -func (bs *Bitswap) sendBlocks(ctx context.Context, env *engine.Envelope) { - // Blocks need to be sent synchronously to maintain proper backpressure - // throughout the network stack - defer env.Sent() - - msgSize := 0 - msg := bsmsg.New(false) +func (bs *Bitswap) logOutgoingBlocks(env *engine.Envelope) { + if ce := sflog.Check(zap.DebugLevel, "Bitswap -> send blocks"); ce == nil { + return + } for _, blockPresence := range env.Message.BlockPresences() { c := blockPresence.Cid switch blockPresence.Type { case pb.Message_Have: - log.Infof("Sending HAVE %s to %s", c.String()[2:8], env.Peer) + log.Debugw("sending message", + "type", "HAVE", + "cid", c, + "peer", env.Peer, + ) case pb.Message_DontHave: - log.Infof("Sending DONT_HAVE %s to %s", c.String()[2:8], env.Peer) + log.Debugw("sending message", + "type", "DONT_HAVE", + "cid", c, + "peer", env.Peer, + ) default: panic(fmt.Sprintf("unrecognized BlockPresence type %v", blockPresence.Type)) } - msgSize += bsmsg.BlockPresenceSize(c) - msg.AddBlockPresence(c, blockPresence.Type) } for _, block := range env.Message.Blocks() { - msgSize += len(block.RawData()) - msg.AddBlock(block) - log.Infof("Sending block %s to %s", block, env.Peer) + log.Debugw("sending message", + "type", "BLOCK", + "cid", block.Cid(), + "peer", env.Peer, + ) } +} - bs.sentHistogram.Observe(float64(msgSize)) - err := bs.network.SendMessage(ctx, env.Peer, msg) +func (bs *Bitswap) sendBlocks(ctx context.Context, env *engine.Envelope) { + // Blocks need to be sent synchronously to maintain proper backpressure + // throughout the network stack + defer env.Sent() + + 
bs.logOutgoingBlocks(env) + + err := bs.network.SendMessage(ctx, env.Peer, env.Message) if err != nil { - // log.Infof("sendblock error: %s", err) - log.Errorf("SendMessage error: %s. size: %d. block-presence length: %d", err, msg.Size(), len(env.Message.BlockPresences())) + log.Debugw("failed to send blocks message", + "peer", env.Peer, + "error", err, + ) + return + } + + dataSent := 0 + blocks := env.Message.Blocks() + for _, b := range blocks { + dataSent += len(b.RawData()) } - log.Infof("Sent message to %s", env.Peer) + bs.counterLk.Lock() + bs.counters.blocksSent += uint64(len(blocks)) + bs.counters.dataSent += uint64(dataSent) + bs.counterLk.Unlock() + bs.sentHistogram.Observe(float64(env.Message.Size())) + log.Debugw("sent message", "peer", env.Peer) } func (bs *Bitswap) provideWorker(px process.Process) { From df9c14b5fbab0aa2f1a66fcec48330ca633bcc27 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 25 Mar 2020 17:41:36 -0700 Subject: [PATCH 0895/1035] feat: normalize message logging This commit was moved from ipfs/go-bitswap@8c7bf926a54adb650a3a046d34305a07759a8c01 --- bitswap/internal/messagequeue/messagequeue.go | 30 +++++++++++++++---- bitswap/workers.go | 23 ++++++++------ 2 files changed, 39 insertions(+), 14 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 5debcd303..daf8664bf 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -466,7 +466,7 @@ func (mq *MessageQueue) simulateDontHaveWithTimeout(wantlist []bsmsg.Entry) { func (mq *MessageQueue) logOutgoingMessage(wantlist []bsmsg.Entry) { // Save some CPU cycles and allocations if log level is higher than debug - if ce := sflog.Check(zap.DebugLevel, "Bitswap -> send wants"); ce == nil { + if ce := sflog.Check(zap.DebugLevel, "sent message"); ce == nil { return } @@ -474,15 +474,35 @@ func (mq *MessageQueue) logOutgoingMessage(wantlist []bsmsg.Entry) { for 
_, e := range wantlist { if e.Cancel { if e.WantType == pb.Message_Wantlist_Have { - log.Debugw("Bitswap -> cancel-have", "local", self, "to", mq.p, "cid", e.Cid) + log.Debugw("sent message", + "type", "CANCEL_WANT_HAVE", + "cid", e.Cid, + "local", self, + "to", mq.p, + ) } else { - log.Debugw("Bitswap -> cancel-block", "local", self, "to", mq.p, "cid", e.Cid) + log.Debugw("sent message", + "type", "CANCEL_WANT_BLOCK", + "cid", e.Cid, + "local", self, + "to", mq.p, + ) } } else { if e.WantType == pb.Message_Wantlist_Have { - log.Debugw("Bitswap -> want-have", "local", self, "to", mq.p, "cid", e.Cid) + log.Debugw("sent message", + "type", "WANT_HAVE", + "cid", e.Cid, + "local", self, + "to", mq.p, + ) } else { - log.Debugw("Bitswap -> want-block", "local", self, "to", mq.p, "cid", e.Cid) + log.Debugw("sent message", + "type", "WANT_BLOCK", + "cid", e.Cid, + "local", self, + "to", mq.p, + ) } } } diff --git a/bitswap/workers.go b/bitswap/workers.go index 8018c8458..208c02bff 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -67,24 +67,28 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { } func (bs *Bitswap) logOutgoingBlocks(env *engine.Envelope) { - if ce := sflog.Check(zap.DebugLevel, "Bitswap -> send blocks"); ce == nil { + if ce := sflog.Check(zap.DebugLevel, "sent message"); ce == nil { return } + self := bs.network.Self() + for _, blockPresence := range env.Message.BlockPresences() { c := blockPresence.Cid switch blockPresence.Type { case pb.Message_Have: - log.Debugw("sending message", + log.Debugw("sent message", "type", "HAVE", "cid", c, - "peer", env.Peer, + "local", self, + "to", env.Peer, ) case pb.Message_DontHave: - log.Debugw("sending message", + log.Debugw("sent message", "type", "DONT_HAVE", "cid", c, - "peer", env.Peer, + "local", self, + "to", env.Peer, ) default: panic(fmt.Sprintf("unrecognized BlockPresence type %v", blockPresence.Type)) @@ -92,10 +96,11 @@ func (bs *Bitswap) logOutgoingBlocks(env *engine.Envelope) { } for 
_, block := range env.Message.Blocks() { - log.Debugw("sending message", + log.Debugw("sent message", "type", "BLOCK", "cid", block.Cid(), - "peer", env.Peer, + "local", self, + "to", env.Peer, ) } } @@ -105,8 +110,6 @@ func (bs *Bitswap) sendBlocks(ctx context.Context, env *engine.Envelope) { // throughout the network stack defer env.Sent() - bs.logOutgoingBlocks(env) - err := bs.network.SendMessage(ctx, env.Peer, env.Message) if err != nil { log.Debugw("failed to send blocks message", @@ -116,6 +119,8 @@ func (bs *Bitswap) sendBlocks(ctx context.Context, env *engine.Envelope) { return } + bs.logOutgoingBlocks(env) + dataSent := 0 blocks := env.Message.Blocks() for _, b := range blocks { From 27039091858d5d3e0df5a722551bd6c4174d84c7 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 3 Apr 2020 14:36:15 -0400 Subject: [PATCH 0896/1035] fix: ensure wantlist gauge gets decremented on disconnect This commit was moved from ipfs/go-bitswap@d310fe30d4fe9bf889900b32bb8a91393f0d8a0f --- bitswap/internal/peermanager/peerwantmanager.go | 10 ++++++++++ bitswap/internal/peermanager/peerwantmanager_test.go | 6 ++++++ 2 files changed, 16 insertions(+) diff --git a/bitswap/internal/peermanager/peerwantmanager.go b/bitswap/internal/peermanager/peerwantmanager.go index b0c843a2e..08914bbca 100644 --- a/bitswap/internal/peermanager/peerwantmanager.go +++ b/bitswap/internal/peermanager/peerwantmanager.go @@ -50,6 +50,16 @@ func (pwm *peerWantManager) addPeer(p peer.ID) { // RemovePeer removes a peer and its associated wants from tracking func (pwm *peerWantManager) removePeer(p peer.ID) { + pws, ok := pwm.peerWants[p] + if !ok { + return + } + + // Decrement the gauge by the number of pending want-blocks to the peer + for range pws.wantBlocks.Keys() { + pwm.wantBlockGauge.Dec() + } + delete(pwm.peerWants, p) } diff --git a/bitswap/internal/peermanager/peerwantmanager_test.go b/bitswap/internal/peermanager/peerwantmanager_test.go index 9cfa9410f..a56df168a 100644 --- 
a/bitswap/internal/peermanager/peerwantmanager_test.go +++ b/bitswap/internal/peermanager/peerwantmanager_test.go @@ -289,4 +289,10 @@ func TestStats(t *testing.T) { if g.count != 3 { t.Fatal("Expected 3 want-blocks", g.count) } + + pwm.removePeer(p0) + + if g.count != 0 { + t.Fatal("Expected all want-blocks to be removed with peer", g.count) + } } From 9f84f9380bb7d321f3ed769911e8b091d1e8bf46 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Tue, 7 Apr 2020 16:46:49 -0400 Subject: [PATCH 0897/1035] Add separate how bitswap works doc (#294) * docs: add separate how bitswap works doc * feat: update architecture diagram and add implementation description This commit was moved from ipfs/go-bitswap@b0f337dfde28a645c25fcc9967943a41975cbfee --- bitswap/README.md | 58 ++---------- bitswap/docs/go-bitswap.png | Bin 47568 -> 84886 bytes bitswap/docs/go-bitswap.puml | 19 ++-- bitswap/docs/how-bitswap-works.md | 142 ++++++++++++++++++++++++++++++ 4 files changed, 160 insertions(+), 59 deletions(-) create mode 100644 bitswap/docs/how-bitswap-works.md diff --git a/bitswap/README.md b/bitswap/README.md index 28f07ff98..488d9993d 100644 --- a/bitswap/README.md +++ b/bitswap/README.md @@ -45,6 +45,8 @@ wants those blocks. `go-bitswap` provides an implementation of the Bitswap protocol in go. +[Learn more about how Bitswap works](./docs/how-bitswap-works.md) + ## Install `go-bitswap` requires Go >= 1.11 and can be installed using Go modules @@ -75,8 +77,7 @@ exchange := bitswap.New(ctx, network, bstore) Parameter Notes: 1. `ctx` is just the parent context for all of Bitswap -2. `network` is a network abstraction provided to Bitswap on top -of libp2p & content routing. +2. `network` is a network abstraction provided to Bitswap on top of libp2p & content routing. 3. `bstore` is an IPFS blockstore ### Get A Block Synchronously @@ -107,11 +108,11 @@ blockChannel, err := exchange.GetBlocks(ctx, cids) Parameter Notes: 1. 
`ctx` is the context for this request, which can be cancelled to cancel the request -2. `cids` is an slice of content IDs for the blocks you're requesting +2. `cids` is a slice of content IDs for the blocks you're requesting ### Get Related Blocks Faster With Sessions -In IPFS, content blocks are often connected to each other through a MerkleDAG. If you know ahead of time that block requests are related, Bitswap can make several optimizations internally in how it requests those blocks in order to get them faster. Bitswap provides a mechanism called a Bitswap session to manage a series of block requests as part of a single higher level operation. You should initialize a bitswap session any time you intend to make a series of block requests that are related -- and whose responses are likely to come from the same peers. +In IPFS, content blocks are often connected to each other through a MerkleDAG. If you know ahead of time that block requests are related, Bitswap can make several optimizations internally in how it requests those blocks in order to get them faster. Bitswap provides a mechanism called a Bitswap Session to manage a series of block requests as part of a single higher level operation. You should initialize a Bitswap Session any time you intend to make a series of block requests that are related -- and whose responses are likely to come from the same peers. ```golang var ctx context.Context @@ -125,7 +126,7 @@ var relatedCids []cids.cid relatedBlocksChannel, err := session.GetBlocks(ctx, relatedCids) ``` -Note that new session returns an interface with a GetBlock and GetBlocks method that have the same signature as the overall Bitswap exchange. +Note that `NewSession` returns an interface with `GetBlock` and `GetBlocks` methods that have the same signature as the overall Bitswap exchange. 
### Tell bitswap a new block was added to the local datastore @@ -136,53 +137,6 @@ var exchange bitswap.Bitswap err := exchange.HasBlock(blk) ``` -## Implementation - -The following diagram outlines the major tasks Bitswap handles, and their consituent components: - -![Bitswap Components](./docs/go-bitswap.png) - -### Sending Blocks - -Internally, when a message with a wantlist is received, it is sent to the -decision engine to be considered. The decision engine checks the CID for -each block in the wantlist against local storage and creates a task for -each block it finds in the peer request queue. The peer request queue is -a priority queue that sorts available tasks by some metric. Currently, -that metric is very simple and aims to fairly address the tasks of each peer. -More advanced decision logic will be implemented in the future. Task workers -pull tasks to be done off of the queue, retrieve the block to be sent, and -send it off. The number of task workers is limited by a constant factor. - -### Requesting Blocks - -The want manager handles client requests for new blocks. The 'WantBlocks' method -is invoked for each block (or set of blocks) requested. The want manager ensures -that connected peers are notified of the new block that we want by sending the -new entries to a message queue for each peer. The message queue will loop while -there is work available and: -1. Ensure it has a connection to its peer -2. grab the message to be sent -3. Send the message -If new messages are added while the loop is in steps 1 or 3, the messages are -combined into one to avoid having to keep an actual queue and send multiple -messages. The same process occurs when the client receives a block and sends a -cancel message for it. - -### Sessions - -Sessions track related requests for blocks, and attempt to optimize transfer speed and reduce the number of duplicate blocks sent across the network. 
The basic optimization of sessions is to limit asks for blocks to the peers most likely to have that block and most likely to respond quickly. This is accomplished by tracking who responds to each block request, and how quickly they respond, and then optimizing future requests with that information. Sessions try to distribute requests amongst peers such that there is some duplication of data in the responses from different peers, for redundancy, but not too much. - -### Finding Providers - -When bitswap can't find a connected peer who already has the block it wants, it falls back to querying a content routing system (a DHT in IPFS's case) to try to locate a peer with the block. - -Bitswap routes these requests through the ProviderQueryManager system, which rate-limits these requests and also deduplicates in-process requests. - -### Providing - -As a bitswap client receives blocks, by default it announces them on the provided content routing system (again, a DHT in most cases). This behaviour can be disabled by passing `bitswap.ProvideEnabled(false)` as a parameter when initializing Bitswap. IPFS currently has its own experimental provider system ([go-ipfs-provider](https://github.com/ipfs/go-ipfs-provider)) which will eventually replace Bitswap's system entirely. - ## Contribute PRs are welcome! 
diff --git a/bitswap/docs/go-bitswap.png b/bitswap/docs/go-bitswap.png index 2b45b8d9b5a84b02dc83d0aaf33a713b6fc2bdef..31dff2b85a71af71b056e0cdbaa12941d13dabf2 100644 GIT binary patch literal 84886 zcmbTeby(D0*EWoLDPn+)v?4=?NGYixAS0cUqjVz;k}4r0Aj1IC4bsvLD$+3s(xt%A z-OaZKujhT<`##>kKL3$p7=E+&+H0?Mu5+Dh{N!aNFPNKMFN5|Lj(jT z)6SlRSKe>ASHpiycH+wL@D&h8~Y2gghE7nXMR z7Ut}FRu-4**{x{lx-$}= zo?jR(l_*QT9s70k%!yAZ&TO5T$@E8uJdJ@gw@`Pfzp1c(56xF5@VyYbMKsALs7o1l zFWHHZqdrCU+hulIj7z-!&!;}JmvESjZn?LHsBaaX&hLfJm5CCy_0Fdc1RIyJFRhe3 zN})=+&P&78tJgde6=QQtg#BsU6Xiy+tUsweRLBP($sJdttZnV+MNX^LDN7 ztm@7)A5Bw}RL9Rzg%TaEK4nz@qT?yLq#O8An0JfXg74uovx+F*`(s5Hd_D6 zV5;B;$&Y=kgLkHjU1AoUD+bNZ_|(QciMKsB95_0+7q_>3m+ImBk|%3(`u`wrlF_n$ z>O>1sSe(|-e{%KQmXBGn1dY4-N0Qu{rIRmfuhO4J-Raf8Pf>HVdR@ru`bkXBxz_r! zcGeP{Cdz1`J$J+e>+*<6wcceh`f3!5iNnP+nkm)`wePIge6_~Wk+Y+0s>D0smG6$od)KI&ub~$A0AnA;HpHE z0@&{HIC}gl7dM#n#=Lp*Uc0HWa5`)1hrzQaxW0H>#oBR4hr^ zwHnWy_@BS)`16D=;rpr87{`&O)>e$b(=gWlVhc?L1qA~G1O*`h!Q3tUt{yhf278?& zX>=TMmRw{>n0RygJQ|I*v9-15ryBgj@&*d`FFOXR%_*a;VgLu-L-Dz`&fA zfFSGb@rtOtA75b>ymiq}|NQYfegD4t=J?fst4%lXCg&$PHY{t<@vE8$G#lQgcGSnm z@eHk#OLF$?bZ3hHkm1P~wg(TK;e&c*=rr-hm=-)X|Ci(-;oZ;YJ+|{xMMdf|1Eq&TgcF+!gEFlM zu*28gzs8pGLd*X-AqDUA9Jqvcm9OZt2rwfWzmuZdc#C-=E+aKkKe>k@g{5_M$HYj4 zer0R&ZMY^EQ-8!S;&V=6EgUL==V^5=Oyy7c7OHg#Rkqa8ZK}4TyJ5S1rwEBL8}p+z zqYflb@O%E{b8MxsUram*0e)_~WqC!FZr(x1hgab9<9Ywxmx?z!iqC3ud3m}kO>SwV zs^J6K9TLqd{o!(ZIl&b#*5wW{61T&hDGTiyS^Vs}I6cbkD zaC>|;qp$B-Et->)bM-@I(hO5K60f@C z%$%BhE>cJYkaw3@p>Qg_+Hy`#r9(?S*-`%fEv*q;bF;G-So%w>M$K}o=&@V#{cBAO zaneto+?~o6*!83Xdp)%H*Rr!j2tKQkp=`|(g(L|op;zmDd3rXI)Z{i&I7IW%QuUr? 
z>nG0DyLax??r%_3FIkif^E%A;t*)*v)f^qR@r^3y%SIIG)}1R`nwm0tww)5LgEmsl z(K6ZfX0y0}pmCYcOH52GFE2+t4Qs5m9@pt)wpae_M$w7qe~X2|MS$-88FH=^)UjD z%p|rpHWLZHzP>9@FWBM&1zk7$joMH(y7PT`_srmInrmlgO`W&9;trJ;=QehAs$LB{ z3|cVA#bSJIShnB;*sxLVFP>6}HRaYEYB)bbp*TofHs_s_LO-pgj=HV|?4WAO%G3=n zQr?$$p2p!2BmriUkvlz9ZvW{M8(!!tm#r{{xw*MeUweD|Rry$Mcd5)W$5FSEojJAj znXajUV25(YrJPo`@c_PwKZG={x3+4(f1hxF{yhExhq)4?^GqiK_XpP!o*QF!n5II_ z%gY;ixXT~22rgr9ZLLs0t~y{%k$beaRAV*gGyC5r;l(JMUf!JPYS>0(PZkSc1np;k z*-shj>o*R?9qmuqpezHrIJ@v4WO6vPS!I$r73Vxj=DfXCb969P=N}LdP;VQ$!J_hL zurPuDzx*Y`qB@(qqu6Q9LQW2=n$&-@!gXgAZX4Zs*VQHl8v(m%dFT3QQVlxH{6LXe zX4!&EmnxP;@3&Uz%SVf(q@)yBx5cul33pvxT}?h^Nt(Zx^Wxv&BV}=F_yZZ&B=}&n zJZ~?tot~f?b!u8#RE@j1tgLJhwa^(7>ivygRGf&%;qPy6GtdLs_$6$uWqKaR+Pc#OvF8gy--v>*qVMVM2)l>&F)pEh|;MwCt zWwv#7btg`oFtq$kEOZMWx#TJS4ScUeo5C{7l6`$8mog=SsAIyydS($2u7iX&KebW` zI(_jAj<3)PdSUtK(d(L;qhZI<;UvkR1TkOP7=9iRk^j|Z?I(>KQ zpy;B=5c}`Ql%Vg@{Ebic;JF7O9o~Q$EFbdlDf9WG|BaWk=k(0+x%Jtl1UhA>zn)tD z{bm$CU*6DXNuZ;g{m+2#zkm8ap_Y;kWB#jG#puSs)-R{j5liC#2#^!E67=m26H;5w z?#0Sla1=)U=ly)3%;-E_`(;)-;6TRrdo4cdJoG)z+@1*doq9}qnX7ZjfeZr|H$JAu ziycD&e0*P^$4H+?g+@hbDl03is;a81J8#ab5B}HrRQaH@C5o92mXYRayD$2hE(E+N zPaZXqOPHt^r=de2EC!3&L_}(`va(WB>;EgZ>*ppUKe#j4CsQYd!0hi2*JPn;goTCO zA%lvGi-(4W7Q6iC)!rjUSaIhE+`IQ7#v2{y&FcUDy+)0@o1&tkrDcw6TEXyt?!DiU z(K)G4L26~56*KhZE-N#$DI^A7sy_T4<7NNNA)->EQrG`YQvcqAruHZ+-=o1sUY{oR zD66aRZqGomzgbc@9z65E->xIpRL`}MR!mBs+WC%yM_U3TQE4SO)P^{a6_v98uV=%X zRizy{P1@-+{p34M|GJLpzmKQIa0gOde6=ylso!`2;Q%u*(5jk{e69-?NkGu?^Z(@I z-t6@#ign(%i32XPUGUZ_M=0q1lFBl$8?*cj)ZMBofY!CzjG=U zXSY^cu(AB1FPxI5rwssTgp5?V5R=&)Me_KcMQFDD7h2%UU~-<8ca@j-Oh}ywh_n(} z@zQ+O7u}Us5+@WD6S^^Tp5udJtfMVOp%GT1Oi3q}mE(cCYr)WH(cGFOHCZC-@^q3& zfAK=x6n1K6L*UI@R21j9LG3P$>vBUxMCRp3}F)B<6q{Fut|z-nY$gKAfndalRvH=2pSA&0phi|&jghAmRIbCE)O8)l@! 
zw)C1KEng0Q{5V^iB@-ollW!3$K3<0Rk!Li2qobevYB>*rLuaVS!y_whlEp+e=$IJyjZC$~#2ad=mYjmQ-R|q1PqB?L*5k4JxksyU_g5a2iP%hFl@wJO&rjQ- znnJdQx==(9D;$>$pB-(D8j7m&6j($w6y|HP6G{7jqhyQ&!zy;wRkJy=&q zG2&vS`q0zy=+Ke5=#BxuBYSUue|ms;aoS zhMwLx@AD{%+#0up4vZK^r7Fi-7TuR2F{ z$hb)E^ZNWT5!TVG`K!N2g+<6?9Ohft8bTZz4KXvW;zh{+j9kgz<@3w_pl_QBlE=K# zw;wwc&wM|{KFVXULR|BJEcVer!qpJ118!}JAM2}6U~g?g^0BR(aSZ3@W2xBBhf5gS zDGZDgspp?j56a5E_n?9*LEiuuRHO(*9?z9;%t7@~*OtE2q@9B+?2m@zzTblH2%_OC?dvLK-b6g#_mnAQ-5M7Mv1_+q_E7uR+`5=k@`+s?&u)d zCG@xm$9O-@cbR@kza00Kt;$P-Sgus;vwkyVWLd)AlQNDHAK$!#`X;)k%}+5x6>Z}S z7J43`djoO$Q|?!8aulGCoRaX%r=T%M_58kWx<|A?f;Ge`u+0x z2^pKM)i#J5g4b?TG3cjUxt~fLmH5lzgY7C3oT-+}*msmNqR8=4GAsRgwG>?Uhurrpz@#)+R(r~9%d$nA`ue_nd%Fb4H)^%DCp#E?Zq(TbWn`4*cQsDH zhOYn1_e9zNRwc!JsYROmml(EiWVJ)zzi-G1{^?0-SU5#k&Wbu5txwRmThEuIIgnZ! z5yAzi=IR*p2P>Hu*}%HhRvUict7Pv=d$0t^80!v*lqnM7ut6lWum?` zM&aUXWG;feI2>Dz)^ng*aSs0zxp;?f^QPYC?`3~B74lo@)24KztL~HwmM>b$wRu!l z&ZP4x3637pGulCfsq)y}9+V5goRV@L)y(<=_(tR3g0F}PV{Tch>`^LeqGA#KAhnE; zIYRG^zt!v)Cb+$;t}8=0{qT*oMZ<7GE#6VD)00Gg`BSWboM&A6 z{7<9-i*BGvF(VOXhPx~JwWabs?ZNj$#iC7`Lbtj~hPzUxcx5U(-BAzvnnePKkEY}t zv4?k8Qht|dyO*0b`bQo)J=lC5e#J+tiJR|%bKUUHd=0Z@XSao}NY)=Ggx<+|*Pm!By0nqQkXa)-KV(LW&H@q_yR(cf%e&WjOD z6HtgM$))8@G8bqv`0H18sjdY;Rs?`f_u zgMc`M(m;|`a%oit)qG7;(^LAkZe%2d6-~BXKADT}RQDlw!(qqSs=+R`lWi4MMIqCK`-91Sba=Y&>R%!t2>Z+8oR`<=r4EqouC7|HDZlWZ zhKHw3=S2nFX1L1;_`~TN;cTGz+5?c{!k&@be%baP!@bi18Xqb8kelrx`iDR=bIaAd>WJT5QK4Gt_| zvb#KDrCm7Rbs#CX#mwMhKDByOLQ2^c1Ln8FKU*mYo}WI3qQ(ol0f%;vq{Pi7q^8T? 
z#LFYdl+Uz@!hx`}*w-t3IEZ+q*n*UQi-z}1YovgPFBl2SBj$*kC;|1bD zvgMq8YYVCs)#f|7h>QEU)Dt>+2ZBf0x|122+LecH`B;U z2cw6Li~WCGL{)F{-DPO_hoJD7#PS?3ANu9G8K1+zUAJ{^Ek7d$D`Sc7SmF6^O{`@>A?MR!R?Bs=Jo{&$_uV7Lz zv=|W{XDryRInudF83vYzoibUphAMdSG8f&;B|c$?_)$4vm5SD@UoX3ND7X5yrV_0e z7|qVn>N4(;V@FzHo}IPkBpQ~5B1?JifFmF{E-!sMls*2~J!;}?3p@3+cRsTCnTbOc zL?u8T1@&0wv**}|wAC(0-EQHYoom%HvaLmki3IJDW;e&>v33dEcZf$Rhcoc4Q>6WJ z=9Z<#&d6$6nil`O_=Hi4(N9%`X_Yklf<^8JORJkl3{#;Z&7+553~4$uENj)_ybs7U z!h+k+_)hs%T=oT~4*6}F){!uT?gVM#|(pt$8dZRPC)yN}cx1gy>Y@hGqa$r@h zt=N`A^Vvy2Bt>}*q5mj#ZJ6v(`P;r?2c&}WWw2Ota%tc=yd@&M8bx1YAKKL4jMWfR zVT&RsFh7aE#hw-+EXxqTaKTXySmMz?d<=CKXsxQ2?4fX@X4M~!TDB5z%Z7ew!Aj&8 zes**xb^g{M8`Lmq%l>fJHmN5Zu12Q>X9v`A{J5>QyT7rH6rCjA;-p7glB;utdf$gE zsI5>$oP;u^S9CY`ZvPnSa7*s|sAZ|Q7Z~8UDfQ|rhkf6~x&oKY{?tO`?v&7N7M$dK zU$|!bJV)9|dICH4W51!Oc$|xDVF~D%fZJxU{j1M?9F7fw&5eJ&n{m436PqryE37 z&$}lp_zS+6i%WxxX-dil*{;5K+-zDW9uX3#6tq!-OLz4^g#3F$=h*<(xCIe8vLE!D zrdB~dmkWbTiW^_GrXx-0T`nRtIfv`$5|UqCLVowMEx&5CNh!RE3(F7U#bdvXsPniI z1$Xn@3-X6wImQT`P-Oo5#R`#`M3bSWpGtxTCf2p~g*aS!)I=+O67abC=MNN991_kk+zhJqXEfuF?v>6J4 z{Q?h7$%VK9W>)}z*%x&6yf0f3A&3wvb#{zoH*Yf9PGRHZ82zGl-M13J*-~gsH`(@+ zp?Jw69hIUHy0a173y9u-Zs~egi&&RMvf`eNST9Qv2j2ssaL85$Cw9oNn{g#(17qkE zvu5Eyo(qpGD^7Ww@Dx90$L%5|++wuGsL%Pg#|Vnp_gyP{H+;|4^zeg0%H_z6!b;Uz z9>&_<#cri>?{6EbEAQQVwo(~1QNrJ~S7NgGx{2h_M#&?$PgpNkwVbjDa9w1M+E`1U zdn}O%c`ns%b5B~TVL)7{h;4tYquOj@VZ+t@2}cR{&6&L`o>j+u(3e<$o>vHOOpEeD zhabT36qnW^A%T5kyV(dgT)jV2k~Yn=a(a*Y{0rE4IyI3LtYn!!+u3uWX2Y^1G*&+n zM5Xp(=LImzSDIGF8*;TPc=`GHnV6V>{K?Np$1y#d(u)5en)6{~t<7qrh3IUP(3q9csvyb(>@qwmc0(eR|TB1woq(x#sR;Z80a7ZX8ZpLEvQ$8lp=h zQ4fkHZl94YEIR+3U|Q=qcT&0F)YID@e6f=$KB}ZstGUmRE4}o4tgHnbHM(Doo%Ev- z$t)(U*T<4ZZ!aK(1!4HZIucu+R)G!xSn zR1@Je>c8rL*|FEwZ>uVqYSjjh%-j#lHxt5(-X2`la)pi6uT$PJ`kZ|yt%b6{ChovRaX?m{Ll9xcWAqFS7BL8JS-Rf&$Yt0zfQU0N~z z51obEgkBmwe5bY7)D76w)rBg#`-E5%G&O)(tZQhXM(=Ey1KjjVX?&^v>16EKC#3!tH-$B+0j{Dz_hjlWs1Pi1^;saARE(l zHk_6!c>?F-Fr-I=wX6=4Wk_e7vw+L@%1S;`H=zyej@41o?}eI)sud9%hmbln;lU-X 
zW~v7`gI9zUy`@>mmdv(6NxpCb={xwI-QBI-1t(X-Z@Gt&yK79GRdM}&dWF)QRSl_v z`x=X89s?D(R*mg4wBDw_oFHSd{VETJ%m*g-^JhjrK1WU8VE-cvRff?g)#0uJJAu3d zh5eV1=qD#9&3^SV3287Fu{uHuC%a^mas}5ND2XPNk+qn|}hC&dinz zN$Xpyfk{x+Wf$MH)VVI8Tx4Ip6y5YHvl$jYbh<-)6B#SyQk_}EAKjart| zZP3Gzhq*;kxHadefMHoipD(1@)9!<0NkY-ahxk zd@=?y3I)q((f%oTKq&nlT$UvTiXlhr?(<&Fr=616cNKFF%5I?NXv^{es<%p*T}5EL z>Y73yP$Ue;zM4%hGV5g)+xqzAWKZwsTnIqaa*c|+xkF@^XpM9poNm~p2 z3?Fn0aw}DSF)~;>uj;?5hD-H)hFo1ywjWs&wrUemD8|6t&1KM*kJgK1Lk_-d|Lxg) z3_e`wd+BVvZ5dh1Z!y}z{l|Ecr zW`nfPZXvaIE|lE#JT&(6^_!qGKjMurF8}cSPGN2I1KSbTwYJ8_#_8$l01Cd;7CZ3a7aY1u6Z zvJ|u}7fS-rA?}eb&mrvEB&w1708upKkEAjs;C~39_ymFH!N>DaKtduC<{SV>8*D4% zG9=i=C-~C@Z9=R$KB`G%OEseFCTzsnU66OU?Xf6bbWHlI2T3V`l$1mi` zNz)#AmCW@l8!+7i^_B?!rf#`()iSPI`f`oUa0l9EhSYC#kcO(lrzaNr`22y4uPA8T zeaf)^{U=}%5)>W~upAczysmYLzuvUQ2%=`rD(0|H=q5QW+LUeH*Ou@6oNKmm1>uir z3HqBHR~u!fSTg$2RB1_s$qtM$_jZei8=IS(vDncXj~dyuJHh{$W1@dx=UKwnN;?T? zhxH8K_~&*K`bYPZuxcw^j!NKQ<|!zv#ibgiaI_@-Oh=M@>OWk=NZ=F`*&K|{&wr_;HAw3CZG^}B_m{qfjpYgMCcq7jzeM2nk! 
z^XgL1&!49h1u(K?ZiAM6-|mhPNM9oXhKTZ6xrFSxrjJ~xty`wU6^n~kQywR96REw* zV+ABqltLUuYZk@?<>sJA?8-YKuh55k$A~PH-$fn@xjaZ@OuJ^jvL9w}x7EliQrW`H zJOKbsCch7liexEzuxeB0^>GmPq96Q4`}n33JSToItY&m0Ec+ej1ye_V&Xm{iU?jsQ zID+PQXP+;t^3MvdJ|)X&)D(jkTQg2?u~JI(lf-00(Obl7Pr{nVbBNtZ_9bJ z7hJ_RAkU+<4myPlfsiryP2P6`{>->!t{(CU#{#3FR>@6ULsZ#4-Jlmv)+s8!Lrq|o zX0CEZ3mFGay?KH&c)$~H`(#D?{UqQ?@L-_f8L!{QDLzX7-m!Qy56bQw&I$5|s)%qx zbECeC@c1wB{ye*|Q6%e1U|V(oYbn=Ss>AQ;%BAp1o)cen5ZIq&9R78){s#00kaJe|PvR}s@fx=` z+bSw{jtRF1m_d`-oBK1eCFF{0bO@=+ofo5U&%Q<`%P07EB4=o!2|&hZ1=j*D9n~+9 zBDAvV4?~PsPRN^(7e8VFngVaaB%@+#+peZufO;->d`S^8T`~8`V=^$mL&M}NjC@e7I0=T*zfr{Wq6_~qU)fA7OX&>8fTTupYQJw0I3)^KzQ}oxKj&hR z)=hnR|+W%ChHM5$yN%;gTx}vniAm*oO23q z3Yd@ZfaK=%{o{jgOYYMKPX^u*Si8`i+qh~>d$v4fFKHtEmCSO0LZc?eGRhrpix9yVmm z3K+_6A58F@B1JMKVV{)Uq<_rMLWTZ$2>cj~hE(f~82Kit%HSYRh@meeSCemI^q*qS zeb@H1lO01rsB;SXi5)&q$5&b!$3GN_cZ$H25t5{PS-_!4Ap#_WJg^Q(;E4Bx*Ow ztDo#-6daCTU_3s)>${1aKm)-!)ooHRlt5m{XlI=tQtb@bEeF~YoEj7Q8ZXxRVy$B0 z454l;#tj-)YGOc=XvwX{6M@SB;#peHr=dJH0C6^bg_B$(lF6@`PF$afU9-T<;cD`; z=!sbd8o%$H@%V@>@UlAb6(n+K5CI>ZJd}c#vo}QT0H<6W7)`x2A3|EX{K?QCdCu#L z75@D1Q>Gvkpj?NeN_J_5@>U!8cNH4^jew$T=i@xbU%KT6r$$tg{lFQt^d2X7N9~ zZpHKS^U_~OOz}(DZo|hopIbDEd)o3>Z+gZPNL`vxg#f_=m-lHxa8A{6upb!L ze{~4XHy&Miuj*}3maOuN8|j~$*u!j`A!+eenQqSaYZDO0&)jK_-e4YvS0b0sP0Xtpbkpay>_#7Nk zV+`m30A+~h2cq<%pA_%2U|hF`9r-}#2;~=40~A$XnAtc+`llLaABTE;gP;u8~@r&_dq&hZJ>1>MK6MJlE6!xuXRULNn)Z?}2?E zW}mF(*XV65gpQrh&`DyZd`M}ES2cLH-H(Wk6L5Bq4(V<8{0P8DW{X3IJVDAj>Q~Pp z`U%~iMmXKCK3Y*viVHb0=4AtQ`alOjacsJ%s|X2F8>0#v^fCI_Oc5(4g!csbirw1l z(3zkw+qEDQE`k05_dAC@znr5j4?DrydY0l zP}N={euwwVP}7W>cYWn zs3EzU6eK)*^1dL{H0J*hd?CV1KG`}81u)lu=D^2#thOafO7*1AQ#eWpQ|I1}mb~>5 z#s{3QvHd8n1yy2J?E?FWb#OKdw*=M9&?~NwAOCRp`ucwsC}k~vjt%9u5n3k(0<1P; zfGdl!RF-0OHNb7T{&q5=VIt(^(@OkGff?YQ4yK*qUabZcP13w5O zPCPuoh_)O{N-Z22`*owr^jo?;s+KOCm9p6=2)!}=g&6+?ei<2umA~O4|6qzrt}#E& zaG=+{%gHFtVJh1T{SdF6?RyhQjLyl^H3>dGs_{wdgK$Nk6HCM|9Xa%;zrxmokV&D~k@HYnp&rags z@fDf;>sfkR(`Fp$)I=x*N4PbT+dYcLver?`)MYb0En8mbmoJ$wAVOCtv-VR#MI6jW 
zCE_mV>lIqH{QZ1BE}TuXK9#^y!+2&|KEJNsR%)jeumX_8K(`N7SGzr@!F%8IOE|7g zuPn@)a152b!hpkvb={=o9}zQDu5)dmt9gBcOSI`J$hXz=C(=X?On{zo*_g$RjjjGt zlCz&L^6D0Af;wo(ZEIM&!eO|^!^24DvAqQO;0#J1Bn|mEgS{4=-%FS8Y8&Ulo^5!- zp@!2$fJHD?K!MCqqEsSqD9Mmb2#q zU{!zk8mc9~aQ=~hW*6xaFP`BI+9ozbFEokZvgdwEw6NQ*#Cd%qBNrE!QR@aOkZDg% z|4gdcon@)b8gB@ai{bC??k+OI#=a#aaX;ATb)4_B?o!XD&2Ee0b8&J44QQI<(uiLD zhhX0$$8KvCb8}Fs>g{bUi9G-6i2}9nsQcc6%xGe0QwT%LD`>h~v@@uUtb>AayC3UL zZ|iZm7T2PtQ*Ji{re~m06rO#3itxf55eZ4ScZL1j4vm`!=io7yBW7U({5&U z#m$p{jn^&ObfcmX;a&Zi8`v`N@DyI(q(*CeZS$NH;0e$GPy`tJ%CZt^Pc>>!Py*_( z*$_Y}SbSR*0*zt+R-M5rpTR4wERe@&9x8%*nb&Hh^2d+IVp)dU0_6+EgCKU+)X_OS z*cz?bnT*O`F1Me@3lKz4 zggG1HdsHlj7*Ib@jHv0YwoCLT?%1}6o$un%C zxIZ!F{f>@~<(ag&=B&}f&60W+XOOkxLiqUjPB~=5YY}r^;jQaF%O)->mbUH()%m^ zAY8F&GtuQN`%yI&>vG}x=qnJ8Ib;AQMB+s)zWB`$3fBRm+ESr9WOpNf7Mr@gnk+;;xV!xIc@z#YJJWo-CjnqT25T7Gg^A z`*(h(re+ka!}L_Ae7zQj1O0UX%502HW2QS(EmM9ql_obmy=gnaJkd;~QZ~(Hqgzc| zOAGuoRgl9mXD`DH<#LdjHCNTV&VKXm^RGn|9j)^D!vb3a9J;SijbUnrD{4fa*bASD z05Twl_Fi!xu?@r|Dj8@5#0eU|G4RLyl^qWx@fXOyM*<_IxSiR=Zupil`BL;G^)sO9 z?lj{gzw{_+xW%+Evdh3rIx5bI>swuK^pOWQAK%fUV@-%iwEMv>6PB^88`?NI75gAO z-|Eq{VyaSPz7R?Yx92jF+5Yy$na~;b%QBVXED)nCyIlucgN%0naA5`SY#kkL)u1Y! 
z^78V2+8G718Tz8LW4Wu&Yj~rZ&u|~!^ocIS%|4my^+7N6mG!#IcI}CpR14`hUOK!B z?j9yfgt!iPK}IdJBzx5XQeoj_u^v9U7rss~O~!nLHY&8U`{?Td6oe z_zWUx)1@PHYIxR6*Wg;N`i{@fX9+565H{4;N4j_S7Z`;e-he3P(xpxxX~mKqB;$qZ z4Ooz_tf0Mh>*1vP?o8UGtIe92R7%^b_j$G18`numN4bw>=mMfrT@?>!T3006N?g{b zUucQj_2y{T+YVOQ%D4v4E&9U&b9G#NE}M{YRZQxEHB>f3prNsBfBP3GYS<1U@F1a8 zSBoc~*NTQ>LL+n_dg9Sl{=_;!yp&;`F9+{qxPQLe`w2>^!O_EF7$?9FNPx1OMK$ZA zNVIx^A(Jf(D7;#q=5$*A4HE{t`#Wodpk|*4k@F#;$!VqHG5rN$(T;x@CS^cTJmEbu zGQt;QzwnT+`92#QVa!mOLwuA+1u^HV`X8Ews}RAPv}kB(YPk&E1-nQj11Zx#bh*v` zN}r1JI805Lz-I=zFc2P34b4@9Pkyv80P%22Z)43qj$>nE13%laztRveRqQgQvnMXS zblzYb1kkZHGH^Y?4)}EWSk5}i4v3mz^KRf>fa(D-)4;8HzkZ$w;1B^HQ#aK){~qF) zC*6%vH*P@lMSw8}sNC9tjs=tP$E&^)L(s4hH2z+6es6D2WPiC9RQx5Q?k*5VCJynV z1{FN4jEo-*7GcgL0%iv~z+)$;rdSGKz$8^VV)gf15-8KwUK3J(f|Lu%(274(Wxd#W z&5(p7Xhbc5T8N*GZQ&?~U)l#vKav<~JJo*i=G}PE*F!F~nrq-%`I4OcSollU3kX*< z9{VdYVog9UkGL#V(UV(`8i)C5$7eY-e!QK(K@FoS&{!A`b=_MisoAdMn5Y=F@6}#a z(9_k`s1o0ESS&*Q`0+iz&7h^l=}*Y=w5O@Br$&KLGWDeOZ$>wox-kvH~};g!sF) zA9akJ766`yo+wHuHwlE43@!C=U0vU>;|s>#6W#B$XFUG+-p)6a;fKBJn)SEYyOnPs zPu{KRNc53cr0g=6qUKIW$mnV3&38?LA_!=voWTep^BL&|7@c}(E$f&F{1Y$*z(zRq z45Wy~^s50ya7QyY=@^ho`>NHGUug;uYA~N&>Q3;ml>eDRoXxtq?0xdud4`(|9PHJb*1uxt8(ziOSp-gIIirAsls6#89?d1MaF9x(SDC~maF zy7>n{B?L8XmJ%L=pE`P`ZStcky@vE+$%%(J<(o+4WxiT2pw?_$h9`Fu6*Hs<7~!^i z-cdZ02Q@;-D%r)`gW14p7AIUUAEv`R-fR4(9;0qd)3w-7Q@>8;0wUwh6?<~*FI(4> zU!LMo`BmNbgUP@IZI<_Q@bN$j3v}E}5PEHaS1#VaRC|jTjz%Q4pO&Pbe!(G6B5q<< z4IWs8$4UdpK&+W>j-g_$ovF+11_pwPV`?tzfF+MR9>q%Okwg_#hd%pZY6RvtU!)I zfvIhPscOx$d1eI-KE&0vQY(b?`gIjmRh#vbUr1(25Ottw00|V(T$K80jXZB$Q3BMi zLk{@Pl=(;<`2p~6_+o9lEpO@eX@OzraeyeIXSC4^eK%w#*aCA~6(muYknT6~@lz!S zyBq%!Eir*fq@HeH{GjNYJ$UO_4(!6J8)z#PtRJc17tTB1TD=PV8?bQSe{an+44gxX z_Da3TDZt~3Ql_q$C(PD26OzeAn=YQOg(i1Twr1~4IF6v>viFS*Uo@MVz(l{0z>u{= z8!Tz38dyMZCENInfAEe&uabhpQ=uvEfr8$z?cxU#2k&NGG1FPcC-Gh$4HuL@{GvBn z(JL(9E`_(@Cp#@{6dF3U(|o260rY)Av|S7j%#xL+;zdBqN8J33H<4L$jNoYV_+TOP zx2wppRCnWQsza+ggEs*Oz;KMG{W&(?%gFZtq9Hb|UGF9?3YyphHo9fP7^QW5Ud0gB 
z9F7uIAbkNekt4-?d6YE|-NKEA5)+j-445A>$C@qBR}DhnKF6UwRL^c*fIlh4PQ+D-zK)j)ZeqJRot z5V3;3EjZQvSK0V!lz4uhz&l)mZz;2vEUqiom&^>kfp|R70Jt>a&zwQa8yaW*Cop`W zrM-0V>~ed%PExVFqK+_3isZT-&ZPPFs}8}??bqslUC2E}%nyOA1bRwuu?SCc4+)XS zG@mdxz^9+hLP0w(F{T&EWgX<(Y} z=Dmr-I)9I@zGv}*DAlm(rK5SFt@WkeT z*wgh*kovtUXLBlc52htgojirgN^jzB-5v9<+1?H0JT!+&U#I3U*N2CO|JqSAi_WPt zLkJF!9h{L|Ompdssnze?+IXJ_KVEFP|%(8BhATfusbg zlbH#SL}GU91q2~UwFraJg1wP}QLgj)&$z|~7y&Ra@DmnJ3S&RWzBlK`0Mh=+XyK}Z z`TqH=EIE|Z3(U;a#RMI#Awrl`N=-Xby75u7Ebi0I7tbbxC4Cx0?Cl>k1c9X6ew0b6 zp2Coa&*P4*zds)%_S$t5_ss&|38~shT$1JBCU$LVZGD&O+qc}dPb;f3YA#9PbPOCc z+|$)?u&Y^s(NGd)RC=`$12*BAYkj7+km80fC>+q~97)_{uF)K^x} zMZV#iIbM(KDJy`RODx3q7LU*=30~)#h?|!$D9ZhhUzM435w^F>=2q{zKK@Y|Bajps zSeoV4M-VBEDBmP#MKy3Wx5paB_ZM zW~VRqW7TVvhEdV`^ijYG=J1XIrNn9Q8v-k+9|EV2$Kbvc11dW%A@I$W46gJ2w`T2N zMg_XENC!w20}cB>OUz8~B_-Yg=R>W#$L4nfN;;{Y|;MGFMNbqExeWu>h>{{yNsb5q)d7X!J1_yaUPtP&(~qTA zT-=)NuD$=voaZ{JmD5s{`iT=G2YaJC-v~YK9#s0t+kEw`u;dnX%#>G5y+|>#*Jee3 zu-lt!Ika7(S|BLo>+4miq7dBE_v|GM7xq?f4s?%9BeF%M%B(&a=th0wnjnS?vu8M$ zko@`AWOAv@>^gg$!0^udS?;Z`z@T&`W{%@8Ocp-gLrV(s0i?M(616Xnlx0=A@!wbQ zQ)l5kA|R7JkZk;LrNN_o=4l|MRP|hrdukk?Wi=HIjpdNjQ0dMlR0>B2qu(#z@o7p{ z4&jc4QTwIxa!TCc=0av05yEO6t=d+Ct9}vYXMz6$1BXk<_W+BbX{T$&+l>M#fyl64 zs#ePv9)7&AI9?B0z)h6t=--nx4#U0PZ`om&E_}_obmWmxaO9a5@AVjo|hnM1#3G z$L8!IzSetrG7w_F3X3*D76o{6VKY%)t#K%9rW{;OS)i~Z3h zTV0ML{V)k=GWKH%PJkCe@bfJ=5TMO^VS0|e1SZu_Ugfzscnx{uMvliAQ3CC&W~ zgPqkQUIzFIho}0x4I+~w+rNXXY7UAM#AvqHyTtQbWlHd+X`dt`6HHfthE(5|nOT<$ zFa|D6=7l;wS+#UQuX+#LMI7U#Z%=#hvyT$`G7fR)j&p40CeNs0xx+viS96=iM{)d@ zkU;*EVPpa%t3zGp=Vn^^^~E%WKc1$4cK>AKIc)9N@~e>BppUVOy7y5AI;AFC^DlY5Q}C8% z9d{B{h30$21hHnVL#TxDFIAa*XsH{5vJK|2Vak^zcLJ3{;}g@`aWnPlZp=S{lhS6r zht7Rp65MvzZEPvl;k=D45AnUeR@OqiHGpmhSx(%r>B~F`j$Q%i4JjRbr^RQ~lE>{0 zP}GBf44{H?_xalc31xp2L2GlaVuc>#6@keJPGw-0by$3SYzuWpCT}r7qoD(on^}(l zDFcNE#Z@E1iVE-O2L{)`obQ={w7(=dKxnTTVtyOr)sK;1?eKj8MT1$I*q;|(XgG-H z-lG1)Ug$CXSME>{;I&m{XYzR*&UiKcFf0?trvP3vnD2uA+RGJur}Mjpz44Z%iqWy# 
zeQL9V(GyAu7g{3=k4>HDYlppL89)_@+8(h$9M!Y^XH+bVgJj6EUD1GzsW zn1BB+z-U&^f`eN+vE zj^^)q@nxu110eXZCk+ZA+R-5YIKZ>ag+K|?M}SYnzdgt1K!$8SEgI?a8~UJ|tM)2C z3>&+zmgz^~9aJNn7lvN}nEeP2)PvzfsK_?(JNc=4e6k%(-Zv^PAjF|w)uM0QI4Q7O z9w?CpQtQr|S5U(j-xFF4AWyw&f_kPeq@vlIxhbII zSb7^yZ?m~tOe**f88A__0=p6*YWd)zCk(&%dL25@MPdwBB>CfER06~gC4R>>baS+| z)hB)f=o>%JofpRZA`OKPhjyVZYP1n%=s>`wvQx9Kg74^4fvIlfi6~8|>yHCy} zUoWutS;UVX3jLcwE+IFnO~(^Sggv%6VB&x9%iXNCG!fR}ms6z|Lku+yEf#YvmOo{@ zUox01+~k5aIW#T8Vo6J1Lb)+-cJ==w>O1_o{J-~K5ke^0n~;%_y|ZQSQT7UDWRn?@ zk-bAkc6LT7DD(a{lbl@+QQ`k5rh_=3{1+-``rI-MBDRE_0gRiD_Q{+tX0l ztT0YAPF<~SHM;C3g0G3%Z@n(v3C-PZhL<*Yx>fFlDxv{uxehHDt2124KyTO<*J)0N z75u;d5CI+a4i{W*0KMXx$Rz2nf-=u6t2y^3s?)E_f}`X3KQs@u(Ew>8jDpJMem<&>1YaH3K;T)u zH0SBg-@l7E+^np2mW317cJIEzGMPFYCnYi2A~k)K@p}5b{gFH+y{NRui*=%Le)yxx zg)SewHtJX;eHpFDm4Ddu-a%^d`OKu~bx12hI84zhz(tDDN0vQHejB9(Eb`cY|11{- zw&iE=eqh9U_t!Ps!<1W3J-TfuMJ*Ot1AOmp#sxYENP9eA#~){{e%uQRb9$T>388x| ztN?9bdEGK5eFPxSckx(GA*bi?ZtJr!d1So<)2tymyOoj45&sp#x`8u+9LC$*%3k~S zs;a8i*7<6M>}!lCp1zHp*1mHMhtWQfdFB;<)OB?|BpuPQ)AXWp9^b!4k_yXS8pHaA z%>=`BCW2QQ3C2BYf*|M;fH4{XdR1o*@S4iM+GbK3wvco)F!HBbFrT%8Dnwu?8J@>O zvY6c&WU4Z=a-8@eAv$cu3hd>m5t3$Mg%|{ht;exyW<^mldo?Ir?IVB~fC}D8yaH+g zn7l94X_|h+lzQ9NR@ZQeA@MmiAd>*l#E*F2d=-94@;Au?xZdN_)KDq4$REf$CuarF z;;KLSV{KTAz?yw|94ru7!OR8ZTLL=6)lyH&faHPow{aunsO#2XArZ)|dNqu8n03R* z0?~A?qx$cMkYHgm$-xW0X8NuJJ8Yx2M%m;@)%>Vc{2de%HN14stG3OgVeSn<>`)FV z0DJPxx^71fiBRa$$9Mky%Kr@a{%|!?X?$;Ktl3sK)aF{_DJA5gz)TR9GS~>E*%F}! 
zK~@G@AN#l6!qSRk<&fwAzyO3T^$ky5ShZOc#6Z;unIL^^HRA;~?<~lNVYFkScpi`Lvd?EoPgq=1EFHg4Hs;Lp~F+7 zT9N>Qqi`Vy^|OPD8nuCk_z-DkxADNKhTnHo>B-5(+7hblihL907|V z@BpDQ9avv~6dM$Mf8)Z^4QOVy8xTGF7$}1e{qwdtd0O}5>5Y2ylag;)yU6Dwrk{lE z4rsEbpi{Z!U!w~P(jYyVutg41U;&i&ry8D5=~C}nj)U{9+Qa0}o^bBJKYT^O?`MbsP7Xiwi4s zx&k)Sw}!V~Jt~tRBH<_`*vCZ~yz8G2%*kW9f-?0gC7m*4Knuc3a+tIp76TWCOvF5eZ9J!PLA_EpSpXob&H_RT^6=1p zG0b;CbH3xmfvS9v^YxC5;-VM~?xNeW%L-SZ^nd#_g`7lENhhZoFkGOHJ4OqZ1NIsy zOZx^S2$I+huO}?g01qG)lH~vxp`7qi%cBsI7kEsdg@({{-)=RW$bm&lpjxQ6BM}B{ z6(kMGu7qV9W^)YHo=;#7s*053oQKFI0zB&(o6kk=n!r5)M_zckwd<^?O6i=h=1`_K z?)JrrD%xj3n+lCKOaPo~xYQu*S4AkPLF!yi3A>Afz4#=s_a)NX+3j)hSQQ>no8fMf z=at%9R@QK*g9i!9Ag6!6Bge{NrU?)M6}Ce@7aJ@yYHkDwivE*qegy;}96)+GWWu&0 zK;s>dgP^J(e~P%>>JX$}U^9SFTD{1CBX%k4;6x9M=ByENVPOSGYPU1oMCb#85Sd|B zfGvT&p8MIHU^QWM(7}&WEMEbHre5%S^1ok8gcs72zW861rd*O#Abmh3mMHd5dPs#I zS_kU}w^N=2u%iCW4u+Goc{DV-fqfrlR6*z-bZBourx>*%*jsS1qQrtd4B`}BfZ>$K zC-Lg+qY>*usO5g?$lw?8Kv7avGF8hVW>@_!q>FH?*@IM3YN{@KAPd#Xl&SsfNOeEc zc8$98x{`Ee^ewB-zV{AMG_DY)60UEWqNlM?)cUs0aIG9spD|~OFcrbZ6Ob>#wrTK7 zGvo*|T~*(MKni#u1he6})qw67K-7d3Vzv?&SRsO9ar%04>J|`^D&@rE!Ki-c5R!kW z05TtbeCd57@OWr{sprxWVoeu`hvATbJ+&ZV~USn=ajO1>WU1%_m9?XFSC* z?IA4KvdcZ43CcQ~g$aeS*PA0?lVKx>BrJZk`P|qjB|B8nKr$;Q^7BhQ_we-!qFflH zUHM{m(Na8J%%))4Gm0slt3F8d^hM{*rWY*OUw-wi)QL3lt2LzbU(3{~@fCkX2s%Zg z8p%Y)2g6Kb#@{U_aNkyEoR)i#E%>Ef0{|*{MI_>}%Ty2x`np7!f&+^F2d2~G2vNqE zLz-O1YuLKxqMcdFPye!r9JQ6RPrB6Ffw%Uh-ljKY zVKQ9F;TKvSQ5$z(&7Y%$VF>eDYU}y4t5{jx#~$8a4z4jhBeR`J_a1Z4zl!;(e(@2j zeqv1x+!ULO_kd1`haxv2uW5Q-h^OBt>gglzi=Zo-$n$rk9&Rop zndkAxiUse|WYY)kw|xonBW+Fp?|00L9D7`L+8)u93{H-)7T%|j2?Qrc`3@A9eRswk zz;tS3#Gnhkrm;atSBRVenL!eWZvL^EEhA?cs7Bu`1xi3@RjPv1)_h(lJnECc>V3shw|m2PlDRz z#rP=HOMN)Yfz)@M-g4ceZ34F+RtC@on1^vnaWpFjBM&B)tv+ZadXhCp!}Uup?Abzx zJHaMPd7QQ{fhOTgT!#-6w}ItYw{T(@ruUR0zd1pHLLK73VN%GjYQYJRALOAu$*DG3 zT%NR9jvsfIf}D?fyZj?lbiQ8m7ctwYwM3w7P9;}ctHQ%S;uh@DKtrzX6KM3kT3i_YBzU9t(w9(9 zDdW0Fkv;Q1Veyh?a12x)#;o1rT5YU&S1TgNUdY3(p(aR#$t<&!cRJ$+c^k+;gt@4v zooV18CcOPjB-rS{Olj`vq=+V^sjrZMV+FG$P 
z;eEFWk;jXkU_zag+z*#fP54E^SXkDBJYLJ#@>Y|Jn~X}`ySsI<=oa!|hc}|P-u;WN zc*{8-oW`~VqJwnT&c(+b*(YNT{S;H=Fs`&inA9q_4C4pqmcROr%c~>!8Jwl1>)#JPb^Faj zp8}6>cj#6c-ga&XCpTP~pXmLx8Y5C9QxCzeFhzhCCWKnthWTkb`8 zbR4U12p8+$(Zge*3Ox#!-ec@}no8-Cl~BIRJo^(2*H-`U_v(x|nC@5JShLe4UTnZp zZhRMtM3>ADrlNNlr`(c+_lTnOi>~_fQ{M>!quGBu3c^WxQ(?bfUGw@ab?CF1$3@_{ zmA4@k2~#ARYnFt>(fWp*B{nD>6aI6J`pQf33sZ3^XyK4OIUJSOId?9!G zm=l)-z3JeoWsuuB;j2^|=mlUILfZ=(~daSUUwACSU8<3FcKJGqz6} zb&r&dzeudSzKfKE7L+0#ZFI<1++uqx6o3sDld2CE4veejS8Njgj=jD+UAPy^d3%AxdXg(5= zkz<~>ePf(^-{8#@cwkF@N1+0R;kP;|_jz_EQ=LC$ze;qpmWIT`LMk8`KcIOv_VP|P z)ZmF8rfRCL%bJ8^XnwDlyE`lD%Wml2p>o)+1J7x%Fe)nTb0?7hzn@babjupC#1u3& z+TgYlI@Cx~`m=z9I8g+rRGG`}SCUCdwHi{rMXsxH&PY6NcOd4#xH=qF6mgy2W>$wCI-2{pqMjJdjNqAh%EQdF8Ul?KHT z#e!=b^oo!J-nTl>>+3_mSMu#hj%~qRxZpR*`BvYa!=@6;WA`IA(;-xS854Z_!GFI+ z;5hTw2*-)ha{}=OrMX8ha=gASK~_{5L3d$lYyDl_WcAZ@h7Gk>1GG(*;h^ut|ZH8hO$J5n%yLZVuUMeO^vtJzz_)(G> zh^x7B>j;(@qnYvgwqhzPv2OqvL&frvvaJz(EHAwp^*Ni2&Zp$Fhd?`XwEWM_|?41@T{+2VVI4+bUGG5F@S|Oq4S(9l2DJt?k0Xk?e5I6{*cC z+`;LlV**oz1Kx21-G=lUq(6arO8gmF!1MO^Df8ZhKotazu$;j!N-GQ{Mljz#0 zl4#GRLPI6S*6m`Fa22v3-4bHVG&iDAaF%Aic4lSUN=D>NruzS7p=GWTG!~`64pTCQ zlYiOqB>*FDnzz=a6EVv-66utd+P|bECF$zxYwOKUOgx3lc@0ESl8lHk|K?0>SUB8R zea4l?Z$OAdtOUeKB4+60PtrbazcnZ&HHQ?KXrOTa_tSx33v1EpS%Ie=y#i5A{`uql zP2_$i2|Y=VL$%Vvfcaen_GN)qI02p1j`$E3E5Oos)&C-LF&TW9CBaM87u zm6e8tsj>0&!~{+{7AlMpva6j0^;iB6o2^AyH0HrftT==h#YpPtX#K4hvLUwKup@0P z4u_cVAEq7u2AH-rb(z;qb2K}4SWIQhzO61KE{o;J(aTC&`ZK7}VL1oXwQeHRJv1n~ zc%(+9Y6u@&lv6ej$*r@SzLPoqjg1_R17wOsSG`bqq<}b}tOxuaWGyZfK1WW1I}1>B z?&8{FW9hRy1*CCSgs)F!I};s#U}Iy4hlh7;TtZnOF&7=a!!HVDD~HA_udhF8?x&1h zP)FYlNkn9Gsgipa^mi&86 zq4bPfC$@TC07ghJ%oGt{RvdUETKETt<3W?jjJDn-Zr-0l&~9M=^E+mnco(P&exaO; zMmvIo^_J(BHk9QZ?of5X$83?DXBpJ6?sj60-Z-j3d^%Ech^36!_8!lq4%Fs$<~MAp z_vL}cj^*%BmUCd}ke~4eQ<(~hAAaEd|KG@OaWyt9C)tBbY-n%)GaVDayu`AJ_+KPe z2&hN3ba?EOUOc_?3F$am@m3tx;P>lFloLJX{764GALd1{M^(`>PKr;FeH`@qm<}$7 zyd0eh#G6Yg_$eHAH5Vte}SX=m@rox*9J$>(3$1*;7VrOMF2Acq)e#bhu3PsV-8zUnR{z2$9 
z#GE9tiwn0-#_xU}d?T7N_w(k2xj2#@Eq+73wmo_prO$%=zED(Er&8)FnkHeYD;Jd4 z0|cNL2^uE)9xWD{dbJjobDni2sbB1I?fGoUiS2|@7C529#ivME;}#iEdOz}HTYcuK zgx)4+ScUw4?vk&RT;00><1bCE=a+^sQ8(qCRGnWC5hBjSEE0M9Rx{1CuntC>il)m% z4XdgDeZGm0Y{TiBo3mLwi%Xz6b^R7?da$QkJqiL5qbiFPhx+en!mh`;as*a3TRdkC zf9`cQU-)&O9;qeVQqOJP`t)wti_iMu!`ptat(aCnDgMt>y(R z-hJYp$%*TOo~W{6AjUS7fJ)04Rb<&=oXi1VX` zgh-X<-thR96(=F#X-$o7{4?~ke7G04YU*Mt;G%KYLSJ7|xm$s&X_=C!<{yk{_-9n; z3E~}_ubyCy1)YY_&l_RmArbm=ce$6?tDN1iHovbw1bJL|e67N#Z01dDkk%QVLAUZ~ zCOy1_yIn2N=op3-wMCxdcQX^j-2ZN(u6-S@R>0Oj6Q_E zwLHj2QK51DH5uZ`%m1|=^lg`ah8mSRbUEW{sUZpW>hG}Gc4+vEUeKpDpLi%Jm`i_- zuB;>_x%~t-Gem1AJjd7Y@%L}snvo4jKL5m8 z1`B*BqZF4^V$P)S+b+(*U3B!Uv}SR9gpttXehvVa&m_md%}Z!0GQNHB?7;M7j^OvZ zZ`y@3_P1~2v-0z^Rx~$BaQFN4X54rB^hb2CBK;cE@7_%DSC{|gmldRLTL0HBBTc~9ki=CL; zpC1a-u+u~!czVy(6(rrfs?CmKexf&TwtMZo|Fi1=u+{1$A8Se6y~SN_M~189wXg2G zC3Qf%g0i<(2#2g_bg%)UKuN76x{8uKM4s%c#&3VXi+e4iLzD%RTsI7V0;WaL;k-I{ z_)z#LQ?*w{7iA^^og}-}ICWUO*_F^$dFU;!*wAD54CA+bohSfGrQx!>pUymFC_oPs z75zP;Spo`)z6;muJ!hkAA9ZcQ&DK?##6&f|d#g5MhDS%vATgh5zUZT-4%yhC4LF>; z;50tI=F{EXEmP*d7RkkDY|P}de>&6l%YU$f(#eh2>Gd1=zXZov$I3BLf(=!&iJ#oZ zu?~)Cy?!1gDg>18oViQ&Z{5X>D)C*cfGkK{;+xi^)RObeCDb zzy22|@ZAQU&FRmd5A&62U)`~s$Pj+_+RMURs#P?@WM$*5qx3#6#4$P=jgw=qz*$4p zC-}TYKvDu-(NMItMm-%%`b?tP=)JhrwL9AQ=y3HdP__p$M z!E9X0U{CA&;9WpYE^XW^8bl2&=5O!7r$LY`6{*>~Bl}q<9DOf7_jm)zP-bE|vXKnu zZzwG)8OTtUAw1??8N|d$(jRR$DK`E^{M7exhJCrM?D(BNOZVgi+JyGyJCl7cX7Vqf z>vezh7`#@+Ei+klHd*6NV*z8nVWOuWNAJ>q1^2c|Ra%MW;<9p9Zp#}e@z4@wm?-Wk zmT`YFE{b2Fv!`QwoC%Cc*XVk-eHvwGnt&@dbwYFxzNk@a-hOT{Bqg3{D-{5VC1@yI!2fJkYa46OlwMED(Ns7UP+H44 zxoSMcxOQXm*9u{(yuhpVu@cS?8u#xSG4s-fM8-3dt<#!my|HWDF*u~@8VCC>*j0Ft z^(f}7R03{vvr=m<9j}Q@zo)kO3jZ~a-SrcaT4hw@y{O*r3{DZEvb1?IykEyNxG+#* z!U)*Jdtt0^5kf*KWlX#$H7`sH1uf7y(Bc42igmM|`4^hxT!w0|cJCA>*(!0mL zyvLuI)7VF^=0l%iCtIM;b0j({p86_=BJOc3yskU;DrA8awbk3{mlS5Dv+}tf>83n+ z*Ris;{L_lp-ciC3i?^-y#Y*1#+qa)RTq4Z|ur(~Oj|fpekosHp%!t9gpa_dH4gzfc zT3p6eBsKPHT+LiuP|)OOzH`Y8&;9zzmpZN5vR?pna3E;d+FEU>Mm0BpAX~|c#`DUf 
zjJ;8&6Wzp`K;j&{9_<)b9HV6QSae_<=bg(9N(#G%4!fUu9MIMa_!*Wb<2|!l&6!}L zb1%6y&Nwa!?bgsH-VeoNzV86in)U@6C(O;zxzmO-Co;P@g6=O<%kRtUDcs^VYFdyt z(cKwi@mL1~rKeT*2-iA2 zA_7jC-P~N zRIyKdu-=(ibA9O83jLT*)zW?k(yTp?(4_ruXnEnN*+=9@LkW;nF-~KczW5coq^tf2f~7?FvVl< zAcY)F>JDefQsP87y*IifrpAnNT4gQ*FC+4y-z@jxj^X{V9N3oh4S20_oi(a@x&o#u zVw#oNdc& zeq`SCVGjOLXg9`CXuqLccOfLW8}BgP#^LLI+gBYLHOE;R5p&xVR4P7VYV5s}E^N1u zLJ79Gd8#`Z*od24(};-&H@VJHV2|EHYR8)rX2din4j8T2pi9qz)%2ZZY1Vw5*~52b ztcRQrnqE`>6`Ch}#$Z%7phBml#8UrTuiWaPqDPSj_Pw;4Fot|*BK8;klOvsuStPpG zr|A-#sx!Xg;k(Lr43xlQ_=f#&; zCE#4($`Yvb>(zLutrdo8J*7zG_!FQf1{zA`_6vo285llf71)KTS){Z6Zosk;I?Pjf zz-LwG{c>3IahJ--2dJ>ItSH*J}cFODFld?Mq;!Oqhb{Kr* zqw(zYSXWH{@Ei|6|6_c4Sb31csLa7HaLZ%5>G-kzYaKjDwqRP&?MTR`u~&fHXfdXG z1u|MosK&fPC%wlwW}qhvD{&v%aL`w1dWi46ZD9PzXt`DfWS`W$b~tENegE)RH^dio z^vdXMF^*w5+YiLn#Bi_$8seNC`{eaplGS^&9B~?as{*>Le(Q~<8JfOBsbi5jZeoauDEf%`+8&f?)Za@-TQx*D^aQBKnK=H#Hplz zxIDOSkF`g!@a)?eAJX;MW5~qb;~<5b_bhkl+Nn*6%SB~d)DWVw#j=|LdazR5SUx%4WBC^ygEj7Qw? zUA#pZ%%SR9@fWv&X0{i6(hxqjY^FHHJe95N_bQZQ{{eAbg2RxE{uj3~XhM4ku99+g zQko6B{A+jf_MkQLeDqk8pf32++WRd@T)#Z?8 z-m=wr?T^~yufHbxR1VXy%1X@FSGgE3g7Xf%5KG9FQ5nZoDEmc)qt1izKo==J6(Amu1? z6L#%h;U?{6C;|T@DX(@BgHv-AL4WqGUBGVw`=^=F?$`#F0Jg(TUu#Ldcm+sj^2<(c z*!DhufK-FN(Y{yo14Z45_o|LMn?1T)op4|5T=JMM)iyD5!ZbKEm8E%Q!F;P%+C5W+ zA(l~Dz&i?d0gOPzJM}8&rW!M~sxMU()GZbQDSZoa09#DY z=1ezyZZx`n@pH1$SYb3lPJ(lPRGJ|3Uy_7bOav-*INr6CN^%rpxpGi85}=$PTRfIlhDHXgtItB@bvXW#hH5NU=5wUaBvqS`fQ6P-xe_Hi6T9-(9bw_ z@!$A?pO?~mr5vC#qsuc}Az2u}Mci3^gOqpPR z@Z__XWJ+?H7v1la0HySIF9-Cit;j;nmcFqjEiiTHll7gKkJr3q-PG_gPeK#?%6zv| zV8MLt0S)e~OtRvh%z<5KFo9ryHBjdzf_20Mx;KRlZg+4N{(Bw1t?xsaPBP+SL^zD? 
z&KqioFFiI+?fQ{o!3vNeNJ=q!V4gQDUd*EOF8=8CcIPiQGRdr000i~RKh)E=w0Eg{ z*WAuNwELNLcz8JR0KQ5_CG>>uM>5b-87x9>nr?P(&)m|ovcz<0lkS--0>DY#yhTW$ z{qsZgMo|L5tf!p&e=*O3;{!S$qH~PrCIbaCrY8n%o;GouV8KZkq!dR-p^))~gw8x- zmMtX_Bmi(1IN=bBunbQeT!wz}4^bmE#O1eK6*E&68Gz3gps+-t-2XcSTq^#>s$)L9 z`oo)x4aM5xu2Vq~`u1QmWgT8lR*rABS6rcUVkD3}lCd*84Cy+iJilxGiI|u;@%W*& zHFXUW9cOym{qwpr0b506R#bE(dPwlt$@H?1%vD-R*Js@IYy6{B(`}_@?2hW{oR4 zq{$*fsVs|En}f$){VYm-Md9C5;tdTASy#%+%i+T3lQHB(POEQx)j zZHVDCe4isvRS>JJ9{1oLxnysenSWMAkoNF=;s{*!h29BwD}3OuSH| z^~ZiHP^oUsGurT)w{W$uz?KhHeOQt#h8A(0f^~YReyBY4pUHde6)wArp$5ri6I$== zNZoJfWMxh~5HgwkcOto*7PDS>^V&oUobA@C0{>n`@)=!-P@>2yUel{6vzy?7N|bkV zpJ)zviN!$-c!^yXt2+LTc zIQW{q8c@*j-!z`7TtBEql?S4!&3&Um2;WqU4~3BSa1{D!QQS=cCErr{C6Ui|t0z*$ z8^8Nbo5*`5jPnAhIB?>rQCP-qSY_C+;>gRdRgXNBQ^Xv8^kZjFrwxi`zurP?euBup zA->?N`19&oR-ih>8(fPRy0a;P5`CRcVAAwZ0mcCnOo5Mt+FEbHYkeq&rXSQfV?ULy z8gU~)e&-(a5zMRDs3VoRH$2E0Qw|b&hA|N2mziS)3c=}-r+gr`2;0i%RC4)yO&D8i z?(el2r>H@%kokH}e5G^NM)i7A`Lt%D6x-Mcmd@X?Ym{5uy(r z^7INOBN}>P*ayRPEx`$!WoeH4icW^NCK)m|>0FH51Ywx2EnfKDNV<_yS9I9JPlv(X z0h}@a`{frbouc-Q_Cvv_1ODl+Xy0;?GFp($ovHOkEN1vHjvN&yd0!8pw9>~%;Suow zMF7*$2YXgpq6hN~1HpLptvR5XPHqrRAmeMteNGO|__y9WMfOMsHs7y1?};iD?@*@dX8?}E)A(DxjN4m2IK_u_4SJq)4+=dAJd0~$u4I(}2)!;ITC zCWq^1@fQ^yImMDvoH9Vn`+?X;QG$=fKvCH(G@B3a|};mF*|# z!HpD63VCcYYilAE%QRlgA^`xFyQI2O#iI9>p{`tQUsKdfTtIsbDd=f+{xwi7lk~IJp312++5jIZ9)$tE zq1OsH9#}EfGOtp$!|6Fh3fVAi+#2bjCn3zTHJ2`vI7h@M4&swvQDoISdPO}X`5Tvw z&f%pzoYNlHF<;c@zpkt1o%S6Mdl?ryWG=;(vve{w0^)X75X19Lzk7fv2F@IeZ`YDHl`sQ zu;nFT8(BRx$xGo~KlD5P{6fz9rdKYc2Nt>(G7^;RJve9p3yB`X4A+^sMg%eyyM!C) z3qZ_*@r|g)`J|D}mvcY0fhR|fn({Xi}7n0&TN6Go4 zJj*|WWQqG+Gh$;=y3QHqI7?4)eJwqRMghHuHSw3*4<|!;eS}+}qxD;hHfb!EEjO9s zAf%8^X#x$W9Wn@b=g$E}>S?_o?|+Hh3BW6iTHB`4=^idK>;7!l4hoNNXR`wSsl-7o z$5n7<5Z|~?2Se(v-eUp1 zP^;SW7pzI`mTcqePIbk-K=DGkent(0G(*Wb@>;G{DYA}2YynQte}F=zs1F1^UG{9` zsGZ<=T8vF`Yf;S|7<|d;ekqOx`61B`a4K&!u3FL`P)za0;MkSNPFG=CRxMfTC=~99MMos5%~Z=a{M1WYpX!PXH8Tz zYtgteOf^AMrTurN2f`9GdMfp%?bQlJu>v8-q+Lvje?O8gHoy!O!Lu4x6|v))F2qda 
z&__^MCg?^t0dWDgV+O!wuSraeJKHvLmK|Nd!?$YhLi2guIVKwhUkkD0fl7~35)~tX zq+b35jtuB6=CKUIK8>x(-}4K6WSC0d4#A-z0#sko-EsrU;+86X)V)8Ux*spIY=15z zV(M(JCDj{-VLcT3J0!#A;8p-FS-kukb)V?n->4{El8}! zY?UHk?8D>f@`W`zUe4p;i*USrsR>~=B0?MHK+9nqL;;-{V>Xq+FXECf+-~c+jFfK? zMIC4$fcyS87Z_91_hK1;BWARpqFA5{h-9xMv7gPAF_TbMv0ZYj7F_L73d(v@4DisnhfjD{TsBK&96?#Z;Q{xcP3_kD&Z1I&$lE=xbz|kW+l+ zV03|q-S50kNu=-g=y9=KQT~pY9#ZP+eZgl9_du*Vjfj8ZJu63}S{B}8U~jikLKB{N z&&kjx$I}2RzJWg_$_^Qvy|YU}s_F9#d2G!Ad9w=2xxe_A9yS1%Cg}`vs#{KK*q@|Frm-Z| zG_Kw5+2ORXe9xlz%nk|;WJ@S&P|Tqf7)*ZqHSC_IGqA$d{HtyYo_0gefQY);)(rw# zrXq8%tJV?mQ~BJ%rcpq|N-Z>%rWpp}CpwTs!7%FcE zjru04=LA^W>VSUp>zNKERFF#fJRuk+6&s@=)oi_ih0p1wDl_hoSBSI^MbKx!TKDDP zoO%2g*BkKde>+UL)FLWsg!(D%ZrfIjgJFFkXlk1)?IJTDca6F>zHlnV(j1^5dLKS@ zs6|1hhG{=o0ixhQEwbfC`AiFd;(Bi{Ne<+sq6zBK9VAg!{TB52t6ktt;#K}QOzm@1 zK7$RAeEp&-50&wtL_zzS0LqU`4YyweJRp!&#IIu|&r@t61&{}E44}%Yo2kAif?LwC z=gD9_l)|dAs1C}?B$>50gm<6*%}UsC3Bh zzW!bHfbtVwa|Ifm-Lm}V&#RXy^us`ERh8Wqin?s7uJoB&OL4Q4D zG8B_HOMCb_^P5TGEv?tHv6S!XbL#N|iX9n86Ue=7~vWwr~yYh^3yTjVg2gn5Ik9Qhj0=T!_o1^xHONpX_)qCgNDv7S9?pQDI z{KeO6TPIa+(?s-y!6wAISnTyNieItmpK$IMY*^3(e(xYz?SW1T7^!=nq*q3^LPJ94 zM%hqgiGRDF*e=Y);5)rky6|slB9F|fJ&A6dUYMH)!M~#9v?{z*-htMe)bs%#m~~kn zj(x+-W!wU>WJ<-j)HpS`o+!8;JX@SQF^8$TKNjFIuLcwpjkQK$1Bv6 zBYAj&jPeeRX#+?}ah^a$?EfC==eRii+c!81*CR)fwIci@RMP(>Itj~12M{K+6beVa zkf6Kne@HR~y`tiQop`-cv0H#JX0H2JD8wC<+ASKiNmn*}j4BNh7eKW!kjuuPAk|2? 
z=r$zo;5V1j@seyvbyAH^o9Ffk%^jj!6)s<#-b6K#f-mwsKjwxZI|%9Bv%>sIQI-$^ z)F4HVd2FYf0ucUwm#nbx$Hjn?$=T|AXK-5zZpaD{L1krS?bpR*74K+669$k~OYASu zeazANAsb&L1V&JrE-JBWCy%;u$!IF^pkEfaO*&D(Z2$*8sA+WL?1|*dY4ok$T>y8{ z5A^1Sm6M)8@7dEsGJas2b11pRzgot*HtFzZ7(6nT82Gb=0?;WlO&d$aT{e@2_9sJ{dC$Ijjw=k4Bq0>XF5);*#`H=|Gqz-rdG%s{gYlPA~|%K@zLOqkrqOZ z#FZ-Qdn}E2OxlPHFhMwk5JTWP+c3V7S&ghhbEY60Z0c;WZwTPz4mH(`?|#YB{6kjT?`go2W8Xr# zU;w28oD-)7?cZ23ObjzKtrrTKE?;_zv<-dSWUlUg0xEo{ns7D1dk)0!_7CyHZ=E!m zv#9CWymo;{FAhh96uBPb$q}M%EE5hvcDT1$^TC_~IvSbwOL*A9pkGU* zgZUWf4%?;QrGm8G14hum5CoXti!8Z9DYXrh&)UYjI|BC?mkr^?bT(P(_$?!|R5pe^C#(?3s{qYg=gzHE(#tj`{ zhcI9V+5u#zo~5{zb7N_t<2q>#1r4S#ScRX3Df>z-;IKpWq^M!*ld&;|Jj_vgr5#u$ zLVa}yYy?uVxqnN|#IR|&))3?lVv;imqo56i8@V-Z;m#!K(%0qQARn>!7TNP9AJV^)iGK*5rD#-bgjw9YuWYWR~?AE?0-Uo%!qHfWV$oOPyc_L>( zqgodC^65ns+bOB+g6kEZs_Xx;nq7J^-_yeKYToZ92&7PqSC2xV4~Gsno~Fr>n+y#d zz4PG5tTd`+*^2YRUCpbeLw(0GNre50WmgZ=?!m9!(z%P+%*;ls&iLzXbYbxW^Kh`B z!cVP@S95TH%#i4=y&3i0hF(c25t$^~f}y%v=vC2X@;iYK=}?3QqiwYT`bqE3mpd~P z>QXl`EXiLtA1*s>b#O82T|6?}kGu#TYT>z!`<0wX?++&yXeA^|KqU@K9GX|@_i(+} z%IeoqsE6An(~ZjORI)x#q59&~eB_;RZ0*}v8Xx3Mfa{y1SP|bAfO!tE5|7S^E2^nC%ICY4;ZHvRq3X5HwzKiT7hz^u$) zS3ibVV~5jLrJsm?Jc*?+Xfu#;=4=`O00Tsm(cN0nB(y;o@bbJTsePat%_ShPe*+b$ z$U{3Gq=GDWkJ#D9a{fG*n(JrwW>Ucd(REu3-G&uFr8k^_o^*@XEk_y^1F#ksc!<9`pCg`IkT5yKkuNr%amv}U z0+%8|oqVAde^q)u=&aN3?;;wG*NJzZ1GT2*Kio*`27)4E2ZuoX1pu8 zd3iaPCE;01jDLPqUmGu18WJ`sb&#fvx_ONWBO3d|*%zz7GW^%jVV@3I4B()k4WvId zNSF>&OqOP9V;@1~{Z2 zqke|M9#QpQoRCMggIwYR}{b^ zX0F~XH=$VsQ$kb8ojy6gE}xOhh~@nfDZ^)qF}ZON94}SPPOM^WV1hHZ<_oo&4i6GI zbcj~MOn481UCg@Kl>aDyTkLsp6I0WrG9Yj&4kjf7X5D&x!7QxCV=hq^WF;`o1Q>KD$EdgE6oOE!wy)=x;~3iRp&OQvzQ+NdD>$BM(?5 z^v3p!y8HO!wT6d-oFadn)tO}DZ(ezrg=p9pNW9*fzrTu-#m_f{^Z?^~D1z2g4l<9q z-zl6vO#juH@cVoE!J6qs!`rvY3%71d){B37I8u85?^jaO?ZbQqY@v3)J*o&jWU_fG z@PL;FD>qLTILzu#j(^OB|5)%r=g>XN35FC_gDvh-4yT_&!mB;8A< zz{tX89_(K3+EEOJs>)E$p}%(eW%DsTc}!S#{KH1g$@RT=3o5dlN&|ONCZ_ixA?%t> z)Y!pA>n);NpLm6YSXAV1ex{P*KCuG&S6iiHv^o!t`5J%Xx#qrZm4H@zF={QFtZrlXfP; 
zpf_Zu9VnKY{4_H3zIs#8RfW13I6as2m%w?VkzAy#k??T@nyiPHe18;|-TvcCxz)?TN1J_f-sBGFcEOZupE%qQKOc8f zq5R_a^4IIlVd84a%5bsmw`I1a-prF8-o8^f?fK}8#6-Rc?DX=q%}vLrB5;xU_1UxK zH!))3;wzPd1*$%~KNMDq|KvP+{MgiM4nsjfp&=vdbm10T(xXEuC1qu}*j`l~t_^p; z!Ec5;@bE6@r#U+D0nYZglG6%qu6U zo@TvGc{*z$$O^MBp9lK-3Kge6vF>u8=O!j5%0-fB%C}Hk)!hF1?e6&<(L0-m#l^+D zIGOF+4|trb3K9yg_bGLV1(BXCz$Pv-FuCh-CFN3}P>uv$cjuP0b)$5HgDswA6eWB% zbeYxtw-LYEE57jKL->LpSM%x4Vv1>jXK|s`7FdasmoHzk>zBjV0iIr*pAo?Q`nJz{x8EsZ5 z|7fXcoh-T%z>`RnR52b0#{{R*z7&?l^;k`FilH(FO)w7##JoN>obWFuqpJ_}#pY7|55j`JNzpGjS*SXqsFr_vb3@Q$W*)SQySs2vIrTYb2x%LE zG8cvEN{Xq!@Pxj~Yq7L*Edqzr<%Zec0q0B+F9weHItl&rZ&GbTn0Y} zCy1r zi%k^X0bok+{3hgILh;HL0=Y@|$$Lgcx5<8ro*2q%kf$G$N&HTAL$13ZI^j-*BE)4c(S1}(tLYG$144eJxNK1l=gt{{{pe2s+g(l$ zHa;!#zV}_(tmhe)-bg&y=82KOf$!h^8?cYdlf*JV<9{N<40_EWOw-&LAvwkU_kH~! zQ5UN{eCu%_GNH;JXRT6`ZZ`OHcE-^qHxp~kGk%@s>1h+z{QUg;*3g^&{ca>J%rO_s z3U_nA?T)@bQps3|P3&?T;G4gX9jmbYI7=p|9wv4yo|Pl`b3CYPiiQ5bvMyTU7=5LE z5Nz$-_QCbHW783`SaUE7-z?a>NLMuzD;p!LGwHN-#akBIJ=|IBUbpl1KHB!ic|(0< zcs4(fPatvF;`Jg8`v$YYZKY~*s;N_a*8-=2bee!8CUN&CPsq+UW5X#+Y2G(o{P{*} z<65KYiy}`JKz+n;~)8qtGm~^JA zr2^`IpthyBD8QC;4fZ>gOm=!mbmPLu#Sw1rERHo^m))G)`aULi+N^gM2^f|LM+h3M zCVN(xgZ5xILlb%&c0bWdBazJ3)8^B35KK-Q)YlYxcsA?Pzxof=Xl^(6_8tXJ%NR8$ z2>iP9Yq(G0G<)T?*^?(v&QFhayY8Od9$DV&t|zPzB-6@T9cee_HlstrVw{%K_AV^7fN=YN# za3}uGd){-;y?^e#KbvQL*P3g_m}8FRv-7d$?{(k5bE7Nur5YhS_tBKSX0NJ(g56#Z z`z$0ALtk_nJWdunB8IZvTwIE;&LKyT%J1yPeeJqgF0&r$V z#)?A?i0>2edc^Q9ZEp6=oWg??lAtfS-VUv3`Hx zuk-lB(R|{SpZiTN=>#=R@CLvPJYKNB>5u@=CSRjrcLKx0l9}|rZ&uA}D=Qn;+;;iB za>|aNsFERzK0~U;XlNDc_|GIE zP-)b?yP=fNV_ASGBdGp_7`?I|Mv~AFIM(||PMt@?N@ZVWr z;-W{KvvDU?L=KYwI~|{Z;9!GCHLKe?SEr*`vI7nhhMcBPcJeI}MgLkQ3qoEZepFlG zbe9CR>D$Nm(5V@+r*)dPH9^O1N2iJUw5iaZ1UOWoTFE>-=aIS9_v44*!P+qJj;8pw zXB)5JRfEh>5c(vM5))(MooKu~C^AlRHQJi0E;H`cu5+=4H(Wk%Gus*zs1QIV0V$H@R0P=E8)-;CI(E5(7Dx-;0a;owP+Z zl7bI7=ujR(h!||}XJBJzA3c_94<&NHJoy7coK7_fkfGYd&)ny@tM$aQA2L}1tqSn* zev$FSj~_ag+(Rs~=W|$WlFX(qfS`r_#X5cEstL423zEI{YQ^q`ejkyWak2SC`G`DM 
zl4k!wS%Yd#zIE@n3-O-P+54`EJ;a$({=1KZ^TRnw&vHmK*3*5ON~jE@fcx5w1I-q| zOo6chG+vMMK^?;$cNLy14zcK_1TV6^T3HrKo4?DVgGSuZoJ;+Sfk6YtjLLBrRQ!>w zNqra1-R+mN^cbA773{y5nXNpZuNN9;O-L{D92cn_#qBVO7aGiNTA$WP8Q*{*sq`X! z9f>eeF7ZdyQL7oIt!1T-#Lcyy4;t5@q4UP|JwP71+#C^+>2~|d$0j#T&==mVK1P0< zURfFa>+o#$s^O-_r_$En@~RWTWbjp#mnO*Kv0yhzC9BiZ48G&c+8KVnQi5@7QIl6 z*yr)cPu^*mm(o~JR&mS#srC*fJ%fuwqG6+L+}%qI8CM6F#t5q`m(ApvW*!ssGr|Wy z9HElRmyqlyLkdP)OzbdMe9LC=<=&4PVH5P5q+as7x!FlH@Jz?*s;P;Iu4FlsFEquO z48&ZXfB6>9%2&e@$4W-y^u6lHB0~jB^tWy?;?nDQznNvY{EszEFmbnD(ZnW{ixJdW z=C2cZzr1nYaYdmaoqTb8Q1nH>8t#pjHY8p!=wHHe%b;U8G^lNDcvN3GX_(FxerMLRo5|i?mbx($m^H(3dfA`T0dq+Iv=5)7|tO}NychatVWTL#VI7U4{cM!bM##GF6OO9Y$nJ+9)( zr8<+(qUu*!hhjhHfGzM5xk zF53d~!hpE2Wr9FJblDYq+CYY|0U5frdc(S~po2miQCDMlKP4YL@N#9D!{H|p%|1nD zB{dbz37w$wSZ&hKc$Mi(vfGmH2oz4suEZM+ksozB>??;}P(h4b!SLKo?dnyqI$c(^ z`NZa`Q?#Mu&Qe`1gik>o;~DxB2>bep3i{WRIUi)sp@6B#la#|@UU$1uY#DHMPVzuT zI{{8M8eL-{=fwLep80pY7B!WERBXAL=e9inLtb7?I>ovV(lYQbik8m~1r)i^Zpg_G7-$-^yce&FFElK$hPPUBaik zX9XHV7r#CE2(0)ZE@cp&$>&pz&w4Hm6=DO+WrAMYRAj7MNb!Byyt#0S?_w?s?U|aVZG{FD*Na~Q!2+y-4Pbfv+&$OuZcLV``LO8vvXRJ*IX#T?BL{tdZ6F;PyWG% z?l}yn2HaVzlNOSpQi+Dyh=;Eug-FvNVf@QPRs0-V4eyimWVo~ZL+LvXJFLV6DU`iXHu^d#8N&1jx&=TLj>DNKU9qk`u;wvo9`W#i~@E*qdo3wA2rhXsw z^%XN|dj1OtLVk)P6VzL2>7=VyK z3%1jr@9ON#`}&nS&eYPf-eDD39oRdQl-&1G^Y5aJ_`Mx;`al)P+~C!Mn`QtaVYssQ zfz1g@U;T+)ju<1*RCx}nA-*p7rI;=Y@j3^yA=V;_qQ9<%CWP4PkVXDWYNB5Xq$W^U zL29Bd0AV789Y66uf1zAEQb;jPZGqk5nM7sIC!j$C)7VGtm5yAh-M(djjDRw52-3bM z_%-ctvFP~ryEl4oHu>Iq{P;0;pOUI7)P}pyCDO3uEzJJs;q<#*AI{s5*^+8)>7z~m3)a5^JZ>DT+) z{|S*UY?{v{dcR#&H?ovI?q+&xYb(~0FzvModyefEDV(1TxA!zTihH$g~2!j;Tb?B+?+e zfFau|i}yNk*neNcQvof_Kn|k#_mPcMd})&p@Cp0o$78i4G*C1WTUjyAg!dJ?a3JD8Y>Vsm&{JshK#QZx3l#~h_JdHz z$zGzFxngzp9gNG6?w2neDUUh>ADJ>RIPW}c>mT$(^JYDtC3bC-qz&gn8w0i(bBAwwcv>2Ai*2Bzi5^qcHFN1Vc+-WR37llGUu1@0WkG|VdY9I zE%%es=-EL<0TNV_m!Z581JLxadc7_z{{Ht*YwxpxI@i#DdJF{Yw{_~&=z@P!vs9LZ zQ@wfoWEk7|nzO|QM3lMXk#CSMbVb{u!1~LDI%dy}mVPBRE+t)#dKaXbP}6OoY(-Vg 
zoRTbLtE}-B7XI?{+i7DoW4%hjdnTbyD$#JhJIGi!r>OeHYpFB)>h#NL{n2x(smOVx z!W|s)CB1}CL<>f?SOa%ZZN7FYM@B|}S%=~DyfvRMU)qbj>k+dHw5b?=VNXO!$Q|Ve zve;98;EW9_9?rd=J^R6!WND$KRj)4NEhupAeXg(SP@9t7tGs%X^z2D zg#T#Z`$*T;d~LSBW)4~(p;c#Nv(;Z5l1xo$ep5|yx5`z&z~wP9`3X;vM)xFg!8>xX z`}24@Cf@=mSy-I<)9$PsOj;)s5S}ZEil*+!j@&=&Jlk$uUO&s(xksndaNOkEXjjs& z*7&1UG^h24rK8j37L=GStm_ z!^$de3l9S{&8@@DR9U`@!Dq&jMSWMnSr(xO+GktN)^J{tXJV?Vf$!gY8XKp+)h8eb zL;$tY!r_*WB@9;q!xU&>?}f>rMX(fr;NdaER_N1llcFXO##b$h9;BuFj*ovFe3R&K z>lVfoJeII67huFhcn{58Z*$_$H{JQmvu%1Rmyd5#qNBsKJ^Ie1C2#h^C#gs6V!yPqtB77YW<>7NK$2IdP+Rd)~t zk=qcnJ`Agsb-tzfl4i5KMf&(U(FF+-MNn6H+ z77_i0pfE7zmH3{9#4}GSkD*tmIsLzMfWubuCUgYV%ZpE1;?rX2-x+tTUf$Z)c`GJ01Fbd-cZ{D$NTcrM^TE&TrJmJMf{ z%E5q{Z<>%e&CE=g2M9R~-1^j!wtl=x%6G9ER#w_@5uRKadWKuG_~{eb z!p|s;u)xcYXwwL=NEA6@pxz2oB#iuZbWj(*Tp4jw zMdc+bGdmk}X>JYWvO7EBKH$)BGX(V-c5L#Vx=~cL1ud;az$|GF9Xb%nggmFdh<{cK z6n46J84!;2AppLK98RSoRG=l5_x^bfxyazqnd8|jA|VVm%hMAA>+?|Jy313yO5T2W z8q>F4Ksfu1>FM0gq*Z)N7QlWk5j$fcSo*nGjwGjl?vR{o+uO6@xHSn-ZSm0`NB8qG z8~>tPSoMPEqrQ87Kxmw@)3gHI>F7zlMd!i1V-s=hL}ulq)f4H!PG&7sbNk_;&orysZB2p3pjg;O{r!V?IR$2zE$Z?u z`dVdaLhr4q@rC)N4^_`|T{zl$*kzNzk2Ls&=&*Pqma8HKPUVhsZ1?5U6C2CfY94#t zdhnvFUp=+zNU_F)qH93Mt((^{vx`VNG6AH0(1peKwqIg|ohj$BuikL=hto>=JJu$W zMwaMf_>b?K1&kpeQ3C6DY}OOy-~K5^a@#)Nbf2w_C+6Po!x#@sTUy#KS^DJRNmB4t z()en5X^9^zNjrIRRMizi1x2aVU>aFeWaOy##d;LH5Od!A-kV@I`rjS*Zxz{(aSyuL z+Mi}=uX2f-N@c1!<9$^Dj^Gl#C{jXN8kL;Q*LOxqRa1p!Vx7W{26eE9{q`Pu+qqHc z9P^S{!hB?eh{eqv{A^v`zEX|h)*WdBAGQO#g}1^FckNn&0wY)(GqDFgeiscX3VX6y zL&oko*{VUI{*0BCiJ3W=SO~m`)eCLSEtA2jYCryu2}wIsW_#P9Lw32gdNSN&|dejXmIA(BGYl1(>a~{|LI^*LhBgfjpvf+S&Wh@y`3IX||6SR1>pTvGo`9 ze|{|y6lL)0x0M$bK0G{*kUflhR3R0|+V6rOIy!B<(mMFtVx{ygmSRc+*Q3jXwG-Xs<+)!7oaGf3wXIxU*!~~$pb9_E=`WN7uNA05MK67Pd6c+% z<6_BQUt`q{p0!aGomyEA^VV!9o^@N_*!zR_t%~<-!w2O^9YWn>aq$kX(|{df=FVVC z$hw`(%wbpnB0>`-6n)Spg5J?}^@5bPSmcHCcgkM!;-VhnvRkM9Lff3G_Q~U;&M@x{ zc>lO@eUMwQ=t@5BC;3C|Pk{>@9OZax;hP#;%<``$XO@pgCq%3Rg2rGeq&j zy(<&xr^E}`-FMWTzQ(=cy_htqWf&Xkx@Rps@V|^I9gOPSkU}EP1cheyY-U<1#KMGz 
zk>QK_hd2td)~li1Zu3!^m2L3!@2sfa|Gs;6$Ib+lrzR)jM2{xgNgD01E{@cliuz4^ zlehQ(@HNu#Wk0(uzY7AvKrewrz6k>el0ba^X^U2BaYO}4?oZiB1HQwyR*QMwiR7sz=JtDCGx)Xys12 zbi4%FWW7)9sx8=Kq~pD0Hoxbcg7IV*(9gOwPeI@A^g78(;<5kzo#gR021}Ov=0N6r zEK4>4$(xdr5YQ-rnMH$-*SFUfbZlYT?3z}Cb*-S6mXRi5YWnW$R~E*P4^Q?jvB?Rt zk11!;#L}8R$+dmO@N?{Fk_--_qK?`<`NI(!S_P71e=dH$c~h(Hbz<7e2DKMxbU@U( z?N$VHflcZ_c+4U6gfZ6ZC=SIAdF0$&b|?uSq`XJcE+2S! zEv=dL+R_D-HAU`I;>bS?33xv7fu!vo?K_;*rvF+I74-Lk4iB`kd|T=3ln9u*8Iz2_ zVhg;6H6lqRLUA)owm`N7kQgYBO&)pSQ|90OS07_R<*Aq!z#qvCLuJ4~f};+Og;om` zslb9_P#XDHZwDX(e#3g3{u>Kng%iULKp1_xCcDF_u3W1IwgLY7)#T(9DpuNU61a$B z@{tw+Ct-d~)$hg`6yh2}M1-(5hs6H-$GK3MJP82gnMqp&vbj>e!}aRtLEZ=w=}{1c z7~}JqfDgc?l6b3S1L`(_ZuCCZB`?>S!vcYSouk+}Sul z6aoJ6b3@Ryj)%gJsJ1bToGYu~Y12B+#-9gKB z29NUMe+I^`cZ7jO_WwQ1OYg&9cM5aeuC9(ruE-zbdJ3j5;vSTxl?n;-zqpzC z^!Z>Kt?Pk^pEw2k=^8qNnLOs(j*#!z*pBBUOgCrTZpWcj`J80y=r~>P?yRn^4pb;Lx{*)RfzpAOS=H1W zCRYZ1^Lhv-a^D?$H+R(8&U$j$4{}Y=XL*hOu}xtxlRD5ptKc&5{(FDz5B@WLZ39mG zJ3G%_EG#cqLQ@hDp8WiLcqeFi*{C1}VPf(m4iQY6Tj$LT35L??M{`;{*aZ?1B zYq6wy>+Iv_X*U!f`rNo&iioA)W~5=@*bm|%@hs(XK?_+CG@mg?Y}w5BxNw+CuY8{6 z+HYfVmOB0Wh{)q}mgT4wQ4B(TGQcEj1bbh$lBGN8<(1Sfs#vLa6uTa8@7o*J%XUde z?&8+@f9Tbt4Awgccs?30b=te|n2vK=Z7i<{pRNdt*B9VJO`gH5_0`g z)O4w%qobvzjujB%FQLr8nAq+NoV=qoPJUI=4iYYbfYde=qN0{%6Y`x zU0bK>&t^!V_V$*0_eW#iFHUPhqsq0jXyV65>|<2dAB9vOv<`=XjDpf%;K0?e+e!O~ zQbw~__r~;eg*hG%A|gUjUY=j$aBcXjdVcWbSmf;H6cKLqp9)9Tm5nNg-_i~fXG32D zyKY)bj(poL4w2%p{wZnuMpVJ|s&1U3qjJZBK5yXdr7wKw=V)$u|4egz3r+zzNhF-y z!a`n$6*&=++vqL#Xo}DF7>_vWe#O@@^?M7xvl>6@-feyP?4G92JMm|kO$siV#@@xo z5)T|nZ$2_^y37@x5U!wP+s5WR%YO6Cpl83w?V@F<@%A3~%nbX5-=xUmKHi~QUA?j8 z%2rY1kIL7KZ}k&yULWD<^XJdsb|A>Z!^69sG;17I0q;mmOiWKt&!lVv<&N;5rsn3g zu6tRPH0g!WhlqlmHDrS! 
zS!7H!r*3ZR0t==2JUI_vv+7FROyOkRQc+Us!<1>UDYbLjlyUn_l=S0WhT%HmXiIj4 zV}Dh=s7QACCbkqRHA-~&ON)4f;8*|>)k^Y{0qTY^{CMq33&A))$*n65l}e9=ux&5+ z7y753K@pUk*<9x@Ero3qe&V@_6Zz2A-=%(#;%uAP%Xf9&FG`ZBq3*R5rHU`g{QUX+ z*P;f6t>gO@%xkOP`c)L{X_!lfC}Sk%?n{nNp;#|9)>E^eujenaTHIX>;g{}s#)0_r z(0*^j)4jGv4**ssa8945M^FnUXte!fD0qJ*ujZ6{F&KZ>li>d{i?>ENWIiA_ghNy6>8n;)Y@Z>9etUuOOy(3T}M zgU@O$mU;Y{uwz}ifXt3;7+t-W4?I}->)Jaww_dJS9USaGf5Jq51D}S~VsJ%rp-PR8 zmj^RymST@&pOeXW=|ywupxaXWw!*4;!v8t3@3hKn8?lJ@RzgU(Ht@$ z|L~sD41dUYT}t9XZ8*j4Ti~24`#1Mz>qr{S_?v=$j{YDACYc-8&i0nx_Dnr^6Lq+C z_5?&k7sIMLKYskk%*>>ACR;+mu%|N+WG#9-9L?>nvpTC4QnIRa)v4Mr7WtFl{W5x1 zSao%E?v~g?0b_3jyTu|&45A4ivmNbNWlK6&;KNPuC>G~`_*-m3rw5|B!=HI1gTJ|{ zVO_YryB3QeTMt6&!}I&N`@M2c>q7CeJ%s(cukqGT0$$n!ZRg8(%6DciN1GYvKa6wS zeF&q91sYXx7yfIZVUgckhhmMKw`|S`W>>ALkE$s{9HrdHxYje95*&Y@l(>JmgNCV{ za(3zJ<}Q)CU>5!0>K*mHZ|Ix&QJ23toFt2e*#2#GAA=VCCiuQLBgF7w{`S~xo)H=0 zWzvHX1C7;ULP}ZUSKc}uaTyKrvB#GHfhcL5#l0u#q;n(isK){?cKRY^{8-+o1&aza z-*4>#sdi(cBB=WCr%xybHD{h)yn^4s?|X(_Y>T~`;hfFXKh{pXV|dD^EY|4CIuv`_ zo(I3yfuN}&E)A{;f{bM54Jq*^y+Za1+*kQ@JE#aAK~#upukGDxYv5iF&}oLEv`-;4 zS;~vP7;ZKvF*8XiDm)Koy4nJ^vjq_R7TQ8cZlsX&K2WPNJkh`kQuW|)UuAb9uHCX7 z=j+tlUf~GT|FcE#yV&@9oppAtD7^>IV&dyocz>oTC10azD};R}AS6UuN{Wmp0-P^! 
zOmwt|V>AXZ<5kMMCiTJ`Ve=wHer0LU7>QecT0r?iyDn}o@E56mUsgDDaX8{FO@Mk( z-WV-9_?|+$+~RkgyQhDV|Ji|hMp5>cSS*1>5YKE&?i){5w$=5?%AotkO(l0Vf1u<2 zWxqEdW0(M;?eg)c+Z^JEYr4@M9q!)^Z3(wS$C<&R9Sa^;eWHjh#|54$PHHB)lS!?+ z@c4+&vtnoOfsrE9d~sJwvdv)(X59K8RXRy-*^+A+es_`&Xphj+Zyw%kJYEyuNiqKY z`Y+OA+tBvSWNLJ zSp!Ys`0i7%!lgc4TbuTo**kGdS1GAvoAM-2q+Lh}6}IeU-PbDU!d{+OTXQ}nR*grD zdd!zUQKkJZTzmV?JKL_sjuW(|;l9h2V<+t!u z=5fdz4tvXv$bAudtDP+F(du!2U7PCc@Jj-O4(8%=uHWb;tdorjv5WRZ% zYtjhA>*AX#Vo&07&l=CADEm&(^oYc$_+$XzFlmdMIK-jYZJXKzPH?%oK zn!CjH^)@G>Kg_@9M*01?{5$Tv)L;Ac&_IDJ>Bv({emrmhRWm2NwhuR7=2z~qnYHQ9 z#zyZyk0z$dr*m(Sg~fm2HnM8FU?hwwLvS{Q&#ip&-sYot<&bxXT>rH)O{8Y4*hALDAJaWRU|e{^{Q&zva{*o$NImI2GFDTR=(d~1!G5Sd`l z$$yzZD}xk+@LBImk$gkmG7#0KRk#>!A6Wf-7W1W7ZbnTixhz>I-jH8ge-XnDDvga= ztmQ&1s~ua1c|#Nz{UE7;@s&dIoLysa+`;5Z6!v4cE)*|YbNOM2RWdt2;w>MdJG)BD z%tn7?OV~G&enK6iPxDwOii11iq<%lBGul$OTl$IgO8bDUk%e@d61{__v~0im>BV7| z(zAkMEhWkIcAd91AKac;%8kpokLhb&cH?yW>(|gaF68+HJc@T7FlWF~(d40yPQDY( zM#v)>A2ow8+~h}d_)`0*3|=pKpev&Mr!8FG-{a|tR7XFf2t-0^iNGDj0KUx%k6ve zi!3t**~eCp>%po4+XEkbOxQpcIahJvyXd#?`h`ROw-Pi{XoZN$;0?Pi@_268BJwq& zzR$Med_lrORAFpiQd}F49`&iKH0S%g0Ql1MOUKo=QauT?uls2x`&<%{r?>sSLI%_M zkOBd*BREVP?Sfm=?M1!cjF@$H)K6U1vFXLLJk~jE1{zIjhxT{+8U03ecH=UVj48~! zHyeQut&A%0h7_?cI9`grfB2R1$6a2erbs=HHc3C6$Y%s;u951Ql!A9H>J4T6buSCM zlB)&WN(D+L)lU=&^KL%Jjz!>nt-SA|f;>o7)s>{x%!*(yHP*d9mNEO;)IqhMb`?*2 zwEI4JX||OdDLvW6WE6I!zjy6TU1T{T{7R@O|>TNn;`>~Wf%6YonWRay>kZ1lA{z4RuS=!BoVh~?6%(T11 za3E_j2K!~)h8|t_y=^J|VBr%%rHqJA4^~*1@1*WKO!ZkQ=?UG0l{O@4ce-RCi($x7 zkhw+}{T2ujx${Jt=AwlfBU!TBFmy`v8xqndng`+50on}HV6gbCtR4#FBFg9jdaZC$ zoaqZvih-(fl_7szwHx&EZiqYHQdE`0kuRt6d8j+^a~$GyZsDsZ_`{ez5TQ~h-qT?E z7O;z2;wiF`SD@H$**9{q#%38DGQes-=yYSTN)6VOyi(G=UfrXINRfx)ZA1C9A_#x% zcS@9fq>=Y>-M*)WLaeo|&wc5=jC#E=WMU}tzsa3QM~O%#-A!e(f;}d(k+j=1(N%(c z#?$4UFQNQTwa-ebglM7RBRJJ8(Syve%0opvGgmO>&@9my#A}{^zlo`U2DyZ+wQR0h z1vwlNw!P@X`Qq? zoIdM_3X0Im(U>pTuM+g4_$!3Af|tZejsJ|FDHMK13o)0#c~DW9vyTCa6@RuEFccr= z2&hV)Rkv#5ha(|Hyemz3rTx0b&p`lO%Ct)Ugq@pvCq(~xlAH%fOiyyy^eA4B7L~%? 
zxo?Y~WtZr$UDzFVn-w{WOgC(=&I8iyK-JG%MMf=zcU_%Cm;0Njo@@-#X+k~rz&)P} zmr!t4Vo#7@t>qD1lZAgjPnGsHGx0E7;_5G9ZRWM1XCayTLzezzT%3~Wi%|N|C}vNs z=9{p0cac&YbMEUsn0QssBfPD18>&6JI0e6Q>wpE6yMC(x!4&gD9_icCc^*qEwzQs~ z_BlzHoTr?Bkw8HB2P8EGI(h3bj|E<BC>9j9GB(fq?O2)CZZ>}Vnn~1r1F869Mvd=7892FH9P~J_0Bk5X%}1=eDt9sx zk^7mb;rurTY_oEQC|`9=EPyYWd=s>*tZz}1k4DYY{Ap8=0LtbQ7F8SPFCVGz-A*8V z2Oh}a+);@5>5iN9}nKqlP|ez|}|3i&=% z8L!uGZ3gymz1cDs0Ss`-*LLS{ZShVOT?%93mImwQR5l-tlShZ7IEb3k$*7(|XPCH^SCjX-~} zE822Sqk51S`FdAT`;ew1Y4X9YC)*;P(D!K(dfh>7h;3(3b4A9ktM!N2$RR&32(B&4zTW|#gZ+Wl!h@VPZV_Q9R-*!k)t=vOJNY*_ys zT)1}k>Ddb+xw1A8vU3!q2w_t#;e7l;ze45xvN5g)Qjj)508zS&9pJ|^4hVLY88O`R z!JqtIfvYoc{Ip(ji9J{h$>-hx6!mnkM0D2^4L9XW%pk{j@$1)Zq?K?e`XJTMBWvM1 zmo(2Z5EmE6LZVSDw)b9R4eQ36rKREiV1obXVaMm}9N2=P6mV-05y>uwld`Leva7U| zCNXV(pLdtP|NnliZ0w(7$GNa z?Pi+#ntQ1TnX~hABqTXfbnQ93AP+&RJGh*M6!NL3td==(zV}AlUK@Ky$ZQToiMqf&+>3apa;V_Cnw=2gsaQ#_@_J(TLi1LhdODl+V z!)Pl)ly6`utpf0BU)B9$u+I5;)lFp+ykVpN!j73b<<~BbW{vYdNXN4gaXRGG)YMd0 zyNW$PfgK=oXHqSzR#JPh66DTl#hQsK-0&>-QNoHAT;Tmfh_mUz-SizV6`m!WO)!z8 z<4$<0?+U@~H<{Hdh=>sVo%q_T)OY zb+4;ST*{>FARs`^WQxlm9CXv|)`%>b9V~q@dgTTDyuTnj;$1LeLO~50XX>tHa5d0pnD+tLM`7w~gua;;j zeHE?<&U+kVS8#{*BlZO3B5>QSmt@EvD!*>qU+*5;O8i-%D|g*aAXJH_pY*xK5bPCs z3c;iAp(js$c2#y?fJTW~P+A0*Ri*gKOK?6cbXu#1@byw7Qq()BW)e7d>s&37L!$vw zc9jEs-0fD>`a&ZgDe-p};kmvPRLD^wTUHPTSTWu4-&$N?Ex+vyFCh9#$Yo54?Ga>J zuqYP6vAyGOCPPrXl(;|zxBeb1bJ>hd)r?>%d2{ufE!;8_`PXrNK|k3u$YqiF{0uq) z5U}zzjzMsyL%mPPNCh~;^rz~nN5eCkb#$aiNG5njBX}b6a)>%5wNyu8-@L~uBe%>3 zZL?NemlM0WAC~JHd=xoskqY^DG=6zcrJkEAqz@H=?=pfLgU5M>;qo$mrf>`Ge7@9> z0AF#P+Jl%gPXX4x_=T+G170NK~nRR+b1^x9t8G1agIP_?9WOcWp@hdy0M16<@nI)D+tr{R`}pG7voyU(x!Wn+Hgiok;1bzOX1-wK>6%EN zVw5^GV*?wF87yA~v;)=mNn}ze0hC8f)ZOzTLzT-4LhZ2BJjz{6d-$oi+-I9 z$=IZ6sAq{5hXF*!4VO(i_xszb3RdRp6nUve`*Q^t3_j-a#Py}4<}cyQrvC#WxV*_# zo0q}-y)SAO89p`*#)n^*P)lU4zQc_P{F~##6keTBAp;o$ggt|Y>A#KQKRB%QH|21< z&w}SF?}87nz*UYtZd`%O`2p%$0Je7url^hAj8>Mt*)<<-*fiRw}8|f5-mVv zv)FVpXdoepQy@ut&4n+1rVHgBhxo8B_kZ8i 
z`S;=9+!Q<@w+Dv|!D83A@KwW$%7TpBdh*Cp;))Yvjn5kM*_7zyQKF2uzs|?LS3i*} z$VbK>dhNIWb_%lO9JCNgh<`iB(Tfw1CUp{Y=Z8;mHuUPIGH<}$|4F8aop0=H5Pjxn zIO(Fmp*4>)gxNFL)!`37D+hz>NJPTVVu#GqtDB8uHkaRh^M`GfzrK&dt4PYFW<5w@ zhFqob?J(}Z_2MKv--$2Vn){R2Z}y87oo3e7x`QsmwWL7I3;ph%o`U>*pejBl%CPQv z1ayNM@4v!^z}jM)&5~5U;q-S z0BwW!MYj)T;eSJ`aXXZV>k#H-ua@JjPft(B3eCm~UFFwf0|XRn$4qH+IbesI^vHWR z#-|>4g`u>U<0}7_`jYzZqn27Q_%fQ>+S+Em*{0{_1|I(90HP_Ul8K=*`y?6IXKvhD zRbz*jkg@Tf>|`$q=VbRS4tlQ}_vzc%<(}2!lfzxiF~a0Lt3vhW#)*aC1ch)4wQ>M| zT!@^aMY#BVm4eD3vs^pgjS_~Q@8s=PrkUQy)vZ%9JHifHkhT%!?Lpqq9gEBQ_wg~l zacLG=na9=?2T17E)OwydU3fi0@@ufDu^fgjYMETw5_L#=qbV%v18Ip`oFZy+5R|-zTQD6bAtolbU(}znE-ob%snu&x)-P`qHkv=nwu0Z+-_P&d9!A19 zS9S-3*ulXeV;HvF35GfyRGpM7Dk^~QmlqW^3}%t!b!y6+%-Dz+7#I+bpy;3O@}2UmRTC4uVdkj&w0w_!%-X-7O1r?%k7{QUf&$1qx`wT^cP5+@2X z78Vxgpvy6HJ)Hj{Q*6BTwDplJOq~o8uv<UPpmC2!Sa2c$ixkG$W3pGc1{GoGh23iEmHl#ep;qk}7v2mj zV7j*p2xIOg(<9Uv-S zx*@|k@UJv#4u4pb0(Be$moVw{BNx1!JcPR?18 zaBahAn*)kjQsFX9FO0fl47;K$DlffOw$D%YR+|^v!&sbGZEMazx}bw%1ZEk1tGfuJ z#msfLV8aHdL3v}@%G5M>7rDTM2Mw1lrB*bQ@Z_MXqgWw}o4w1TG!5C3;v@75_s_b# zs91FSs@`r5oURt8r<2-$;h58LbI3RsH83&?JwYGm>x*feen7(8J+M!Xltm_o9MS`> zX;Gt^WCt}(PeXTo8nfp;Q927)<(Zb<&}Z2Q_9q>a!vkJPUvzLV!;QDQ&W#`BLbiI{ z!ghf;6J2qq)SAyF5qiZ?2I%Qwjy;-q z`{<3uP#v;zBole{*FIMMQ}(jHalRi@;jG$F86L;SPH*_`=UsVuudDF$H-lUYukE}L z#gX_G_7irigt{K~x2<=_TPLP3+;;{?M=9euDPF*df4vyR7(@QJahT=%Ms_B=*}%SNoQ<94-)yw3)* zMr0wncLRN>g!Tkcju^uc+M_YI!I+<#%(YrdLA)49NO66W!=6cBWLw?TpTuHCw^{Lz4h!Fu`OkCfeMer#YKxBmRSfYp z5>|eeD->$+zRpq7>OSZZ^s6gna`PX#OFR$%c}Yl~MYea4A$rWhmvBOB(G-?~1tOw) zEJod*D~P;LSF_n?JtAVWBKX}_$e9*>QCEL_<|uJB7EKpWEG@`bo>TPhsoarDKG%fg zKmscu#%g7KmFEdQd7^y0k`j2@hi!e$fi^8>iwb3xrI97%em~hWkvij1oafuX{-)=SAh-nC*44gg2g?zPN9> zye`hu>W7BM{Ktg$4g$TxSlC1U>k=Z3R6m;BgI6etQ=8kw)Zd{MM>+ zBlqR;0zB|~(w{Es4fKm&zhsPn%Vs{x+)E6iKKm>Epbdt!)V@Ail+5jlK^)cRGUMDIp@|2eL4?k^YTYkJB3 z2R9mL{{C)>EfO`)LK#P!?%RIJ&fa)+dFChZtROoptF66V)QcSG9xeXpwP#zkU-PU; zl3&8eQ$_|^iQ3fkbYet=AfraP3D$oF)Z3DmW!4lSG1J@4P<`_?5G8TWP&-bYlg!*D 
z+F!J4B95~5lsKv8v{|pNcg^?;8NUK0r{WAn2TBK*((O6}C&fHWRkNE&NCkf;fmUL- z5v`ELQ%gGdGgR^&b?#|d=Q{sekQM8;{o-)8nw@M`vK;08frDwVqeDCjQb}S}2(6BS zfWH1@C!1_z`7u=j=~w#JU-mLRLyQd7i}|IcgP){fYHDi1PhOhAW|kA;;!_MQ)Z`X-Td5iJy zebHTJ`&h`P+BBI76z@Wn`1P}$?yn)EOEH$W755btei%oMzQ<$K&d7_%Pwe4$|pnoGCP?% z)^+yba_j3f(0OqC4jFE+TI^FO5I+*&dC5F^5pT>!yB#0+&amt1UdLDDWyCmCO9PTd$v~<6&-wxAquN1z%kY*o z5pz6XnAw1S^hHnHm!K@NrAH52060?c2jD=g%!XPW`?ae-u~74O99@ZbeBoBV2dVjv}!V+*%|UM#6r=KKQK8w zjUS)HrO)wFoyo7!`>A<#7ib|!N=iysFE4$d9*3p@wXzG{EC1N=XpX@p6Wt+b$w<-s zkiT9lK!Lm4wqMxEBMOp^VqdsVeY16*vi%7B6kzYYHH>Ql|N5Zu8?WhD{$%Y{u`vpU z%dS5cc6FV7OHP>i94izl)+BT%8Z|PH*=PSs@+vcTT;2AJsY}idy!P|}M?#HtqOvv4 zmIPh(ypMP4%pO5kpRFxRzxhR^FXa2s!j6ek_s?H?e&+!pSa2xyXq~^Qgm$i_g#^w> z{;UeL-KrHThx{_K`>aEtUq6SjCa|ze>@GbMDOF|D{9Arb6~Kz-j$UWV5S!x#zCd~V zP?xLs8AzAztSbeF<4KDzLd)wV>Mr$=V-51g$4s)91bfKpwWc9WfS>`kCSJTeMqjXx zLD~GNL-q#J>o0ON3rC^;KmL{}R)0VI&|hWhR| z1vu}O80fCs);;Z=VQ;rug?b<|S^5pV;KG_#UjPUAu`T1g7SiMr)vh9o%+yOOv-y_4~hJ_GGsJAU(87g{HdayMdd4T{WJc` z%iihUJAaYQxsh$+m$jsPZhw3TwAQo8*lI#2W5@OvU)bYtAgZo1zv=Ujf4w?O{%}id z+Q0flq3xGBhuW`cDS^E5#UBSlgW9aMs^S#Y;}oek)=hM=?K7XGmhp9rK=Yylw+TQ5 z9RM)Htb>;z2R&319mlVHU_k$5)V%(NV}*)T2Yjr!V~eY{7i0#JFTf)xb`y#BNBuu+ zy>(cX+txn35F|uEQ9%hsx;vy>Sac(e(k)0xV}XM7qNN)Ilup58pYQOvGmz^Ta-mVy*3|SPobos6sjLCNLt6F-C{_Yq(xC= zXu7NnMn+Zo`TjUoC1UsF0P}_tubY!Jud2obe0?5)ILI!^-4)>whsH2;nJp|S zMzCN#sI&EeLcLZ_;Qe%aH9gKUURlF<*+`V>LlcJ>q-W54`rDtix>w81BNa7_ZgpRInt(~h& zoAH4+n^0O-L>(c)EG#UA#l<-jZ)K~f0>t8n_mc^^5(b8(;cg4gh?R-V7peA zFnCjs<9ru?($OF9TkpIaQc~&X2Gce3k5zkjpV|6!apmoLs9QFrdjlR4}N@LVZ53u|Nv#2Q6G} zm=MXR;(AIv znK#mUC0Ua}sPCCU=greQrj$zX+01SZ(!H-x={a9H3C`!&rw1U7obImG!&tAmlRuj{ zEANFnQWPiz1O)9_u99^(GUZ!`8CyQkQaQA%dsg+OxmnXW>1-ebJxNG~HYhZ1B9|WC z2DuUF(wWXMIaXivbCwETFnNFU!atm=Y|2hd!1MEbL_O@^8#>uv|2&1t6!bnheupfw zKgg(L!n^k5TkGlTRK0KH;^9^`j5htZf|PP5H;<|;RUJI1DUTd3Ui^g8LP;d3blCW_ zG0{d{$a?-9i*3ywEdrl25ybwL{d1*_?@ilC$<(V^zi)ej|F(iQnSjhDc#9WZCs4k+ zbg5GrnTN|;+*USRE$Z}EwbV?PDc0v_`PhsY6^naOSU$062qKlKfbFA 
zzm(>pb23`o+dpKsv3ohG+E960NXq@!q$84v*+B0SFBXB$(^t8qU^%>i@J5(YL5qar z&xKzGHBfV!h%%J^=V8zqZ3<=@S$Cd|wy-Yjk$Y5II-uC?dPRDgkZh=qcU}ou(6smECBmM{Tn0cOud$wsYW8+(Jtwv(8f4jP(fFi@o`&+pU^|nMcm3vlYRuroD0ZcaXIWvC72X7nc z!`~JQbhuAP zerrh*XkT=nRrt=3TF%2b*50jz)bo@vTU{EM3?b=!Fo5b;7BCw_m6w$Ii7)1)oDG$JS!1DoCG`;K;Z~^BY?B!&wxy`B3Vfj`GK1xY@nqABu zb0Kzf{?YzX(dL|J>upW$+xPoIID`^@>_1#vd$hZHcon^!eecW}3#ZfH@my6zju-H~ z=Ld@v7B#1QNfn5~$oMum{1YlY))`9eUWQIsn{A3?ie+2B*h&PdWNyW_DBRjKor#o= zs9pmTCE>{(n=dOzRyyPM(br!xAyq9b?|x4?-pfK>S^MB!TC)DDvNA(*nx-yoW>tYd zm~bT^>{|{XVkh6}Db4V#8?g$y-(5{R-qKtX-5t+vQzf_k9yuih<0tBmO51_a!C1i2 zQ2t{+^+m^YuBw%nCtE!CY!}{@Z|WUoWmT;u>2Fy-wk;LM$A?=B^7fmYh@A6!{vjqK zT*sMIZQAG!4_(6Gq%p7Ko0_9C6aR+$2d;Lbd<#2CPW(IVvM^2JWzGIV(5*_O(s+gs zIn$!$2IXCK`)h6&XUWy~Q&>)~ph`kFFc*nOrhd)w2bl+N;V>@^D13O8QMJJ8Mjpo0 zszJj2;vbcRQtRj$?AaUe1O)eSajTS&+O9eE)eWxszl-bWTW@wswDW$bs=Nu`Sd7v) zh^aMgk45ypSMYm5Js_IKly6A6-B*Q~Qj#+>eHAZC*Vxl>cdZi;yqo?Sb|sA;;`R2{ z{R`y_`{=Lt<}wygX_2ZZv|)|p$fPsnd%3S&H2N?OZYO}rvn~d;;&b$;vD?AQ>c;Bh z;^Hy^C%-sPW&wdf_jef?)pkQ+xG$zuk}TedHfVX4|X}a z!r9cz-naZ^Pkvdqnw7h^WXb<)8)YJ(lg2hFXZd{pli?y!!``&xMyZl&k1$qx~DoA5aFY!%&{Ypcpn%wq# zcQ;a8gPU#D{Z0N@UtpSnepkoq*fqZ0NxOuYiq*E5D6{4$6TcDJeSQsg5)0wiQNzqj z*V_a#qA2~FQqz1K8XClVFWA{e#mf~GdY-PID(B*4i>s-Z=xiuoZIC6+AN!$ z*sL-;`%kdnQ{n+OlLY4dFOqb%%(rsoBVhKruT_^ha(#4Uq+70A%FK+vA;g@REiP{T z_D|k^Rdp@NJoYzNWAI9qUrA|hXy^0s{?1jc3S`1FRhV=HCsB|mk5aC#tkg`H-QP@& zns{XGA6tBXvYvs73z?b`hfLKdmGTNL_ak>Dnh8yzS(mrnjX)C2qq1HU6!`l4tWKR= zBIgw=s5e4h)T_Weprm4&;$Alz-!sZt5?!6^bYbp?VG!S@rmVWrC#UXrPmJIV>NsKc z)s<$&Q{!lp&$=e(`_kBjsS}gJW4nSFy%(m2-OP0AkBhLcnay3SY-$<`JYWiD#vOPOoxv?x$)s+r@PU~pA*a0fITN1J1PUf% zU*~#ohwlN@OSj1MEmIOjJbO7BuPT}#nN({7VYI$6ohb`jd%IW$TN&L}A zFAYtX8T$`0-mw)y8Kf8UZCtXPz^9nl}yc|OG*qh@G1gANC`5}p^CxfSh1{}@JL3{%9 z?ZvlgXV}PiijCP81-=nkE2zYCI;wfA?-QwmI~3N4;@2@UDhO(>Z#)^0_8W@0rdqBP zE-(Aarz6Jx|NnUK^nYGa6>;6M=jYQ$dmwY&!aQA;efr?ZPqs_^9rzRPP%o73f0o<; z1Ueaj>)3Yj+uyks;fF4xla^{bg@QbuYp=$Ni*H%Kwlrj`&9;~`>b-1uW-MvrD6N0b 
zM<%221bsfoSbCZoRC`eglI^XFi;c&fAu^ZQgzt#NMwj{KrDo(kRWrK7#C+x8*p@Ev zRYpmhVUvW=cW7~&F6GeDo!j2w8p*ZRJ3vKUE4VYUvhMxEi2abwXvX*b8#$BC)cqlV zECUS0O>?B+1jYPEox9{6Gne@QArwZSD8TlnP6+BW?YEK>9j5g-mpe~sDY|K`75oy( zMCDGLt>zPW{doCiCu+4lr@s-Ea^&SB4_3-5#Kc0ZmpT1Ea@k$F(AaRe*2;_3RxH=E zIJUJHaEr~mD`!bu=BtOZt4eDKDLyMJJ)x~a6jd}ck8H42lS9_QhkTJrO!wEfx9{pX z3;s}5vv5EnQyVtBdaI`VWDiWrx0q!AF$tqA$7dqG2FrRaSNSPuIS@eJgT8*|6Lsi6 zL)HqlQC!?1W#qL~4Qj{ZW8X@9ytEgcNmF$BX$R`8%!B9KRHt+{zRvKyic;00?g=_@ z#!UI(d*GV$R@(~3F7#?}`W@Y?O$NoPr4;Bg6cWx|LJRzQ)I+j&N@@?{TbOz2fY9Iz zQ+n2c=7V-ikLE{DVeYE%k7aFf-=x@VRVKZk-50V5QrF#158<*AMK6RTieNQ9@(avf z2G$&)$QpDj6)q^!{ZY?m*Cv*(Tnzx!4lpYAJnF8oJ)RFQt1J@jWiyO_XLp^KPFxur z+36huQy71IKeqAIuQ|X;#GBu{Hw&-SE-z2Me%DxWgeC2G*~E$Hae9*f$s@ippCO}Y zn4g3a+nx?T)Eeqqv~T)lz!{KJ_+GY3sGLhK!?^* zQE~c~-*6zwf)4HI#QG}Q3so%7b*?klbcaT9(3c=lJ{i=$!Y(r}Q&MgPZ8>kO#^|Pu zRVeIUx!XdT* z6S?1xAGXjCwbaU<`xqyF%9_foBO!dp09t`d{rS5-$J^M8Q?BI&F)X60sa-2; z#c5+`3$DzRaf$c*xH0PpiNoah8`Y$}?sS+;wk4^}pX#;#Yi)6?RCgyrC08op`Nk{W zLivH~6AkMh>TGymyqe3=+(=G@)*%YzYZ`6dviXYk+ zG56@u4jgD-D!^xuwRjq=-MA6<-n=!n_!(T7;v<*@f1Ed9yw+QM$M0wuBE)pfJjDh4 z#>fwJ=%9097vlMB$wPmr&U35!?iwvD^kinKGU=x$p6{JJMl+Ug3aA?3$lG4$)jqSD zKj<{taRo6K(F!o3<4?uulxat~1Qqiw;>Ow)>-o7WjH9UCDu=_M6^T0Qp0~nl*PU!^ zey%qBUKz39=R2AgKR0?W)$br>i}}j(&}qyd&x0=yy+&xlMY?-6sC>By%$(}qo428a zVI+!#t%SRd8J)OHgNO5)ltBL~GG{BvM-Fw*g z4(nikI*QpbYkB|cjKM`2VO&|0jE5}&;*t$-8GRN0O|x?I$JG8e%|Zwh=VURfGF`4p z;%`ti6PO^}bEm7Q40ijty_^vdQMIySFYlwTH&OBUeWu#pYMP0?gmT8r6`cO6tsgM9 zt@)|~v)>yZv#udAH)iPrc{jdrV-u59m3HWV^l*!2D3r&@SNLhC1b;#z6VfuIG`F-> zQ3 zNSeBfO9nSkOs~Fu&3V>e#Gy${t`eRAg&$M7bHzzf) zaL~7HQ@Ky5=kzi><}a`{i61th>gj6kGHH*J{l7Jf++}(Zg3)GJUe*Mpb6T{AmGP3_X+5BWiee%u$=_T(NHmIn6MDhOF zdK$?39Nvpw#^1lTG_NJQKyW>l$@cDJ_=(2~YLPn`vr@Jpl_8u(>^hViWct^48(UEUVIiJjW zuJPC<%G<)uPE}b~V^&jnanN#6;9UK$T+CNnpi-o8`fxb%o2`PBEQk#&;F@OX)N~s| zyM%=>6>j1y9H_1GoSHT|kBedD8NozUKkg|MT|vsI#IcOwr(^i+lmK8ALz| z^yIEJ;{)qOq5f%c%2gWZdi8*O&ZY5bYV=(EFmBu7^I53Sew(~J_Ngx}S)%)YD!Z4s 
zL3$4p0S^nMI4Fz(%24Jr>P#?eOSX)>rrkMNTztt;dxAA~?(w55!emf>I{VpGGT{r9 zx}PF9~Jj$PJ&(j9*dGjl-Kg3{Wpr6Q8pkt4LaH8>D(_d1@QA znS@I=sz$y(jVt@NfdEzJ?(Wek8tsA8u<$&=*;~^*(47wKyB7CqLD9Bd z$0a086{H4BzodkVO<2vpxbrTzd&ceWTam}585vK7tbruX{ol7%R`SDJ#hz8#2{>hF z0xkP8ZyGhahYvS#g(^%(R~==fy=wp~FC)?ki>~2!SgR%2YBvpxcz~7}lpW#i6VQ4B zow;o{G9V(E`a8vwz}Zway>%HBpb0~>w5BGZ))MBP=;@8zW|y8y^*et|av-Z)Vr=1t zxgcPD?zDTW2EE-#_*#?8#!M@GG-x1zJXssO9wik%fx=w-x~7i6h!V8cz0v_Me><@m zIs>5DS50Kjw!0MYhKL3_2%-Pac9ScLibB1LU@AkW#lm4Z`3o65VxDDr<>rJ8Kidw*8E?ln*n@Y!x14Vw$H*3RcfFY%ENiX_G`V;J$!95A{fB zwgJrM@+E{`Ax{{V9R9J`U5a5%>kYKe0^f;c_`F;TfLdQL@=|a%Oq$O)Fi=zz7ujCOKG2pv%4%q)!we z3An$LG|S^kaJOGD79OWbd0|Mf;MI$XV4xSzn`NYBuuv`ZeUg+R=v+nfofiE{B$*&N zY0^_E%x@4we&fKFU~zwfIJ^J+-8+hH>>)cBDf;LAb6%Q-EdtM-X2S{(&Jmpb{AYq_ z2o(r_8ZG=QKvb&XX1T$c_;=k*u7(pj8klI=+jjRkPw#yj=EaW#T`@Eyw-!S$TmcyT zpdP;vFR))@LLXZ-f-v)Hc%OCvI}TcRc!Q*)S zEn@zT_xOSV0T+jpPSXF&pQmtneTmsU@^|$Q7ewggje#ccF#jF(_etG_6^?vI3Elt_ z^x}1F;PXinB+dbUH_wU4`VJIF_;Kgij{~703Y;Jb^rbx*DCGd=RRPW}U{I|QMd8j7 z6>oc^7?|A+LeQ#%-v9Lg&~6Qz>WmxzJ^?0DL1R1+G(SKR+P(P~UIGpqSof04=VHI1 zR{*md+KxY+X+H&MKc?nN37?(mEtq?8NNA9 zWv7=75KXdwe=l(xBrL!CpC4bl^fBFR4BbSkopG`71%?-Z?_r}dm1gf*z57 z&)|s^j1$Sxs|K(Y&DDT`GesZ(X3eke3NmzsN+`M%P!Ats>HyQVNb`jK>>!wc>XJiBeKay;# zrmKm)zvz`+c{v@?2t2zcwRD-z^vqH8Mpk-;`hasn4*;-njukIWmUHK}xY6nu+&yxu zj0=u469bO(d7$+qAogEk0%U;}fsJ(1^#GnOI`#t$rlo8bcq)$;emI+=Y;TmtY6D=( z_#Y#71|z;GWujWrG)V4$_lXG%6*7&0_?p2Q3Q>U50we_FqzH%U5frlwZ@}c44AMm# zG<`1rD+A6u^Pc-aUZWxe#X?}W0HYTCCcL|ecX4$I-ONsWfj`m9eeT@F`)B)yIWo?R z%-}_NL64XtVSEMDjz&pljYovkbmrDZM3Ywn4b-rVN;d=jd9KCjCK&f5E z`uC@RgOp7q@<-lJgWNJ8U7eH+$P=vSH)jEh!EmLLA3zqc92v~(qv&7X1QHLZ+nM(# zgRDzi5R?l*b$3k99v}()OhrIEq8}gwvpt+y@6dmC%#H?{h6SY~a9X7x(i=oz&6Q1ge>)^{$=SCH>X9A_o!*NqP)K{5gPxflmNsLF*e? 
zlPOx1JJxIkzXr8N)vOVWK(W1k4ip-?P?vBnY5@{8!Why09MV0y0RnV8b9(NtQUi?{ z*r+eDIe&8zmd1(#1m`N-1qrlE?m?gC6R079?*c*%?s;rxHbOwKBdpJ5&jJC;oEQ=C zUW%z00OZin2DQq-xWk3s4900BJJ{b2e`^72u7_{bvCL8qYz><3ie(uJ4_RVH6Y!_?^EF zj7kKn%#g)E2X}&|qO(0ff|GZf5HeD0Ald|W05ArCJ(tl^AVW(*jhtO&7abVYx?s`@ zOrASq_lsp4qz@X4>kEmFISuA&d;h?qpE+$ZH2!Ry@WR5Qwm@ztM@wq>+U>B?%eG)Wj zFk9r^HjEWnf}GzPp;Bl8XUVvYm!om@KM$Y>vKcTWsR^&=@!mSyMbcmY1E+wkbwY4Ja;)aC`Fa!2I~{k0F=i#P}dO(Xj>a&I*W!k3JCIo)rtcED_ri9?N zHejGialWtUVWH#W6`%p$1uKCGa!`SmW#m+Yimy)PKN~XI;I=KE3V(!a}XK%kZyM%O}YX}h(2*QTwLNBkYSa*j0 zEYo|41>!G(9q4I`-SH*==lwZqqHio#Z|6Hk_`ZXLCh{X%ixA#m03;n=JQm~i!7=yI zsvxbR16)mWun7FHpX07q5P1Swlbd_qi-S^pu`;z>h_2$=u^a zr(8`pArawOcbiWTO%m~9uOd_lnmOO(T5YJ8D*ot2_sHoIk}Cs4Ne(TaEFUZ)ugDD~ zm&XxP{w0hje{+Gf^Kb za6cl4<(j*zytvRek9BuSshvD zEx{j}N<)p|`X)u7Tc0g)_8_B{0LK`?ky^4!^g+?lVAB2xp;{4hed^a*+ zKFw3i7ueNM7frhiV8Nrh2^cRyLja!zp?kOOc*IY0Ts-e`Q_m|UE`0CrtCnfQ6DL&T zro_xmkB)dwRbD_kgC2>q1}JXu-jF>MAIY6VD1zTBa)*;Z>53s~D7e=Rp8kMiF2)4< ztb%4Dr(69Mf{UK2^(nhqz|Le~n-?iX(K0;+?jR(|MVcGGygY5X8t3-`H7bSPi!0rTyo#Nm3GUEVgl(cGNSztyz}|D{edE62g5}2<(Dn!PZ9H^l)SK{m;|6JMEq` z1K^YXQSx$?=7gMn=gm{s_?cl(fZ_v25^b!Mj~enT>_@NxuXyHc5T&sMklphXk?iu& zJ+fkzHvcD`w+`KiNy;KMZh7yWJ#x$2Q~WvM^z6UYr;szX`01J)F`RQIU|_3m zC&oeE;$!yKHP9Jx=D^ZcTe$RQqq*I078hLjn!NU>gy+!mW$~53&rcp;1Tqx7Hc&B&Z$TS-MD~F7Jn?2!C#9+^t;<@{ zt?`42OiPa$xpfN89&o{?E&r(cc^NfoSzd&i(O`yDzk=|43thk_&F{|BT($3yhdE(N zNSY_^Zu`p_8R-NS$49=<-9kiUE3=(FBpyW*wd*X~NoZRmyImT~a{0N}ycfKYGPjg0_+)&KmC(W9sPCPUUWl)EV^MB?-W@|w~H`9;RW$lhp#@<^1QfdTp1c0$F@wu7ryaCO)#4lZ$J z%1^BkY0xQOee$h_>XoQo8$#yV&#U64mIG8ci^Bsw4!+%!iUQ=j2`M^?2@1mf!G$p4yjuU)2Gnb17 z0E=(mDzTD4M+Tucx^RUO;)K@>%*KFJiI=k=cN#kbBVaCvwj75n$W{qnm_u9DZY2Bc zLfNKSizN9{K;%_oB<5Wysi!|@eBpoVk%g|x0DdXJ7~Cdc2-#s|9zaxKXKb3pqnM%6 zeDtt=P2yR0GIX&tBKy7q_<}THaIC=F2kQloRsK2v2<^mx1VR64l=ti!x@Ky?>Tu`I zE%=YU_!6;q;ameSsPi2eAKqhf-B~|NNYdQUuR!D0IO*K9)*g_VLCFx(yNCcI2cYLw`KBh8bLkfEbWB z&Zqgha$|aHaj>F-skg4RHMOr@LxVaiyni5jbW~-)j2edVSCp4qTmMMv4^THr6|-nd 
z1|XWL0(Aod^LsBLVR)-Jd|mLObQfnYeIy02Cx)Ov59|)wiDH9!?^y*rkc-kA7*zy= z4uQ1|MU|J>2yANw1;tNXW_M3#Ir{h=4YTWQyBnBZk@dZ_J5zuxyqLPtPDjk*pA#N& zcV+518tN=NxeF9A&l$&v!ROF#wM`EAQme=hZ~Y2Uc_ZekwJHwvPc&u=Wbjb4x2r2B zk5OeuGX1xKSNCt_)~>EnhLpz(odPDbvVNPEf)mAti~BRt?xW21AoM0dfZxKHnf8Bu z#r0CAR#sQHV>RuH?<1ab!l#W5-cLdbx=zKzhJI@#-q(9zqraRure7GSz-){gViek< zOF>J(dq_Znmm{g2Z3=YPAW%Klv(D-FJ2?^X9y60((7k-5WzQkI4nV9!(=d|Bu zUL2pB`V;5tlKJVw_~ry29xFntuKkov-$z~hC--ZS$$9eReXqyjpGQ%lXi-P+--z7U z|3-erdwU7ENxMTPRfls)v=%<TZ~;dfo2nz)`&s^6e~W3t?4vFXWRvXC39xicwl@%vbA>dD0D!Fyoc z@NLPq_2uPle-1pJd4eM(e(X9G!*cp?({jflH_c?y=h4P|YKcbR_moiImG^S49LIA4 zCCr$o`>J6_4JQ7Sb~F3-*6Kq{CnxO(yZ%yh^9fcd<29So$LkK(I@$JPl@I2WaHNn0 zr16g#naC?BCKtn9A6{pZ^FA=}lGav`c9WN$CNJ0_7~&!DJq*59yj`Ln#kG+VxFZ~z zVBaSu%ZOKAOFh*s&vH0jSUFLUZ()OQphjs0^FDP|&N?V6IOq(;Cndh+nH6+$N@Z~` zjJ>U$XhC(q?7L1&*FiBMo9heyoEueKcyyACNXC6L&y#&CLW*J5x=TenX-Q)x^WXiA zZk+xKdDhS{!C`!qQNYYcH(RD_d_&vn!sW|$*=^C^nXTI7r*&>xz=vj3A{%(t-SJW5^_mFqoL`GG%}wd`=qrUU+3G*TmgJE zEA4f^!yYc)I`bS+CDFT6T*kbHs{B7SpGQfs)D{S) z@=o%7v@{;TBT&S&UQW;Qv&&AO3(TBumy}GYWm*}2uOR54CNMjEPkbU>R--ppRcvIw zcxSGhtFt;3$%rR~lxiJADaL7J*Qy8J@_CoEb}G1?`}VGFsc`-$OQoN!_kpy>jc=Kl5ejvPCBF{e~9{3#X@3 zYzoTLH?WrJF4FASk1B6_S5sr z@Nu;&ha%qoaa=sTB7geKm9&__RL3^6<_~$P=B+Yo?oKM?eTM(FYir)aH*e^-){ft~ zq)ro3jd zvJ*n-qRBi%r#p`09FuI}d9{Lgc|WAvPDzNG{TL&^Kv(Cn?$!Lb z=b`AxbJfcV=f3H>NkyCTaM;u@$7s^A5~Ml2hzjmX7AByuW(y*yozqAjRFLirU0UL> z$bCy|vcdSdE!VYf#kj$X_hK#%))LF zIpw)W`wlqF7o~GV6*Q*n4>WnEVPbEwF^^`Ngep)P>BM#y8IXFSzSW2*d@ZPB367lK ztPB}#B9sm!SmAUnD|n-Sy4B~4N0)DGdXSz4UjPX|`5m;_czT?4`*z`Sy?L4fms-%c zyN~q1hbLa&nyLg`_bb<8Ro!ewj_rEMO;q_ez}Nz~zQeL&OFqt85>qU${U>s?Q zyT~hGXp}4}I$>@)P$1rhp_Cj>-cnCiBajs!5#pz@8AUm-%oHig-!HKGj`KZLwY!o0&q9xQ z_-n&lf5OHSrsZv`iWAb-L-G9?Cy%<%k2gD8F)&WMZ}Hs!^m6LB3t98a6b#n<-c18v zv2D_dCz{I^_fGo{0xh*`@cz8MQG#|mY0A%|AOLi=L`r8f<|3CGK>#*Nlo{gx(TdYQ z;ZswUnMsYOzE8N1T4+Ss4dweAwgz`q5Bci#Z1y=u}eQH{N!B`gB@cIy^dmR*rhk$E%}_h7F04$qk#i;*@FL zpZETJ9#{$mem1Rxu6W}vj8xFDy3icO=a&3)$ 
zy?!(BWP&lpr&JaES#2HV^3?clWtskb2r=;y1l`*e{<5+zp8HRH55>^VygB#|=fmvG z=~LdMhI*1przzw8xX7xix{Ll>J)d+Ez*AS82wMhI4f1?iRLpklwZ zn-ftCix&oZlug4x;9TPe2t%9yst5+_DL)MDmu(5_RW!eBX}gZuyEpl6ja2l zR+U0xb`mh%^7$<%=n~O9=I7HMyL$-vi(vV1&E{wNl};P8L<{n0HR9({yEcrT;-spo zRk|h>eBq6BqM{Ff|2XmRNjhqPNfo&{6$5y5PY&y29nD2GbA688>{G?Uv$D^}_fJVN z;*AT>VztV%cq^wRW@E%d4w4=^68zVyd&jfO-~alx0Xg1n-4aW4OTUw&&F>2)xZT7K zZc@k}VsEpPvQyLx1;qBvQ-g!erI1bbT9$K z4|%s+maqtvFKU&{>7F}x!sWVRF%)wqOS zp?y+$O~GYF@-_+!moBt_+`&3G!~ST+g%KTf(qeNiDV$R-Zr<5R^;#&3fvxG0Nf_YK z3GSr9|9Bq7>j6)C9tD3FOOL`D%}Qh$av2|#F%XU|=(|UTNT+=PLYG30=hS$+Oq`Ba z-P64zm@37H+&*V(154>HUkX95fc$?~Fw6f9_9un>yFW{H1K;W28E$W=@X8m_TZ5lT zFpm6kG_Kv~Bo(aHSST~7+>uJYmf?q7s2MYR%aC z9_NJ`F%`<5GSG=iDxKFQD7`eLw9j3od+WtVMX~WlH>;jk(a)n06};)Rrjw2X{k>oL zO|jEyn{UE;0@R}B*t6NzIVb&`n(I%;1ZO=8E|+~uaYb=)JJekH;SbE*(gZt!EGum(C!! zW~|#NlnEtFGCMfr=5d`t#kh?_j5J2$N5yfs0m~Kpxfi={uG?5}VCrp(s(RtHDrz(E zR@vGb(4pQ4dlc83J2{>1&!+|j1?6UCS@V*i^&`oGNpW~U>RNJH*`sJX;xy&xiQV_t z49u(i=ow={|LxE+`8qQiABs=6o?V1* zgVnHIv7_DdY5&sWHo`kJv%bl z7^%O3jQ3|E?uH}M1JK;bW9B0 zBLi)1S+ue6R31ln3R~8!``K@-VT1`!?i>8D8YA166c=v>E#sve4Y<;MlD4#251A(dg=Cshj z6wQ!gUF-H=-1xp)L6iDSIKAnymEMP{%E^`H>u*oJTQhV&RZd{9ZFc?dF0{k%gaiXZ z%lug!!5yq>#nRE1Hk!f+jqR$fC4D0u&)ZyzXt7f=t2#|wS+6>l3<+xsE4Z<%fmU9A z96ND!W#hCZex2s3O<#1wrx#JOkxzi{$cM(qF19FuxdlDQ(;=EV^|xNdNSiQ$%!0k6y z54|O}3tk9hZ{eH(s=Vv+@AC2KkhGGFHe%*9ajQC&?xA-zvBv529L#4UwdHkaHS>;R zyiAv{4nL9*o!I0}bTKuK?f)v#R=txaH2hqzkxbZK@<2G{yjb1rz189aqHHgWit-vk zXYS?cWN-hU#68VFfBdRAALO!SGG6n@Dpw_WWn~4`3{zna_V(Z#D9CJ$?5wPZ_4tqI zyuUpQTXX(w$UaL7o<(y^L~+1|q2U>~C2LG5Z+|bp^+~X$>0V;NTD=;~g29Is?rSyM^1!Lk34_C&o+GE&(_}-6aFea=4=ZDzz+L{

v5&)MErD_|GN3X;E4i}jv>&|g+Uyq= zgvV@Ke$*p%re<9S&68V~Y;9P=!DW#=c8L1}7Z!u9F&n;VKS!973(h|?m-B}o$<3R_ zx$TwSPw=4ujjOf(1gM8r$WX1b(8Ae;xf8QG6QbbG>vYIwHAFZ!bXb z-YQE{Wx?3+JEq2|Mo+m%1GE9i{2CXwW{1wn6V3MMNB77i+G~u{8zSPh7c$!t*KdgX zINsZJE}fusbrt{h(?jQ4Nhh2AS8w`mTWkl>G+8O6*9FWqD<&qU(hwo{mH4}vHaiWd zVy{=avR}L$G#Zy{=0nBhhCA@Qd({U%(wFbf{3f-MJ>F1V68F0_y;vn~y6bSbbKdkI zD%0%OgvjSx-m;=T?vhO@ONCP|O4w!2Ty<2S*h+D;e^ax~Z^xy+$&Ll(LR0W4(|$Br;O>mZ^umGTC?!A3RsW^yRU@YI(9FwFncnz zGuQB*)YJzOlJKHo;sGlK~{&4H2mJ%EC2ua63AgHQ*N{7|!0ttR&B$q6}>q2Em`D1hx9G3OFoDyQQxp;zgy9 zh~4wYS=@aVH57*m%Uo{{tHDwhyKaJjW`0+3^Q0EaJ<;)i3K#!?%-DIuzEFNiogQ#( zr2;>B*_#5uO}Nm~2N|IK2K^Nf^W*QYTts45m$YZd^;6z_7QKh@LrjJd@5c#UP$MKS zT?N0wOGa}PnP)Ev`t6OszM&Y-zA1j?s-gy7IZ5$i;G!^T44hW*3df!IqSUp=MW&WrqgQU^rDUqcn}MGhR-$ZmY6{O zyiAlr+BE?rJQc41Y{u@xJlKpycvCN;pq&{4Hyk*nmt8gm_Y3{ew!ugtht|tP;`y~4 zv!t5dn204B0HzDm?Ph< z?6uSZ6f<2m>exOvlJE>%YzHca2%{zBlx70x;UGgvy)?{!})+)3y0hh=7jl~uz zX$hv`RC`tC3M8(}4d5w{bH>4`^#9~^0w45UHv8&h2UU-qmy|+0{k`bo^j?7j_eonm zG09~_njVNuA_Tp)PeqCPHa8hmT|$<4WE0yUkg{GWfAhDV{@@gsr*}rBAitA(ieT3kxY#k3Z{a*Yj7st31F~#6xrwQjCLzK&wPQrDB9Zyl{*Uva1hgba(kbOb0C2XkBVD2cb zR_fxyL6lR2&^X;{vo?l<*&suQl`zfoEilAf%l9m2LCEF{v3V1}&<0*#P<2?>9}6*7=xlPg+|Nv@jS`w!ed z>S-VS(H?U}T7diQJr`Noe3_^b?q4W|c^?CfuF_A($k9nbM>}++jrFv!6vM~zkWDhU zE(}yc5yQ3%n(hr!EQbdmde9X?;s`R1g72um0onT8z&Ig%Vc-{txXA~W4?gCK1F8J9 z%pxz3+bFS3#??bPL-`J<)qjWgl&N1m$V1uHLIx>)ND+E_St%>#I3evG$$6#Ve$&#p zA*=$6pyD?~HzyxGc-kp4>mC930P_VzKqcHp5gmqbPN1=WuQdu9H~x>qV_-`tt?ppS zRj4*2>xh3lQ~F6xf8^Gm-#jBcFEuqaqk9SnpUcUGy`dEQBIxWPbF5pTzc!Dbso{v@ zY5z4A+?aVu(KsQu*|*;4NlDOub{I-r?gp3KhvHCcxUHNFfwlKt>)-UoVKuMxdj8EB z)US7(%?`@x;R<9TaLc5P;bhQNiGh0{dvBZA#K`A<$E(%WJ=FN+1*EKA2L|<=6Q63I z$<~d1xGgA{5Q@u|Tl-d{hH@soN@vLtCjkt-$KnL)8Kpy(;TI8{Jn;5_TLU^%c|z%t zBy(d41 z4BDI<0Kb@(^MDBh(LoMQqbb6GuL5=niEZ!0sMuafx!w4Web0o$f-BuoceMt6zQcVJ zRzEr}m8fd{$o#8HawRRu!Rb$+PW@0plzbaaqkJLQ;9Emt@3F%_OpRShP9K=U+v7N_ zoYQSU(Obecl+@n#^OrKj_*#Ry;_+{m8Z}(-)dEErVyPBHAbxz11}n43)X6x+ 
zzPN(E1!qD;z&fZrqBF1dZ7DV1`qrQ*M5U}7Bmy0>H8@LK4CA|Z1Mgy~qal2WM#goCwJaXzrD5%Lu6uaAV~5eUZ+u#N6tY^QAL0O zI_@zSRY2eIcgmINz#529^3PUbi6Fx*>I+vOD^MI%(o=*SVMxtzfK{H>KwZz!vb-X* zUz|Wz_Tbfw$0-3F3}I=0<(NP7NeVe9+ z60l{V5J?f%$t20VXu(4ujpdu->2a3?w+QF*w-iFXitdMkjgo{uM__(nV}dl06f*zr z9wVKvmZ@T|%s;!>r!b!~d{u3@rgDo=$mDA0~oAx2;me_{X=tdmZ`b zsOlmjU=GS<2u>S?-Kp%Qa;aM}Pu3b2wQ$%9sQFYql&juG>~b>^w+?3DX~~^cxt*I$ za#g7JDbusyOYP~}jyO!hC^&VvN{oR^U$KMa=}uwr&>V>i`ZV=%!Oqf@Yh4PmRCcF& zqqKwu;3}h%D`brab)fcwYmmqUfBvN5Hag*FSU3Pfh7IoBYcb<~&B$cw-~asleN4ZY zcMcxX>m$(Hs&<}sOhSv$=B{nAVO_it)nOReHJz%9CO8{to;Ev#zD6}*pH7`aG+Cn+ zdQN)ixT;t)88inL*tf1VFZF;VdFh97|NmFlmB&N9w*5Irr%t6*LRzRKOQHxJYjGlE z$(nV_nkX8A{$N8x#If_Y-Ly^J7K%I>PGJm!NH1l+1vW$` z&0F}yeca9M zD^)+lVe!6^u*8Cdt;jy=Zx_MaS-qMv%AcDZ$&VB*(V4#+ugaNt(5$eXEQY@=LCqcawHhqkl!5(+gB}{~=GXjTB&yvi zw0Ki3<2(rq?x~~eDOg9_j`#Om_vY071d4fZ*r?6xQ~FsjYXLh<>8nn8NanU3&d?)( z3V>kn{&UX$X$Qa{kMSz5r!JoB?VbN41U;gSI~#M!51u9Y@LtXfJz&sGdO}u`1Y=#F z_8rYBJ*T^n{p=^yT~lO?Dg!YZWJs{IFaxgd{t_GLQLZw`ILzIcP^1qRr6E-Y{IwfY z$pHX<5F6rYVN2-KF_#{J>jvMTYAS01@i>p|kb2^fwvefg+AaKMO3vpv1jOF6KPKHg z1_Oj79J!w>k%{ee0t^zR=|WU8efF%dKNrE{i~`{ddUJs=)fI45v1Oxhlw8xSnf{aF z{Rh+s*XkB`X);zT_{WO*GcSucDw$mq?3o^&IA@SWSlmxeD7xgF5T>66Ot1#l8v-wc zbD4uMg)f9yDRROnwPZw98XNdRssi(#to@=?x?wdn{m?>Hb#)hS z&uZi`FX!jW#y!Q5y@79*o*jX1b=UK$lKU;IQTh^?w<8~<7sArEEv9d-vS-&OnAgpo zgmLmg#;G(P`UIl&q_Y7;-cV?jgn1kdv-xlvhheqF$^ z&*_UYk+;<0tEjv@v9+-Q*Wc*sQ}F_+MU0f&VzzRMlb&yO0#mK-&*$WvdRB)3B?<`G zIdni*H(o!BpPlCC0Oz*9j2DGkIR*BJGEv1E+W=KKRp0|5qYe7Uf7L9568ulH12InY zjIB$adWp+Pxpi!qfReZ77gA=;d$uM2_Lr)W6Ujbvu-tB)4@*_k)FfQ8{}jlkunHTJ zNASpdQ(#L7%;%db5ZiQuU0q#mR=WyJi9Rd-iyf*qSCV-Yd~aEJk3X|>(;Fn=NCgE> zu!nYy+myI%^ARr-yM{-BfvOQlI}1#uZPwS(G0DlICFivj*C+W{a6?ccFK%k&bc?j( zWmlIf%wypXyQ^}Dz87nT@`Y>b9DD#T zKz;&q=u>jMAsvDtFg`&F!uFrgWU=wy2C0b;KI&(A+M%f#8+#2cYvGimrk7b+S&Zug zX4voDF2G#FiOgG!6_Q zF~-AWde1o6vNUr*T}l%M%-Ae5zYWOMoc{f~@{K~+UOePTFn_DcL{X^9mMGyc*np1R z$kY!K#Y}yIAy6PLrmhg_ep?Xi?Y(IK#$g*R*%{0O#xju^t|w-H?udAx>5(2w+{~nH 
zlz&MbWP^{8gpttD_y}9*QyJ3tj?22|v+~M&>wB44Scq!B@LM*fN6u*9hhY|P=;*i^ ztho6@d2Anm8pW}k@R;H5?ptF7S_DnsqsP<4BvB4dL+E@#GeY|r;G9>!_bk-0)z{av zIvxf`F@04Fz_slr_E-L6%G1xt&Ax!Yb$@|UieR1#?Mwv(4q!`E4ISs6!Rn@?*mN?S z>y;9*UY@=QX;=~N9b^?i8i3itj8liwg|tC0`D|k+CW55a1vBk>1xrn8A74(p7E2I^&On=V~1k6A)Kz+oef3(L*S?!wBQUo9{p!jol{l>AL zJ?GirFrppWsm(#bZTWEz7Fns%b!dQit42OxExg7!xSz$ue5rDskj9;4A)}>j$cG2| zrXS8IgHA|Q4#^Z3t)RAy8iP}#Tfhf2c&D&d+VZs2H*;af6VXllV_Ot0rhDp!2wu2 zI_R1<>`*=C;W)}cS?{3$rwdz!_t$t}=ur^Q zlU(;vj3fdFbU1$@7POA_iR*09W`oDOD3q;XcS2xqM0?~`A|2`cR=*=ikqjaaUQ&Jf z;Xm+a8YVr=8<+&e*=tl-I(YXK;+XqMP#ZYM6)E}y1Jzrzk3EZv>w>=hiDf5pLOsQQ zV>XmsK!LvadHAo8bA<}?@7QUrj~@L@w+Cz?toNsydFB1oH*d_OTVTKEsliM)H`qfz z0UU%sed(*X4(a7F7S|f9Z*q`#sq*?=5)hcEf$e>37=>2Lsp%rbu%2Ivn|LAZ1IX$4 z9cIAU2#*#QQ$D_P5({e;U{MzkngsPjF=lTa;Dy2YpYMC4NarHh_&JI_B_B@1w1J5D zQM{17fj`1eU~@RsqnUrk4oBJ;piYN3x3(rrIrYM}9-p8|R|9vp5M-s#ozw6VI(c%a z=mSrlVicUEBJ5{(P8-gb5Yo1>CSWMr{yd&HoJU?pRJ64&d@3?uG>;Y6sAY4p*l%MQ zg2e4+%GZi_Orw(6%a7^&@^8$A_n*;@jbP1WcfU`v*#Kx#YusUN_>rFwL{KTg!WHnD z#glKqNiat*DlvZU`$53+FfAZ6BR4`Q7+RLPNkrRMCD)jC7?D=?%qUpEi?50GyxN*6 zUDDAt1Sim|Moj^x4YHI*PT{_se7O&Xj!_&9B1Rmr_T3#MS*U=_&*msCwjqx%qdK~$ zKI+8TrN{aKo-A&L&kbHlIPl5qhCc5uiM`K(I*a{XS3S(4*nI1&y=m&G=B!Om^aO^MV-W8*DQvES}(wfki|Boti0dU2qR@73!V# zfS;&&?V~)(YC$4JmRo;7Z2-R3cb;66$`&5Fs_;ZS&rICmZOEw;szRyO&3~f7Xmt+7 zefdOCcYvL1VBX0>sC6^;7ZuOA z*0#g(hG5mA7;axr!M#)+ns-q7Klv_gBlv&w-5u)nc&JxVa&t1OA+(M^Mm{ki>3Q!* zh@bGkJ=?T&@@S?R>eV(c!>o3iqf1)G_kUr`*)#jO2{LRbe+b(s6lEOA|nd&=je;9#iF$mj-?4;yMa0T~LI zHVQL2d~F8mg`q%!0;>&S<&fjJO|^((b@LWL&ujNAu*Mm?TXD*%){q82Op3DHMD!b2 z|ChJeFt{+TK@V4qiJ(y4+;)v09BA<%LJNSw0CBC1DFAY|N!TgTK?GhOb{g4D#mRIV zWIjvje)(||V6H%SGZ3r%aEtq7GjLz~pKIA^>uR_U_|S6!D`)mgODhsCUtZpc@)@!k~)VCU@G5#|9NE5I0<00<@K0{ zI>mCv*qN?7>N9bB)r28>LPT51)6D8fjja9jHqBf)H9cPmI8&&71A__O7=)wnt{*lY z8&yTfMEN&`-p_VHSUH0@-}}5(4{}HU1FWc%WWdD<)2EXF+zhsVH$R4M7X5Wfa(;qS z{#SlY#h7r@3VuF5KARx)^{+*5XPxSKj*@V*oaWTN(4vjV?`n`1KL^SgA{p>;fGCi$ ztnubp3nmxZzZ&=e*q51iqVAI*{U;`v-#6sI`F55AU#~V|9_x>_1oqbDR>DZR%9POr 
zG>_<}cxa7=#2I1Tt#8;R&E1)rgVd={*sJ)3* z(#zG-TkiyQ8IP=+ZFWY%D>MNRe7^xnM$N~{{Z{y|{pI1`NSz)fQ~gc%=*;2t*Wg%UpM%)`JJd z#P?Qj*}`|vt{0Ont(RrP_AQnv3HAtQ4Dl6G0~_~CQMmYH95L+6LzP4c#=W6rKm)LR znfb{+oe9uelohYuG}G)xvP4EJ@nDM z+^14+%^d?}yR&;E7E3+n{G>gYk780dsCM1Ojn2@cE9LR;+lN}M9`(^Z?eE{;h+kgx z$(YucOHH>A!ha;Pu&`8MC_KfcWw?c>i=+*e@FaT9`@OQ~KRgyO*MzJF)h>dh<*4ve zLbjym@9Tz~t#EHdD+e^9BDGkjtl1eu-QTpOr>iT6X-@2W__us4tf3=RWL-EC=TN07K#r5YiDT}G zoqHQ6*X5<`^`KQg`KVAdzqVr>KSH6QZeKzG7^>mESZaC@kH*JVZN-6=0S`Mv!?RyQ z2dAy?cXpz^y{q?e)7(IA-PINOo2<6F<^a%LL!ha3a3EFp-FiKyWPo%tDjL>6z#~~# zpxhi>XueULa?iAkN4 zz-7<-TYRcY)+v>qg}&4MRdjLR$@ZtGl#>kye}JI!_~qAYeHe+d-auz(QnKO}cECt( z>B~q{lc^|LOGABqu}NU#-TSpETuo#fzJii$h-=l;t7y73Hb31No|Y!Ry!_IzV12~N zZsX|!Cr@n`teSz<9tH+EAYAuZUY$H^V<-Ks`B{RD-qSx;8GJM@7g#Zd3J7IR;0wXy z)miEFa8HSy)xHosEGa3-_pV(Vx8{Ap4~l0=$*nEt$h};xCN;}q;wK~Fj5SK}m>;)A zQ9ZUJ&3K7K|CFJ>CNp3(dFaa6xrT_1AV$T72_veK%2;oJgdbqjE)vMWKz zi@u&}^`ZXw33Rf=hmQM#LK$hNe*?)gERaF_y8M74m5@+0YRKz6hW44Y;XWcg$Auxu zw=~&5V(shR6gk^q7 z7%#4;T&gZ_b0wweGVT^ukIjL7}k<61}q_Vk=AW)2CurdwX9iMzw6KDMZHdg%mkW zFp3UUpPbHPyAh9c0|cPVxq}ge1sYq8eAwtp&^}M1Uz%r4zPKr90}DY&8-&lJO;Y5t zPVsyS!K1?29E?jZn6zq{=PzhH0V|EI(D{ndV`IB^vQGL8E2TM)!fF=<(BM}Piq=b< zJO@=G8D7?-GIXzYl5Y*;!AJ5Q?@gCH^BM*$v4PghY&*x)9x+N6)mMevbA76|sK~#p zOmU~L81d5LLYol>7q#FOqxkiOdmtUJa>eC5e;WU_9<~ZQ=Yd&Vc8?#9R4~c*SqkFX z`B4MW8{1JgK{`Ddf~RjNrcyd0=5nWH9u6Vm?$+a~1C5V&Y{8O@9XmE(rk$>OvsqW` z$J^HvQ90{KaT%=t>nnMMTro3#mGts?pQyC4C4F7K{9n$79jWjl1J>E0nWc5&g@1J+ zj6tDAOEx<(D9DTLNPC5H$TB&hv@~GqvjFVBeixMGatqDTRHC%@%0Z#3Dq%0vt5+Yh z@+$I({w1!ma!;%E6&LzA2(q#SUVJaP^A~zaxhdsj+r$f;N~RE{3}olQS`+yN^n36A zm{?RjD}X5~Io45N8Xv;zf0t(c9Oi2UVOF;FIo@;s@w)>>R=d3V-*_1-FQjZOg3n|- z`i1&p%NI0AFf0CcH-8Zy<1_VGDdoilTucx8l8L}B>_mjZ;FKq~y;2~o24dzefNa8-V;_4{?^iyO9Yyvn8C#f@3z zj0Pbxytf3YOTmaf#yd&JxVgAQQJkkfWiUUf1t}_{>W;96;P);0C||U_>CK(Xt_~%Y zk8F1TArbDfgAVce`&q%tYEPyP({(;IHRS+V@ZWOY{5E*=${*X0hooa^gBS`Z()xYx z398>}Rfye>2bd=p!=~wyiu-r@zIu%RM_x-{ghqdOdLl17`?eoYIhCs0(YRelFb573 
z;ggZ^1|H_&1=0UU${G-WTDxg!X$dmUOGh;g3^-;apoQ{G&~PER;V~XgnuL>@{s<56 z2VJ21k&Z-32MeEBNel)vHU*;CovcSLz~O`1+SK=lI_rQ@3&L8Wd?_pz$r(E+VN zNmN}n^MjGgBOJ(NW&XWnjx%z3Tc2q+$YnS)mq)GwZqgN}E+lqiBELYRP$;!)TED+i Iy8Y;X0JMwC>Hq)$ literal 47568 zcmaI;bwJeX_XP@L7lMQ+$bgd4NHf$?y1PrHJBKbsLAtvHq=)Vn>7k^hk(BPP_Zbk+ z`M&peFMp^^e4g5S?X}mlx870`f)DTG-A6$|c_=KzCxe1=lN<%*y5QaG;1z1?>`?F* zg$=)wjkdXkgNd%54T_+ynXZ+Vjjqn~cMi`DY-}u`baWObT4pvdQxjTkb5jg@c6{)J z=Z11hHdn8sTmz4>kNYIIZahl==#;kg++s(TA>XnPUVJ7cGOCl(XsVFD_&h8LswmkQ z!5}PgFXjBWt6=;jx4v<6yvj~;okSh4y`;1EX6{F3b7!<6%6Ka8-k96Q^qFaQ$zLEc zgI=kTQpQAM9Spri2~F_()VR=jK1Fh16i-KwJH;eW6WUL7R)j#q*!>(k+>KhKetY3M znZug!BCe4^ZO%lqzr1hIy0JX^0V}We{wFR=!yB}RUhbX$^gMr(e`^@^(MP7+2BlY`+;2L%q?Z6MSl%)e&w3Wyk4-uxa z3u}-OXT9)9n~XP5Io;ytlOu|^R$u%~QfTREK@+1nug|gl>NDsd%zAAc=V7zwy^6i3 zsBd4(>);fR2MlrB)%59UKDc{;2~{_6;^vAsr?19+onT%tzyAxx4`K6>{&BO>W{5J8 zL4j)R)JBjnmCVkIbEAL{I3H^$?{ZZg*<9;#JbevCjNjIl zWf1tpTh)$uj=7hvYH4YAk8`tK8;g#Jh=`8% zhJg<$RajX>-arw{#AkpjdBr&s5fg`ogfQVfL{7xr3I*k<7`xnZ6ub4j=fh{&g@Dy`>gek`+1s~C{gntiH|OaatABAoDmB6{^jS~VM?S!d73=7 zD@qa)0rl1sb#87pHai2^isGeoW#*G-i|L6OvK|}lbaiUg_Q6))pcm%}PkF<~&%e7g zkZm-Og%Gt@Q&)c&>o!^ILOL21^|WtALP0^n3k_0xF#W)2@cGKmf*Eh}1UBabdj89) zpePBlSHPAPJZ?Yy`h`p)?kOG~)TlqBY{IcUpJ#J-NYlx|0m5P1!Q?c%wx*FT5pO({ z8(mJ)(9qBlNLc4`Y%7|1BxAO5dc4nJyYhQ_T1HCh+vbOh4F!*OH~_Q$8m3ZeayV6g zgX(9z%kf^@n)A_OdfK^R0pxITu02v!L^<5mgVl6|MAUV=lNF!tYCpoffR%_+$meNr z5fi7h!cR_4a2fTA4g1nuPmgkQbC()UBk42~xLqoJeMMJCN;qtOe>p3v{)m^4kg@_#0G7-vOV`FN{Y0@bIV4@H1 z-E%nI9nx+3{{2lfv&m0-+%qtY&cR5DF~eB4VxCq1P;aWJuepVA6R7~@vy-8^v+$W) z%NH(|A7Hf=S$D#JU*vixFh+Tlvu(HY2ZY?tP;@`8-ZY zNGK>MI7)xAIP{{v%&*jR)b{M;5OWKW&sRAj3LK>Z?C0ueS)|skWnKsa-3PuUW}|+o z_0ivxB=_=~Nby+Bs2jK(cOG>IyA+p|MM^Wj#Ah-HDm?m;#H(8w7#L`a1_r{m3qf%i zEj8`t+K&?po?GaMPO%>KffEf%%iN>tOMNuo8M{C2OGkNX8g?fxOq1PW%AM#Q!&tsn z16XSUmy?fqe!`oz!JOC*L`TQ0ef^bfbE;L^WIFAyi5O7T?^jqa=p^&;GmLzBeS`F8 z_#*{@@Z%Sl@b--uW@DdO*9#-b? 
zny7J_tgsg3_&v1!O(s}ggiEwqNp|$VadFA|*adz>K32Z1 zzs`IozON)@H9wyockM@#N2N%SUZ)OXFh|vwTDcLrbCQ7oj7L}DcyG0Yd};Wrpo*Za z?OvZG4~;I7{jmUHmdN@z{ZX*8q04J{H!czAeHy*$qrM|x~kRANv^hlhp#IZxyvSd+P~<}a`< zwESZ`g)&E_#LC8odMjWkgk>>!FzMq~ihzH}uUJ;|r7|>hbo6LUY-~9Z11z+g za3E)6I{*ulIg8f>cGj0JdA<-B&@U3X%2C@tSg?we6p5{m31GiF#Th=84Gab{jIGK&7snOwovl+4NLgy(VgBophFU`EtHL=|;%o zTHQE|821LY^s`Rx4ejutpf9G7}?7~4{yR?SF6j*Kaw6p>L zK}MoqaDkNIN1Qx7bq8ZeaCJJ~GhOw5cYUEdvCeK?BlweOj7D5_HD_C?SYj2e?);?N zd5vw|La(`uuv(q#>DqV|V&l_;C;eLSLa)PjUwsV`QF2}8X*z;s%qt9ziBNtddZy;w z=I4hR%)7hV?Ec3R?r@S&Q?CF=%nxwsF@&U%5D|SJ8d9vXdwAto*U+VTunlev{@S~P zC~Knt%mm3lkbn+Q9T~^JL8s8ybSEn@6Ga#jZPo?QIx+v?JzDmg?_)4|4Wekt%1VxK zJ?|^?d#uvp{q2_G^iVJ*Yy4d$ndJ<-*^uT@-u^s zUXLIpB~49D73PeIjs}5BT!rEaxNro5*Jg0vqZ5z&eY=IZl|{khM29A&JXoOB^}V%K z$XifWMuvo>6M;zXd3QCMR#@6GTNyWH4^*K$?gTaxAy)_FRZwn6ys)$;$E-E?TTSFd>h0dAox*HWDRIm zmO@FMa?w!|ukpjj`hj@NZ-0E5c-;0{Arf(VvQ069)z>@N))=YjdhOpwQ-6b+O-dO3 z__8*6wkKe_M}Uj=aF+{4iIB3~i(z(s@#0+8a{2d2Ny=WMa#8btX5FOJQtzQ9<9NKE z$IvaL4o`8|CSP4y>53oCk^mekf&t?{gET-33#TFYSw{!XQqBrmzv{uW0IABboM~}! 
zInR_BGAOOyPlyebb%{RP9V*%pi{o}#sXJp9=>!<%R$@MxWj3xtgi1kS+>aXGgozok zzp~hy+!VUKAURkXAROw+wWn0_?VBMYjMr;W1Z}1TT{=g*FYObrS1noH@%IN^_4SOR zp{Byw*x2|d2QvkQ?KvvUhfCSGF2}Vy<96IfqP&OU`T2)kT{$GfIM<2J1B;5!w&Ly6dvU^W-1h`N}`?6CeQt;PzCZ(DO5J64X0mx23#S<3PemPW>*Dw!QhzMnOH*UP?wiziiFAfDSH4o6z!)@$uvOiQ+w) zcw1Daqb%zqB{PU{YeohlWGZm!z4>U6#j{hndkE6+W&>H?l&E&=YkBh7o{nC>{8=6S z@rN~W*M|FaJDv`G|L!po{W~s0MNmVdaF#_E@pL!h!_74!%B6P?pABN&3Ho(%f-NZM zl`6_JP#Q3heLgfXan_rv6ixKvEP}?jIEPo92)!3GIi=1~rxdE#);X?Oqz8i$lQNs1 zEkH22QeV{d78|CtN6y%PzyB9f@&g1L8@;`$0R5)cDno4rQ6u)N9EY8)>+V3ENXS8z zYMBHg;N<8WsV*#|rA|3w#660LUnHMULiq1vK03d@ZlKS9u{jm+vtS-5#)o zXRMf{+f!4~chEh#&g^TP_R+8l0>-QAVj1;&kJr(|H4lGz-3shbSfB*_#$#{E2ljFw zx)Ua_INA5&z*lqEDM$!Pt@@tWSmW3NV_efWNZu(cw{%TmhgSMij zug_|?=H*D1g;BAUJ4&O9llcH0cPr~iFH2Cxi{VGvy6D7&+Y`wk=>aJ4TL@!wiFm9Y zFxm(jYCzA55f#>|x@-(v?b9GYGF_bA1&;=sgm?=HxsCdJqUP8d!N@<5#qQ`B&|kFC z5lw)Jc;b*H;*^XRyfC{`QdS0X*AJCSDFP2CT=qMARa>&;&zGm9N|cMpZP!i|MxniZ ztRkyaSlipOBH9o~1&6)i(jq;7#j2bkxUzZy8doYZ*Sa)#@aN{;Wg+p04<5wQNtd9e ze}xDCRDpTmGBz_CNbMi5FU-xab$3mq)WVjk)}5JRXr>=)XlOg`TJ-2y3O*)Gl-j(1lETQ4DyOcX!v^LIRF)vnpGLR4zTXkMG z`OcR*)B4?}WKtqW*)H(Q23L2A1-=#xWyjN!BMyIq3GJJdw(1Z z1>PVcWoGtUGId?e%$S0c^KHjd#Z7+#u7v$9ywLE+=HEIB{$d3j0hPE(`>5n)g2YV_ z5GbksML?x+SlH}iktt*dyoy-JBl#QT=1n2w*Uz$#%VUt<{cp(9hU}2FK#>#zg#COp zZOe6mu7UAPBg9>m4B)tgZTu(1A$>fXMl zR;e5F5f!>64r^()imq3Cm5;Vuj23c8^6&4)yj9kRrS7`V=Bd@VtS2bVdAwby*j=`o zDw|J-0#APTh`KJ-b=4YYZaI%yt6M87*KX}gNZwce4+@l!kdpEX2zY+_^eN_=@sXs` z_R^$iiNUvN*h5V!-BK+#NR8uH#f)L%lIO4&%*j0l2GLjZRYEs3G{n69$;(T$wW?4? 
z`804?%XCuZaNMVty4vX1bcuwEXe{h)K6N#M?rkbA@f_X7GeV-Hh1(ndB$y43K`1Z5 z42Y6iTU#H7w?6S5VqPla-iVcI#3&?lF*1z~mU`8GdEI)HXvdQYW&sL|A46QBBii4|cV& zE@JIvX^dkMy_(F{*4Dq1aj$2qg6auI7JRbXZm(46&5%SKsYYstmf7OF&>iD6pGQ89MR1F~qG#1pPk=N+dpyND@|@F2 zm|cOoI+k#TbGmns1VMNyjiD^(yM_q3ST%B#J1_fKeYOuXAAh!(o~(jC&rbE1Ng>#= zFdq*Yzf-QV&iE>}!BInnpa%={#$6 zpp3fl$0J??j+=%(J8NcbnfJEa=fE`qERZUsp>rlCW6)z=t^AGuU*8!N*kYU1-sYu> z5?0IeR`&LyifW4v%J}$RmH-=DhT`wAyB1?^;{27?8GL2WVz!q*`It*i2Q5BUeE!GX z;5Rxx#kMfVF6}darr~3;V{C1ZwsWf@Q&u01lvw>c9FF8`iM7Fil=gjCz%<TXDr<_XU1p_ zx1Ku7K3Jb`VGEq{nEN|(QcJ5IPl$T6!pSl||AE6^h)rT@^Uo=;HNl_Uo~ZQc~q9 z?LR|(S){!G4ys&~-Tf6z$}h~pgf$S1)jVq7cH%Ws^Kk1S3~5|QaW)@~{yzB=(?_r9 z`^y@9vDW%0eFxS>Fv*_xif7q&}!vJ~48iWD+mF3$PP@10kuQQ2J6(PcC`W z^^G%G<+v2qO1o)i>gVQM)?P{JoWE1SEod>#Dp*I?N!N{9fyg2~tD^kby7a5Rqad2W z!IGiIy<2&DD&oAvZATco+W%fSaWn4m-wX{$4RsO%97to2M*IB7qAo@x7y*Jrrq1<&U>Yl4;Sc84y-Tt<@pE|=FJ?NBpSD(pX68jO)OX?7}e(293 zUhPq62X;Hd;A%Q4RzaV~ZjQ5`X?E^yS7Rm`9gUCm=&lS8ypm)6I~^lhQ*j@YHN6${ zJwvRXpCvN2c&^*!WMi4VoBB$TDGg1^R35F;q}b~+cD zN{HSHr*J39{{aEuZ%_S3V?@JuKXHGmhOazbz?heP{!UYsBY?Yx`a|iTfU9)psb{$c zR(H<0wD&U$Q_Z6ZtqDDOGUt5!e+TohY~)~bh%=uvYXG}BjA~644crabZt^FK@0TL! 
zH|Uo)Y60RC$o;cjkm_t{OngH}`VP`^I`l(=?bqcIzlBIim?fUlG&?l<@5qMNl6v!| zDIx^l5UBIH?bx?Tw0|l2m!F2ccN#7OAQAkJn|gR;$1KMfn0Z7FL-#Zk909_m>d9Zb zaVXs6U7fghBRx(7=f$8F+OxecU+4=fV%eX>V=HE*^N8R5LzRHMfe~E8=1KI-L3tR# z$5;@sd?%o3H^!hV)o$T?IPkLhjYXR)n{9AzJKTM>!dac^I?3@Q_eV1FB#@~d{Dw~my#=Ca zgQp=4pbwant`ECX3)oBPoI9#g!XRh@j0Dzpq5lqEgMKzxaWrxL4ZNP8#V$0Ap?-aA z1hUi)rwrbHX6JLRBqfY9(HRIJJwa3Ek4NM`UM`PxSr&25Qci8RyfY~8KrufoeTO^B z%~AQIUj#?C6RQ=fs@fvAs4vfv?mWOBr600df80dD0D>7K`q?~Z5r`ZOI77MIp}?gJ z%LVK~R0EKC=hi3uz$dn6dW18U1D2Nh+OYeB8@Ua`oYFP*lVJs7p5C z7wS@{PuM`y#<~d{>_P`--{RY8}e6i%9R0% z(>DYHktNil{7!L;XazFw(n{t2-|xq)%_vwb^A-dQFfIThiDZQC;TKnj%E8IL%goXu z-N~xjuO-ZMtjpky4fs)4JMnZJDze0#YxBJnZ&_(>^$8vx|qnxf`G3#@B?W~@5^umi-eBBq})gw){^CS27Y2H#hHK3HHihwy1p0^xYHM;8t& zZkx5N4Pj&XcYiwCp#h!fB{2jTfK>t0Nt`SXZNRz$PGTVFxu1jikWo2(N4O6b02E{X zvwXkTv)xscSJyTZN)5?R{_~gx-vzC(_Vt^;%u%#bl zT2o<(`gFgfn*MP2lwSDER6YIh+3B(3ut^=-rhg7ZP}LHv#`>2C2swb=vZ-8ceFpw= zE5gUlB4cT0`}L;sr%!kEJMkugV$$qSxW9Xv4pmPX-kNYS18Ris(U4|$FQetrPQm%n z&`ahrhwZb0T9@ofm~qX?)`R~`A7e>{>Dmn1a;b7XT>x_WeROoGwDavmVl{QA1kS@5NuEy`@&DLJ z^sf!Qc-Qyk^*CMahKPg)O&_5Xd}Ogtdg_@9nl7%%7V^3lE$n3 zNu4k9+(8_hb^p6lR`3~+4}0$d@ld72DEWTFc-1k;)~8>hIW?a9-g{4N472R=7buDT1tRuCTS0lcj98@c!z_WUIV8-_^<4}2q$J{rlvNjW%ER8LKuBg<8Pb&EFa?apPn5% zR8$-n742u3#~-b65c=!++JA~1iqfc2+Y##@yoVlGFUd1*)t&J8-Sh4FQ_=gT>k~f^ zCBPvp($7lwZq)!X0yxYSE`+KGnyiEXb7p6gGwp3`12=o)rw^O{2^)pTrdJ}RKwu;N z8Jkh%??)`vBJworBqC`cjYJn?ZASdncs6~I8qdx>>rmwu5H$8Hj(@&Q>GTuvTb-}J z(|+Z7^ue%~iz~xXv$F$=d25v1%M&#>fI`)-x#|rT<#tR{+8P2S$2rG70z%#Tq$IOf z-c69#MIMZ@jG6+52>1?u-PO5!wD$kZQ(aX=64zN_|Jt1@P*XkTS8vejP;Vn?fX zm}F}ndupKpSSP@GWOVv?HIbK$h|Y(nnLF|AIX|Hc*N4c^qRraHSs5uUjazyE9OF0r z9i`=eq?$8$xoZMQLVwyXi|_96051=CcV!6(?aK(D4FUG|OXAe@SLo~!1xV5?2`mkb zG4C*3Q4uAt_c=2wOHa>Q@&MO!)UjseRtmVn(CC~QzHe?YLPab|C5jcxSVwl2=)vrb z?G@60;XHsr^-fLOt_9t(0-v^Gtc#PpB}ib+R@q|EBeefq#Yn5Hjki`*a>$Ff@M!K{ zDL|$tofR$fUTKa;eKsW(!LA}=e*gIjiP9S9l)Lr7hdCs#3AWH1%ad7;=~aJW5Nv%}3*1v~Av zg|>ff!2TvOByx0cRF3}uY%6@|`w~#hDufqt>S|l6mX~(WoGkl2>2M+$nnef}oW|T9 
zt}2itlj}l+wZhe@{GPWmv2y0=1{6;JO>*(EaAcZw#a!Ah_X7pj)76+)vPk9TFmEk0 z+pS+y1RMv5RGq5tb}ejS9V}1mDo3t%X;Tx)R<+&%S_H@#j@wJk|A&e-z;T{1!GdVYfF#gY&?JvT{#N76{2G;zbSa`} zcHjfx#AUhX#vdvy46eDfw6u~0c;UqTv#kVeu5A;BOn4OcF#~@exOg|;k!{nvHqB9l86rO>Ir@jCw_UAi+$(NDWokqp$pc!PHm}qO( z`xUNJO3I3nkB~~-yI`aa*8Cc-?O_<~63i8H8dB4aK=e*b0$&PkITW#^a9Oqk`v53| ziV21A?#_ZC)1@=y(=0!lY`v@YY^|TivUSIbhd3-cnnGF#Tm;I<^h(SCdem3kT6?+r z@oI;n1C2?}Tz1aY_Rr+EP2h(jYfah3KV@WsLZ5-e_@p@=`_`rNDAmC@U%o^##{owR zu;0L7rGnukFS`+lkl$)4J#kEE?;zyjzAL^f!7Gs~>KYmv!pF5OrbcS4fgrJHn{lR6 z@+)m9aakm)#D4PUnH4Y{0n%o+K%u8>srcQp^7r3C^vdPSm?KFXYmY}uzE>RrDdDrp zTEx;K9hPBD;4Gk5fK>lR^|*9eK)4a7{-(5RmNjH5gV`TD`k+8D!H=&}gVaR2P>t&^ zrX9KLmy4HR$7fSF_SxFt#$uF5B>%~9vI_%ze^sT7J`8tPa&DMDYccU0w7krN5RoG# z^;{xtoiliU5n@mq5?M;N`kTR4F;N)ML7HNwjvr7NvI7VZ>kMp_59;q?664W5jbA~5tdW0ec(>VEJ+X&7Vu zE#ePhq3(qi_AyK!I{+N!sk2Ofe6tFC5$vNVAr;jc#%Sk_{g(kzu?Mkp6Q3F`$=I`! z03i15p=}3%oJRawuCj6q5aU|s&A@O9gO9^iCCddd7*a4(RM6A@`D5D*aN>ob2jH=! z&$-v?(l*R$nZrGmFIdc~qGEUh*Iv1}eDM!NR_54e@^rC{ls}2_H+as>n)wi#*b((u zX5E3nYl+<5aL+@f7LW~2!yePkt`wu*D|yUcjG&~#lDQ6Y8^#n&_KnLp@C`qQ*Lh~6 zn2yw5d|z4RgeF(Xt9}>=BAMnaxRk5|T!5bg0_j7;deC0bmfY#Ajd2+_0d)s>t9LUu z{GZhzMfbxe7Uc_zQ-@%Un`L}7 zV8Q$v;{p3T;yr`A4rxS?bI+28v>eW#IM&}Dbq1>_Ug~QnVmrB<=$DWjVi22vkc+gi zBXTzCE@*)L9diGp#+zo^?Q6mD5y;&8GMfcVJX{arW3JQ*KGzN}zUI6Vg=2x&i;T3s8QIeBryPPJ1%n@Tg|#_J<$@se{xl=bv; zTZW*Z{jQqvZsGykZYNm*b=Qr{DDalAD`)&g?dCdVS3!GiIujTdpf7_%T;3t}r`{U! 
z1d2uoNTdIq4TKA+(jZ~xlpUNOXOh2Zk*+K_5|5iD?Zw|$Gk5zu8 zPd2dvvoV8Dh@NTiSacX=)dJVCx)GFevBlpM!WzcMyd^SIAT-*Sz-JgYa^zDRjikNZ8asAjAaHy_LPGY@t^}^w zWE9@bbuEH3c_@w1j+HMZ^Vj!_6&>1;@C%h`Wr?xPs?HAyUm!gL$4%)Zy|{kDeAg@8 zrJ}`@^3HXnC%6wJkIlWLpuPE$YG^N&&UAwA3&PrZ;;_ouqmP#|ON zwJ`gUNKAT{v#rld@>#AsJ%XtfK^M!6I5UwKZY7AHEVqvJbpqtg5?vZYbj%l!nN-`q z&u~vpO5iI1ks4j>Xl3T!d6+;d<()w-0PR4ZgeK(lHiRGa0~hQi3+z%i^FA$Vg3d-w zf4+zb`L};_quu@mD30IuUGEK&h~2F%%4B>cuL|WBkXeBnn3!$WYY$s@Z?piEosg?r z2O_gdgwtm%9|qlYT??rTO-_l$hS1B0_(czJYd&^>x4Lrap!6A_Hz01q3udZ!Y7Z1MqMMg2>}d5J9oqz!V3MdXh z1mMs7`ALXLj>!@ThiDHkp4Ltbg{pLAy?*_=>Ka0*Bh2C1vl@aPC+9Ict&zS|cHxwf zLfr?0T3v%$9^DGzjUFGq15^dqP!lsOyWqacdO~iv8pQ2n!hXC9!{kq}zPI%Sp41&c zt4(@T0Tl9|57-~}G7VZT#foFfFX61gQL~i@4D@ zgW_UA@Eh<)-Y}qDn`KSzFAR(B@JzjjMHRt-K?25;FGSpwoe0!YQq(xi;jz{7b3ALO z9$%Qp-&%^_vedQPsxE_B*5*&Bf?=PURaOpw7NSXP0 z8+jtAi=HqM6&M}?MdjLI?lm%gmq6F8CFc+*h2I-L?7k4GIDfKxyO^ z9x&W6)97*sHR4jTHKasKMri3KCG~X3cpBV4^k3dg4JL6gxX$>qYBi(zUN9D9xdn9R96Q;IP^9!pVGhm2UNyFAP`-9P>-c&j7+@{E&V$e zW4A)hjA{9qY*RS|D=I)7FK+?&6y$;1%w9t)J`eay2s-{`6iQxetxD{5=La)^44_;o z@Ci0{6fb*uRaN^QwPx*@4k^9q2oo-{AieVkwDA>SRnKcJ-}dUy9n`TQeLCsH80eKW zsVbJw&Ov1LA8;Z?i*_YGm76p3lY99`<9Z7@dbiddNyl^2thT#kw>Bm{Kfkch9rY?j zJeCC%oZ7))HTIjy44{D1?&M$-R6QDjr3neOg38{BOxg7B-@VQq30X{{P|vs8=@^_> zN0@Qf2C^g1#%Ar(B@@>`xqX%GDjEd(l9e@rR-MwfZI8)(;^c54Q86Np%`#ahDk>_2 zD<&vtW3z>j)p!u@c7ApToj^{njn>}Yo{gQo#B@}lG#~UbEa5Csya%;_ZTaP15~h!E zx3{-LY2fnN5wkeh*f!^9F1iu3)fT_LFC;kS>UG9|lJ*2B%qV!k?d(v0_`$#zo3%m$ zKtQSJ&}sI0nhMZ7f}DQ&D7Aq59Mq6e5M_WX>m9QwiP{8$TTMYG+wlQ?8B*72UP`9g z#07A-VRZ9?W-b4~uoe|M1x*7+=jrD>=XPaU2OGbvfO2O63goi)cGARSMWW6wO38`B zI7l!Z+Be?cxFhFp_%lE0Y|IlCm#D8L08;g?>As|7_s^e7(dGKx1fihzTHT!cZkLJr^UO1t&knom&Cw{wVa(7s{Z`&Ae`3v5o# z_5z=kjvSL7sGOgE!$3X%r6Y#PFvQQtUo4t|-?kofv^c?}pG!X&o&>e(i^IC{V}oiV z9386HGXeHcVkW$U+}3JmY)DagC<5}0>8$5kmL{y|jL!GqsVUAQoLReXDJBp{h$?$S z9pAFw*%vv6I}F>ug0wbIwJaA^$UV3Ag-F*tLn>H^NIvcBB0oJH9b|1hX!>A)@8ksq zRWgBEdozZW>1m<{aL^QmrcH_Yy@sHC0$KUaVYe1%l`>!NafiV~U!Ob$af{-zX81>{ 
zXryYH+2TosY`O%617(>(Z(pD8&z>G>A5m)l=+qV1<^aDU=q|CF@4$sA1gq07x-9@O zk<#dw-Q2jfoWa&|X4m9+Y_<%kTz8(k5&X?ZCp;2H3~rhLWJ}lyyeZxg6Osd94&aJv zuC}F;l5W*9R^@1)>*Bi1U0lhlwHa9+e&g)yYzglFj1>_k;E(t4+#`ZU74aMND@;#M zV@C!ZXy8=2ox2*sQ@)PrD1hqukOt5RQ?1(`QK;fo=u?Ty@O&8Y4NHU=?VHwo{pG$p}XZ8a9Uyd?>kA|fM4D;XII9*E7j@P0jk zsS!|2K!Rm`Jod4qvIPL!sM?tLxx?$sX}|$N1PD(|tGwX!`2jqM`H0q53mY4_M$OUY zOv`z-QLbuw;~HZL=*R&t7mJzqrTKS7)slenX;xA!?-kI@5blgatt3UZEG1Bq!@HRb z%CN&GpkY*Tg~pOxyJ19fBZpp}KDAkZQt|%BvCPI~vZZF@Ssg~8a=t4L^ytvJB|2{j zQ#@R)K+q25O~34EdsEllzqns$4(|Gw&wl_kb1da#RFg&{B}Ge6KrFyX#yb2&IO!LQ zK>ij20!6%PRf7;An^#CVek?U!9=<}FC3>pQTp6fq06D2bg`tD0wRhzuje#u({aWlx zZ?^zA7OpXb;N=4<=t$u0d#q+-QVIE$uBVlLWaQ+=Ejx788y`^De-`k1KB3cqgQDMx zOrbC93k&IxT4WQJfs~Y#at<*aT^@iN2Ir;zhO%L}QbBUqU=rtTvX_Z_eO){>G~%8> z(k?72(%k_KGl&q-9ub|&ZKdFB3>-JRI!-YdcU2wD^wmGfSg=S^&hT$cAXP{@qSA)k ze(g{yG71$rj|-=!-dqFvWl}K4piOFQ zY^+z+%dfJs5@7*)hN@u8X=AR^`is=>y|Kt0ot*4IJ$w&IJP)#GEVZH1q%<}*78U&& zi*xr(A1|=Y>;${5?k1Q%*!f_U6_H`E^NoVLF#`*0_q&@`E{(m z34j?Kd&ZR7KBaIYJPfpse^1tR zy!lWH`^?FCyi&wKiE#qD3X3PTA@vZZSvfPHEi8tCpAG0p>@ zS_hqJlET8$(3oY4YoF2+9G6b_CnbUSkx*wbhj`qBI>#LddQ1$$SAwC&AcP?FaaLDX z@1Wr;>Fk!5vkmJii#qIy*;Xj*;$4$ZP?ep_dk|Y2!sMpRr#%GHm{$`GlS+d8KRHYU zESG<7?Qo=4553+o^c)l7Ax${vtr-d%@iW*G&|xeS-_?*8VC5cUJR5LRn8grWf?Ni& zmyF;d60;GcgsM)-H}{{=svSt)t05z~PiJffx)BsLsk8m59adGMd3Lg0t@xBmjKa4P zXg$ING&B-S>bks2wzjvM0@fyKRAZc7H&1D&GhbtVGI6WtDvk9j;4$@t$=&qkW*u;m zS7lFDc{nnDu9@mB;?3QOyLkfOXmHey#w=V~@?mCcEF#11l4mx2`@Np6u6!~>cy2HV z9|mRzqE*h=hXX(kI`y}KuJ-7xv4H@xyi%Da{*a;GZaP2Uz7;7gKc(nu|E4ymaXq|= z){`Dhd}rn;YutvLZ(kd0$cjgw|H?N!%h1YrejLuNBZz2e&6fHz^J?}+wm~0U7sq;Q zQycugsof;HTgJd~_oGsyvd?7ZB*2P%*X@Qv-Cp%Ru+0zfN#WL)N9bq!+=&@6@0#P& z7hvXa9=zQ`03}E^<`F~d*gBQ{Myt7fM+7a{se`0%7Pbo!);%^XkL{9Bmja_>%ZW-^7 zi0QfqswPa%>JsM8!_VxsLEQmA15Xjr(v;VkB^SAfLaD<9mFx&Xc{Oi%CC3Ma-zN5# zJYR5FD*08&t6os%nO1;C+0*>&@xslJ#b*Q;a?_x6!rd9ZsSi|}%R-(ae_Ro0Jc43Q)97cuK*e0@i4&SL__IrQFm8jF-$B66UGBUWuF}Qz zJY^PHLR$Xwi?!S?MvK8hmIDJDq_2kkuA9>EWVRPiRf5+<3yF-=UPQ 
z5?|Qm>BBQo6yZ@tI&CI@o^6x^G6_B8Eu-M^o=c{B70wdi$%(L)pIy&~N?&_8iW&lj zj=kz_Xt=C14G5DvOzsEtd&3Q=oo%j0jmkjQ9<#9mkBvc2lC5oj#x)tc$9axP3ORV- zZh3Fi{2`9Yuuuv^4NQs8I!iHcuQm7-q=O@<#zpwIGSnF!(~@qV*lf-A%vDA43%KH9Gt~&ef0P&tuIlP_;swIw#{Sqhx z{56GI&*kXF!W<<OQ+C!HCSM?n9gj(83BsS8x|LzWh<0KG7Oi}C`D5}+1=Q1 z-%{^rRFX@Se)nmD+*W+jW5?}cRF4-TNZ$W@|3ybIr z64IQoU%KsQb(hOpgp7|L z=eV3Iiw!nsPYy%F7vI0{!(+-&%~tRS?HUwzqO%{MhQ0YM1leRoVY+A#6~OCrHfOvt zuDw%s^$6ORy%D(Rk24n$-C4=&YzUPY8czWeocK zZOkoYK?Q|s4_F=)RGWr+3WcGf1| zAGFh&y2~pAx-x1HzKh-_lfVaU$**UsM{Wn`%xJuC^((T@h>guU%}cGC z=fvV8G!~APl@c$uV*-f&J$13r7LE(JQ@oNbQd0xb# z%#=(|Ae?xWeiL*`WS`%rH5?Vue9h9EA?7BL|9L(vw#uzFNO5k5lqJJ$o5ZVyFpvB? z#RAbQnN=UG8^IJ=dadYfmAd1@)9F&VhoGY7d`|6eN-2L&d-l2X0kmHDtHvBjdcg610K#!<)M+Wn?xV*b{*m!vO zX_G}GT~&Iq6hcOq2F{Jm&D|Dd;9|kP)5wmc8;w>cK3(!8_?e(vE{sCEzoIkt{BXUu z%5HxsOP=?1=-`?84jy}9-4PYgLMolvyD*(XC+iX^ACv!*%T^HYzbo0@wYRd!0D7`M zDql2D-TueHp)DjJ{fEq60??LsKILJzW1<>O*(!h+zSwJ{~i{@{c6{LcfI0(1E7M>Ecd#_Dzcg0flQO!F};v*X8z>4;Mx!YvaHA z`Mjw<-OE053SD;ij>x!*hV;%KU;LVy2^^}jB<`tCT3W9o|7a5CP=QmmOKt<7ZcHWtMh}imIj;!+JGwo9IE^#rno=9Mzt1~nlV81E5{A*DHD zk9COs5jzVv8ONm1+aUYP6)*$#4G$MvrDHXRKgZ4VaZvgNh`{F)#_DMg< z%HY;YO}koNRw2BhBUVr-8&Cq-?DH7MJ^1=g35NKUnM3+ZYep}>A*0a&Up~b z<)fvraQ+Eg8yL+sPIrKc(o<@>4SJ&}Wr5&101CrEaZOmy&W^u~6o;K_HfOGs$npU& zFvlJHT6_0$qk(fMsGb>Ixkt*&{10_Y-fv}oRHH%orV;u~40AS>WizO-%DyNJQVyoO z2h?04Jo+zyZg($EDl>P+Exwpjp0&hS^)@Q_{9$Bt)Yxr2RXXMNdhM~^Ay7C>sbp_~ zz?;RlOus!(;*0FEn{@RZD00B;6CKMl@}y4VqErTrMpC!|ftqQjALXNMOIsHu^snJe z=GA)~XAtsHwn2Sn;ao=jp2+rJpyxg%{r-D&pOnI9u6^NUiq&eQ^zH!d|Fnt$SyIB~ zPVm{(6zDrM>bDnj7}4PPM=rt3u(wgTLM8j!J9bwPMi`cU|9(!utc!?B;8G{)Qp$gu z1qJ$A<)j-A9jl!iXm8!^WREb0WM+brhi%fE!CJ$Rdwl(GbHB!ikMtOn3O%-W@R#0| zS>RiJxA|{3+l!fMhx3q}@haA0=fNcXE3mr=tTu;MaUC+CWmtb(AYg@#2m4tS3A@AY zxA$CpPX=5eiLTJi%=DPkBjed-|1$`*xX1N(Hk>0nVQmNd%SvQx_*M*@&?!;wM7&SPgKf}4pM2(F0ovx2 zfxL)USgm8WIQZh1+<0XPK7N+SPa2G(s^e!OjVb#2sX96i_Z%y27N2V+CU}oD5ovXP z#>g8TQN!meIQn#*pzL{nP9|GQhS0Z$v`H(<-&W(ZnShWMy@waF}+zmB5*^T@|X 
zf@x4SO#SuqwwpKK4s$ai-ikokXR0UJP`MNLwB`$W_h&N?VZ?VUOjpBxRLAga!Ew!d+77-%O!C$0H#{;Vm-J@tK&t`kGk9f7{$6 zIzXHISxU9dz}RG*C^E%4m{A0?idIPwj}X-d4Bu|S1uxB zU|;~vbBv5wAZ7Xn-ANh`wo(26G4|B~Q8r(=(nu`bE}MDE=1a&}> z-u3r0D*mg;keSr{?Q=%;Wc7UYZ{1|L?I1s#FD3a6r(-LvCq8)ct~702Z!%ePE+!5R zK>iIb^EKK*op`!5zFH&@h5)U7+7c2P3c}*|uCIY~0*WTT&dLR1WYV>_!NEP1W?3gp zqoqSb(H~;>V%6iyo+XPt_y+&z&P*E(3KF%s^y`NXS!YlUzai5$JA2hDIICY`?8n32 zy^GLL33WO?Il;ohV(ZrmP3X2{=iuPr<|ewfiF~-e2#O2e8ExH#y)OwET3JlYY|Cs} z`T8G~jG-?LL*LpEMV#8=UfKn0s@1DaYzU#f>!vdD8cdVKIpJtA?TEA;Ob;q3C@3y2 zRy_Ir+1^H9=o;c+mwNd!TVG;mXlUtq+3-2>`naX#Q#pBe`8@dvb`?hIU$CF`q=cRA z?HI0JS1roFGG1qtO*o@d$D#B)fXr7oB^ND7GZGL`KWOgnSBW*t6) z5|e)KWR_YWh>&p}0&joxUvD3KX{s0eNjn{FVrnD8Xdy;`Z?*MpRT^vP5Mq=PV zVw-^26F${_0zAA^&}=CoQc}`?wY`w06jETRNl^-6U`Tl#5;co38IPQci^X{J9+!Zi za1Tc^7(z%$_-r<&#{gB+09IPdy+RD~OM|u=0Ih)EsjW?v=N!M?qs555j@L2POKz@r z_VsC9;;bz$@`9w@MER@!3X?|biP>%YtB*Htw3`5E3z*XY$hiRNp`~}^WMly#%?@^1 zW2k@$baLl;+MoW$@|jmm+_dNq*n!Lq``#qqzhBR^K36I6Ihmn`YfjjZ57Sr^^ruzO z=VX~GLgqj3c?GT4vRE%*xOMFM7`&z=(G7?uTz^6*ctRKFZq)u`GE!+Fp!b1N5c{Wm z2PFUkg5JLcI{*BG>X+D?J~;`}su1ur>gND#FyZt`Zdh8KT8_m4az&`W2TxIl1G4Q0 zRvfSZ!K6mjJ^uXz`2TAM^^af)sv5=_fNKCIO#@PiD2xLGWmX+P+yV4Wh5xT6l^2*E z)|~xslTb)Ik~b7+xdGA|K)~BD-bB1lN{YDd&L~=|f^qqnw}xKFRE=}NjW?TWQ}?f% z)eL*{`liiFt-boXDul_+&F!41S%zA`hWZ~v5ObrAc@OIb1Fb5X?;8{4LZ5Gp;{uYP z6M~3ARR6M@=C1~e0^mqGfD+MI$tKpQhiSoh z_&=(4Q4N67QC~sbOc@duhMcae>eC<^6%Pr858MUztKOn(gZT#poZPzehHwbvmU)6a zba0pe;1^S34!Sm4boRC?a&^_>Br7#Fb^3-eUFYt7*$a@PD}hKT76#n=+d`>P@w;fo z*0J1(MSaw}Kl0KQ9IMJek&&4;b7}99Z)|RY6zMq- zADl)3_F6T*ebjpRz|P-;2I52Q1o;3{F&1OgL5pfa?H>Z$b3O>-7>2NDpDYo@V47X<*up!$KZ9*QIlPu0imK^ z&!zXxYiG{S#QDAxChhildc`2na_idpY~P3y2_k3wy8wcFpLeuJ(3!sml3en{PKO)1 zWakw(U(gqw=>)eqAMcp3&4B#wD>F0lPjM+JPe9HTY~P4tquG&>KMQT)-3K7w1jLuv zn3(1lqn_A}7V2JIUJ%iX;>DJDlB+Rup4P#z8B5tb49`1n9!F1i2k3^GOB*`D?RPw%x= zvjU>2iCXwNU_nsoUaX|}`dpr@0J*;+PT!NgW$?PX>S{!hfgVrnKJ8cBf4o%*q`V9g 
z${Z`(5qCdP9>LU+C&dPY{*F-!e0T(2-rnYXjErKvT9kBvM6&y|z4hp9N|l@uKLdqA{EAC!~C+*5+NZoED{uuK|d!l{4P&Ggv!@*E@zdrCeA@SrFLJnSq< zY#Q0GJv=I%? znqCh)zr)%Pb7k5Q$T|Xd%A;nbx%MuL&--Sbxn>PR`Yn-v1oML8GM+<}yJSzA{P!3$ zTGyjQ(8FKdZUMj-Pd;BBNKpa}7Kw!W21|F<&Eof*fjp%8o~GScW=>8|Nk+yZnBi^e zvAQ7b#+qDSc>h(F#~);pkh4tEGN37xgPA1R^BpGk8B}%yt{VJ#aBCupJ>fU45 z=C_|PVW^1g29|xi#9@w86a~pipf<^8Ig|+&)%^T?+gH>5-@V;WNC9WS9k{0iHbA{m zf7ulVbM^sqe;J&riVq7@QyI-;41G?_&Ck%*-vm=l<7>C_0rHS)D-5K|yGnu@c_`K& zFrH0$XFgQDUh6GgU2ELFCq2MfP|(omn#I!wuHGatL;U)s_x7=fh}mmIuG+$bHr|Kc zKU?4o>m}?ciN27!*h8>5V(*w_Qw9?;+;`%%WDpq~9ON0uR-#M7iR!yk^7>x4$yB;@ zI7lK7$;26H+lN}MFi=w7YXJ*RRoiB?@RK4~Uj;k`V1dG%>4D1Ix%XZd3%5~Y;bZMG zxT~v{fX#P*tcqLUv?KuXjF^%&i)lI?p#GdjXL}$Cp9!2mP(@y3I>CE^aeca0#OW*# zqUSSYqq-fEU~rTeA)qs1VqsDAw;udmXOq##dh(t?l(KtT#?#YtGdTQ!K?Fq%$Xo_W zGDO@Dy1JGhq3`eQfwj-p0w&W3-7@B^3dr*N2bHrQT`$2Y8%1A9NJ_d$d9oG8u3*JS zYaZ_rTyZQ4xM5l~26{_SBOWB+5De=8Jqq2TEz!G?X(7Sq-nq994a}RJQhuWXw!#+n zuaYXpshp_?!_goXzlDgpuzdC6^dH~4F=0Dnj6VIFcOvAq9J z&c0^s)9256=PRJcIW99F3>=)DW9Gpr+OppR;MWOUdiUzsq|+K|zi3^j?QK>AZ<57x z>N+iTlC;4*4pzlEe!#?-%YuHjCl;UYbSOm;ju;u*&)U|73sXs;PHMmfUikU#fB=Yn zKKM}fXPDWGg@3wc^Xpsh%#5Msa;4RX2cWi%G~)etn*NEHzrA^Ua)4~20`Tdv$)q=L z-dq+&=cnzpE^Yv#Jq=?8(9r&l)$KyU?AOQjA|jCnGNAK^7euB12BBR+`p2!YZIxA{cT4 znep16wV|2(HsL+HA}UhXvXvwZC}&$In?I9kR0abtI6+R4&->H`2HP?MtJ~E2c_(?x zTf#G0y%Q@QCaQnWJ_NIkhszYF4&LC55}nM?s*&iz%x*gwq4mHaFpJmg+*pe zpX5VcVUY4v83QgN%@sK(NAMJEYQ~}BPoIX=%WHnwH=eEEFWu*3DS;wGM?*nkP~w~_={BayQ{JUi(!tl%w%?_2-#cv_sro_1ptQ`%tzxS}ME+d480-eX z(O6Gbtbvi|E~8Ou--(bV5%uIOf=@Lh(ZqiByOxPT&_D1e!Q; zE{1x2q+an|WaoK12^mu>+t|p}Pj#wsH1LlW2{WJtTS>iywnFAF@Wc559%fVB0q-tuBr%aa0>K|rjWtqQv6~hK-jzHh?of#IT z>OgnFzx5 zI3lqUYR-EH19!?3;1QsSR_{O)v5;?7e#+5#j*DZVKA&!GqDzQSi(giikaBT^@#VFh%~Wj1;r;rvy`r?XmCc0R0~}l zEjoCo9;A&j6<09$&~S5Zd0ATPUcG{X0$He3n7o$pLmkEHG})Kj>-XTzTqC4%V$iTZ zW~#yLc^Kjt($+u9CUYfc9+?nQ*g>Ec+lu!@ffO(LD{dVEN-__W32Nn_4hof0q+nV} zR$n)p--HI?2L?bAH9}(0)W|ZVnzCWx#9c5&{QPfIBdZtoQkGTqNlyPrboZp0Jk%jT 
zq!DWCeCHB}qL1+oCgo~n7~_l>m;|Nzp(Ate!Y$S(EtZ!DfW@$p1L(*%dj8%+8KI-WVI0}=>JRI`jht8>39{_+tN&;qD&%pp0FA_eTlmezUr%4Jn^=gnWtE#1 z<)5JpO$yaQ6-cgoF316`@XgsKpCdcrWFisIc-MJ}&I0YVR7l#F)8_5iRLJ+d)wS?B zq_+tUg=(;>zf4eh;aD-RMPIEu@3-n>7k8HL>MT+tu?h)4Vjr$pbq>nKI0y_t|@EW+fQx#+r-y}AK7M|?ayh0l7%o15Pl z9w0}u^lepTo~zF|9ohf~uo)hv%u)YR=@D#gVSQbU5Lk{CF4ceb{#&NbeO8ZC2`8x(D5X4Bz<}c>!D8oQ z^sOY~!tM3zTq(g~L_-q8;H+#GJDF(9y=8VY@A&z@0;>AgJX_zZBoJ}r9UmLa`Up$& zBS>g!BC<_`#xnsi7bwYU$>}sg0cgtk4y!K_%(w|-rc0AuoZk|5wOL*$OV=bjQ+`fqj8Hh0Aq#`8smURYgJTkFi&YrF z4nijcyHA;;0oekaWiVMr3!_pB*E&%F*Hu81Yd;&rzHt%6VW0TyJBW@$3Ttvx7WLO zLt9F}(KiRZohWZQoe#FJe);)RWq7#r%9AiWFR(y0k*b9%>b>8-nF1yt5_wu2o-DLz zRQGMSPg9E=REY;2qxh&^~bNf4Y%x4pMUmYCH z_Rq6tZcf+v9-p7c?ee{0>0yWar&@@;4{ny6Vd}J|CW7w-O1)ZH1{wG?-7=-m+26k_ zlkXx&zuMa92i)Tqu;1H=e1fNj5r04Fr(qPG;Kzwic)f9&Cu_r3hQ<<`CgNrK!XJdD z6F7{!f8M?e^Jn4gu|}?s(LBqi&dfPmy$v6q=mnbB&(w#w{rvQ-MtC4ZZYIArH&;fB zje>vIfB9mZDD~&hpPJngs}UEC;pa=PRNQaXi9)Xw3~dodKtyzXuw|p!FGA#rCNCLI zz2Kwf)SPXzXebSYLqjljA#R%_I6u;q1esFA;Q9surm?#EaPy2gK&mZ+a(B z{`4iMIL6DB%tNH=nNC_>`l83+^VO7Q*ASCI8jDr*Uhnn9l8f z5G49cC>wA{z+zsWe*bf=Q6Ej?B_)&L>mNKS8WUE}PT%yY;baX;6-*Jq0SfN+BarD` zOJ^j9mLkQIqmJ6Ewr~xEyF$ET{AEJ@q>d3O{!Cp$>+FwT?+uwP?WWPtM$UFraW1@l z)_wBa=8Dd_YMHfSoS$e8&m#)7gNM*xa@@Jo^4PO+MKJO}H9;36ityyoSEpsAd9qVF z&4yzA&!2T3AggH(tn(-{q`D{E-LEcFRKFw&yAkw@KWjEQE-pzwe|Jd-ZAfZhe(^TNg-7(HXhEDJ*jNuMjH3MyRV;WK4nPVSOvb!>j4k{8|Jak!!QtJoksa}y0sFSGbjy#rI=B?ajM=LNTW zW!wd_*4KIUaMTCdv}bM4m2|1Trk*-4rv#?&_eU6R+D5e~!u-;06Vw2VufHU6kEP#AdW(XC51jWwKQRfd|+CjI0Uu(ZUf{xz!o)k^BM3@;D=o6hdm%zN__ z?GgAOU#0H{?fSCSL-D};C5it0&7O0mEFRYlM(cGnm@JfYqO2-KA>KUbVyfpB?4e^- z1)KfMyDVR68M_$NQs`EIn!QM|AH_t_QSY2oG}B&P-e8z9c96<2Y1)oZ?TnI)(;s8v zHl2H@Op_PT@s?dj$)|*2`~8c!W% zi0FjGe}$EO2b|W_i%ZlCRd|rb+_`||k}S^quwgkjphIX``i8ran$;ZE^#L^tn{W-S z%`Qf%{KK)j7?3ckGCv2Jk>&vgc>@1AiVr;dA-6OHuk*GW|e< zyEQLkqb|k=7T>z<9f;Nw@2%we5v*9@UZ;h?F`)6kw7w3)HI!}X9%Pl{?*0nt!be$G zB3JU}+xvzl(q=Ud^O&J3LqEq-{n!%j4|lZA`W&bMhT<#70v1r0(|5^S{0&dFqv@yw 
zDauy3-r9LTh8!LyIf3|Y`h`awCyMZyGZDvDrmWXdo-FpyoH2^V;AeT)6E-^%Y%r4_ zzMSI=zXC_=sgXl76W0&1BVOxTx1@163Ph*iRi^DapkhYxT#6lw4QxRA`>WOWlxW)x zb22w5n)Kyo)HLTfQXwO%!4h5{G9dQbDFlQCe72|b`;w8c*IzUYG3QRg_RzVnPieU~ z;HpS_V`IkTQ$ZUYx9(W(nk!Pc&Gac37S6Y0@p{xWeMg2_S>kg6i=u}nTvGYAEE$+E zJLhm4#@aAEOiyWbP4}N^sEc=VLN~%hSiO_1~YfJ zyUw-6I?d$$w_C&OOMcF3vV#*b%j|#;lGuo@H(s{*Ie&xQ^d#o;l3q|yNAz4 za{-i?3u|klt?p*>cHZdTZJ@HI@5>WuC{2&t z7SkZ~i?Knl*EYG`IhZp$q3<7fn&$?rq;{`A7(zH+cvLhtUKSZx=egwyY`wf=^X% zU8}Ty4~`$;C_?hYoqhZ71h&eRWD@OC!>Q}cmy5x+QC|3G_9hpza_{)V5b3L~87YQ! zSi-mou-u+O%p@_LvBinR%I;aYR_EWEkt45AB$X4Ix;8v$WtDY;`4l<*hp|xBDv(}u;wIN z+XR#B#Mn*73v9w zZzz;}PN0%bTA446T!eqBeD)E>hn!f-1iDi@yckiA-A|!=HmtF)VtNQh_aOQ8kUP!> z9SdzSU^fgmbJzP-s!1elBHti})`O^^#PX&+8RGBg*iL^vKUWgH@6KE9ij9q}^tn;i z({iT%{c(AhWe&KPDr1wuXFSoMw~HyT{_oz&o)Ou7c2^dv`60$PjV+~t(rApSU&#IT zoBDs*|IUSNm!AoaxCTyE6i~R&)(!aRCsW$0l$D-Qr9lU-X5uRUJMCN7hJAkjhUz3L zlVn(r9mZlNOg?bW3#m_1w^BZVDP?Pd)8WgieV^k8S=z<-L5Nq{6WCoXye3g@w%Gos z=Cg3AhFGid#%7+zjEl>!Z{FIJ0sE@Ktu97~Z`gRlKf>UTD#I?sy2#?)6KkE8D~g*g zCC*aDzH5YhC^m}vT_iBy&EVVGtykZVmqzFISS+tHZ&~hFTw0_&<)`*Mvnbrj7r>4` zAb03PS$UjPXsDhDdmY)G(5Jg>8xGkwxzodz1RUma$S+wNF|jUwK5hwFaTVK(Oh-3D zgXjL_id%`vg1s_tOdoH?N8SKlm0ctGu|l0YZ+zXXVhtYfpY0?{^S62T8Qq>Yejtpg z(g=Gx^lW*B#o>Ma4_8KdYTkBrb8PNn_nu_wPJ*5xrH0oJ5kJ!Fs;4ogXP6kPNq5AFyuO;NVT#V-^WjopeLE`r-;6E;c7^Ki_wtz8M>r-3ot6R2}AMa7& zErUV%_KiLjq1;*v(49$Bd=$F#72#K-xZkri?Yi-{rj|Iqp~1J-_el*83UB#t-0*=Y zBiG?3k=rMciLi}db#7x>3M}D9LFuJJ72TZKB8wIUZK`4@)T4t^HJEm2K^%XZIlBeY zoelYE&ib4(enbYUt)FC>`!s>$ejhqiy1VZmkOK>aTqywLidAcESWy4yoj+7l5{gU9 z`kV~)&|dpZYgVV#GNbOB;cJm?V$kQPub@uLL%S&O0$@xK!tk_fPz4oGVgNWL-CSFz z=KPfqxwbRd^>q7whVFp1qYOk$v`6j?b~LajCD2SYslpSPy{5MB}W zIGwYg)OMOaSJ=W_`F*!hMn1&NUt_`B_!^yI@tl1-8uQ5mhtiN``co&PrmrstTY$C{%+xfeFY}ai&N?FW!hjSy|CD zA@1Q>n406Y_v4lakd@;})d3~c^30wzC?n}^p!Vj9x7Y9<9SYv7$j;?NA$6lKo(}`u z?a^BlQYTqjyRqcy+9ohA&V|C3rb-G?TL34;N|~U=g%*I!>a~Sc^{{kB8sd^9t^M23 zJ$#_!mVBIV)oTmR=k69Kz`wvS=i_ZA2mHtJLFD;w9P%T;A=f-&$wuqBIXDYaV68wO 
z639fXt*optI}9=n_zld5NZrnAzgMaI)BM#NHSTS~z7p!zJ8o`A;A)#8qfaQDk_ope z^F&AZ1!{jx1DIZoB3qYYgWrRIz2DxUC55^Oz*bO-z$GHexkpc2n7iwO>%!>r=;Y)? zK0?W=VqCny{=8Ntu#J&kNJz;0WH|vSukir;q@CEwA=03wRKS)4JjiDKY+`r&>9yHr zQdiZXdTwYue>lwIh=zs+0ZC71{2oTD5-f%z5v-xAnu-|DR1V)1CN_pKuC64NP~TMn zfDtMUw_5zu5F2h$t%vQgfnR8!_up@)uDPNea3KDfn(1x?dpGbVMVO z*S6kj&CY`8p$q4}2exLDa;$>{x>6Ggvs%t1Kzb6Tp$Nv?Oje(s-mPWVQCnOiUKLw) zFl_M10$nf+H|VONo^;C|_ z01Lke+Yq{0$r9^nK-izhR+d7#>10T;xfTe4IY5M%u^^T zc(QQ=`6e*Ov?amx;2P-9m82q1J)(Jc14&#vlw)7LMS~^$2F5tQHsAFHV6$P#ji@dK zQ?fomR*#DK(t>Gt11^TbV=|w?EKPkK7e&X>&;jsTv6lSd8m7VcFj_j_v!4-p-JfWW z*cd7+tCRCl?~mx{*JtqOg<>s+I-NM+i;)La4JjY4znI2u`mM#iZn^{m)g91y9WW@5 z$)-^W5Pz*NHN7dH5{z+xwKN+*9G6~7q3m|u4AgPd4>s5S@kksRO$xWG^lZ1mK{W~Y zIq+Z4_YnYs7~%&;62yj!JL%)gwyL{5 zVa*4HkB>)B7n6nslSKmc}zf01UgZ>l9UuP7bF4-QDG!5(lH0WSDB z>Gikb@FG(p)*jIp2&2+hsgTW&xS$SqZ67cQ9Eb>F!YtQ@vX&%3CNHCDNg35nI?;zL zwi{qN<7C>x9=gI&^Z5;Eq}X$|gV5-t<>)^0d3sf#%QI1C{&Tl8qjMdUe}gm*3C1%v zCu1km>#u%JHmswVG4A~QgXMbUBp*x*TFf`Jx+n$?b*3%85)y;ri9`B!JCndQqQgT& zQnyK7q_W4YkR)2x(YS)i|2%hbl*2Nx1%eu;w5Ol#BJ!g{i94B!cohsBu!#EVO&0 zrrb+?)!>qQ5}XXO?b!}XVg#CXeP3OhmR7G<$|j1n7+Upxzb1NQ#GWHZEE4Um6GF!&#GS`6BrqDJ)Tx1udvn1GiM7^VOk06xtZ zQA43hzow_AA|=$r3^u(i#=XY=?XkpB^VE(SVW3|%!@mNmk49BmMX_kJ!$5v%I5EVM({`cZN!#kAeQuyqy6 zU1Tn!-1xAgh|+V^XamFI=&Oa)rhEuuh?;wi>j;(#=^MLX2Bi%lGFOx_!`WzxIx~Ra zg|w_J>h@3lSr5RC@^?Eu+iv2}5B10k$mu1O#pwtjUO@FQ#7&)(J(B;k)CQUcs>J%z{(mBfPVzY z#Cdsnc!D861r%;UZxQ8x{6R{og3tKWk~t54_$q5&NFA7LP; zBDj79?&cDmYy@`=0<8|iTFu5^_D`3VDGD+<5f2^ann86u9WCupW1^tFp?+b>>%QTa zAh%15gYx0aTI#bxf?k*V8kF?Qz`#0wY`U-V7MuMr@bZ+f+)s}CPwhu}@gb1xvEAKW z8#fde!|yA|R7>1KdAm40td4$}N?Mess9G+I;r{8bNmMGa>EV9Fq}0`KKOM}Gp8x~m z!^7ZePHJ zuLID6LFC1hmOi}b>Ftf+>KZRG!Ds*y4D=7{CGf?JfIoyswAiBGe zfVs60%r^55Rr0Q1GX$tht*wEyk|mfO!@I;scee!;fjf73+VPgVe;;>wW+9t3L^OmV zF%`ib|8^9aGUXQOs*k~@kBrutX`TU4<|fb^KwZ7qK>zRs=2=2S1YtwgCSdv^XBve2 zYaz+<_Y^;h*@MeH0WEM8oO9&9sK55oy&D6g#;z#RS^-zT=wG~i`LdNdc1ojG<%XDE 
zh}kVD6zF%z6tkV>sN7>_j`J#ouLJ#~A-x};0e`Ppr&I#fvyMxCaGxVmTvDCa}@w#*1R$Nkq<3v|N-J(!NB;fXwHVnLi zt&Pk)-lN++<>KVbr8yAb=8k&S(iLtPXhu-1se>6ug&GrqwiXTO3-#5d zX46XHM0V7m1WdtkD%5e^_1HmVn!xcrmy8VGiGNTv@uJUjXEwI$ z6?p&0J5>z@JWO9q77g5LaE|&6C=wiqQELpUz>f#7_8q%o^~;UA-<(J^;fnVZmAtTT z^J9X+5)A=aQRd!**izE?DFD;!)5BvK1M`^OrF{^)@4a!hhIn%_ufVa_7G#o`i+ceN zlbJ;l244^zsIb(SB&DNMDCRb-e_9Mw!r@0-(}2lEO#M{*hqDVB8rr+q(GVc%Tz=JA zb9BD}T-G*AcL@^~ZrOEF11K_8u z`zvD}1IJBUG=v;}sNSG9{rNvQ;r$(&5dM>eqXrTOX{GE)a7k*Zbu_57Q90EH>`1I! z6rYgadNgy)-QE457|eGC4cW^y&EFKfwa*7;Q@4}Q2|VP_pWiL512T3Vpw7WaM`zC_ z5O6n3tfX&~oZZmN5ufGZ<6EM~XEnUpKUlh6334b!MooS8wO763_Oq}@k4oV897Lc5 zj-^TWo(5LWAybTg(GV;GAN}>255iGkve;9G#7Cr&6OX@A@z@4t+jVkDHI9Su(@CS0 zQ*2+59>6cENQSL)>74^6@fq;guB!uS=m>H9uZ4xHm7sD2P=`Tb^6g7*<={j@qK8{6 zw&NMC5A%-*`BrJp2Bj`td;!1MruP(ZO5YMOC>H}Io1P0daJRCJ>{Pnzn*8fXJD?bI zFgZ2l1^x+4K~P-wV$4oX&=e2@>)f_Q-L`IJL;{UIFdh$Dv7T*acr!szXH|I;y*5yT zYPTY|=kRo1!4%DhC6XY+s3* zU<(`lC2JitU7m)YfeT5A#+|W&_{-8WFpo4xK-o{e*4OU7Tp(HYs7JC1tD9mDtGPe?P2Hl4QiiuY(_u7(mNqD^mU+_ zMolLvehQxohO0peDk_a-Ff<&M9{^uCV0xY^GUqc4W~OG5gmzUEQc+4V3IY~JgTc)Z z&0QhRqII)zOw^#?M@1^FtqZ=<89Tpj+z@1!lM>e~=&D@6=5}b~v6`uO2i46eMZ2IN zEa5AM)d8lps3e1B`qZl}|6|$|BbV_gHH;22tq?M!21=c*6{A;0&77d4!BUn=E}x6X zDYSwUz0d-T+ocOtGn@=A{PPc9bUMQIZs@DI)rzvQSpqUND3L*N7C$~T?WE;-a+tky zv+j{-u9f}nnZKBWi1^bb3h3#UvNsr~{^I>N;@~LXw`Qy6c#8NVzMV{Z!}|K=#VUxafyeF@#$eh(T)${?fwMoP?@g zQuUg-Ee^w{AaQK=^pD%#g_9U`IdSj>$t?7y-rc0e>UNNZ5{-fAxl_{axSeTfFHoPk zO)L`+58v@J^g+;zBnAZ2T{Wt`sTxwOEY9WcgPf{3GJG>N z_XA*o2^sp1@WvF~6B84B$$#l!1mNeR zyJ`-A8n22IhLi*+PwBBQpp}_z*)>JX4IkH$iql)*1J4takf^z9ORc2G3NEp;WB)i(4O}<}{3C9Gwrv{Ld~13XnlP>E3uS} z44|@wO5Fw~tB8#9zcHE32#k>)`*?{x9yaI6nz#)>~N%Q0l?w0(cNWHXWhM|K_$_!pV zmm7}y66$B8$?2%zhkCG90=lYC4p+nRB0}Muxv?`M|A8d!f>Ds9DnV5Xpp+R)W#lHo z25MNZ#|3-`k%kQaL7{iJZPeshl_ciovZg)`($_yKH%NW%Pbdu&`ml8=S|IhbC_LJQWH|c6tMI@rQ+o^YRj@J38yh9TE^u zchCI(EyLP6oudq$X}V(B;Qwy7IoV#3@Z?IQ`pRH;5O5aE~wM`-o zI_o%y6nI!yUjZyf18klz7wE={lCnzW#sRSeFxJzsSC*wqQxCr5aIt#6KI9}sFiqE( zo__#>iiMo+C9 
z0>{(d{mI*aBye~N$8PBBx>=;&_U4e3YNhY;SCPT~jsw-7$O6gWDZpYb%v?v~=^c(Go~ozmvCZ7dj!66z$fe6uqYRB{x|Co3rutjH^v8s|P_AUHGY-*K zmtjYHt%fa%uJsM8`@>;gDH+zvj(;|!*X2p(t>@9ed}7d75|UMs^p8s%7f%^{rv|6J z39Ah5!bIT69c)6yH6#525;wPnV<3nl;=cDQOTk3b7Vz38tKGJV;^R}mKJaR>9qS=!O7+@sN<)Hr|OI4)^Evkq{D>gVwE-sk zSt(<&4iPsTLdpuah&+=kOc3j48i+-qV~-@ozuY?7TQ2#tZp)SpN`t=GrabYjk9)>| z2Wz_cEw^jn4_jDt1M48^vh5qlDj%(+Bsk5#ZG2><>Fcq+Tsa@30xRtw|J4s7W&!Va zUp^qyye}?2$+ba{*z0ldK-j_fj&*W96x`eLKSZqGK10z(;`IYN4$gXAhTM9G@Ft(> zYK>AeVmnf+=~_#h!aC$<_F=?=`AGg+c6McMjs+xpC#|@+AIl;+yNvnq0h(&C7MPZJ z2MG!_pf*B|tjiy3v4(3;L^ZpqYl@Zo(axjHpGk&I^hynq5N=2ESk?i~HlOm$&y5p4FK(j;egauog5!vyVlbeZ>mM`R#mZyTj;qv&Gl zn;@IdlTiy(TSyfV8GZKEs?~sNf3r&CEoAeVOmMHh$SVrG>Vw(U)y)mt6w%)xJuEjB`=&;CM9i+El>4kEoG<`f7v9#oakmN- z8=*BM3D{yPQ5-Cc6u6i!(t=$1@280}ST&;BTxx$BJmt=XJx=z>pZ&NP{nJL}&7w$s zyBNFZ{Es#}bSzEM?hDDgcLXV-Tr~#9pfeWSHd?}N_d^n;HZ{F9%o7tixDCIgcmAjh zPI~v!B)ln4^YrM-to_~lRETZXKYXRFDY;B8zT#+mXF2L9obCfb9~N>VhzNwuJ_kj4 zDrP}Zo)i{?zLm=<82;#XW~4X{Q_T%fHv_CbJ|8x4v#;B`p99}{&++lpS-VRwZ(FNc zyeO{m3S#st1J}`H=QofsL&9DM>`}Xh(_1ZhZja#Oj;m7!uYBsUpaN>aOcloP59Ws( zCx)f;eNS+1M#A_8E;9Q|n3joObfaMJ*w_qNUjsm%Q+xe1g!dQqtqrNctHmf`QdoGM zu$u7h0pNPJTBqVZYw}5wjf$kB)kERjbXp5`{)c&T;ts$O zfh4Dv!nH-I?gxe(g&#AQ{KMc0>$^54t>co=)|Rz$%3%iD@o%Mj34yz>Qx$zvxNZiC$O1MB8o_Or&4`UMc=<;$8Db zyxkoq1KZ*gM4I0g%O)IbWFJu*SrY8>QaD^f5)goInvs!;+|}O%lnHQe6wp^K=eYOj zO#K0Jva+GZ9mkc8i6ckZeb~)yL+4^p)^=k1*}+#Xy?BR^emlpfgv3{0{p*-Qe>yTZ z+!5HE_|>2Fum$g^g-!)G^&$5|-Jy^A-!`mirJYwI)4IxJBCJ@HUwfaf$u>4ZkXHE> zc{NV^FFBu4(9|fwFn0vw@YN zUvqP_*g5?#pdFUPv#gMevJh<%;23b}KT4~2gQv>3+8;Dp4&Mgb4<+UFgwO!k_XZHs z{N>CRRyO(7d2RSuqbNp_H7kmdByj@}^}w0McYV|z7)TL1GxLOoR?Bko;ITkhTXD&U z6~FV#cH5^4?ZS!Le=I6z5tdRTSOweP|dx2j#%%Hfg(pPw>Bk5k)9P0zXPdV+XX5_vv)5B2P#*>SRRKN%< z4AP8y|DGvG8x}3<{gX}1xhj9jl6^B4cGMn0jXHGU)cwvx5qo|UH_b^Uqx{=igvj3& zd9b(qAhDK}ISYFjP!Py0TTzqsfacI|yD4S377qB}w*vw|-JI1OA9^FhB;fZ1)NrnL za(LX!C{D1esUYN@!BrDiGY-SZ&*i$!}N zi%wr;jY#u2iedd{RhATG+-=mb$Te(0*22qeY&%dSprns?v@`rnte%IbW4nK>IBW0N 
zzTM>Fpwl@$@rL~s$6+ogJUum--CTB+q8IWxH%2!P<0d~k+BWJZ>s#6kdE;~9L}yh} z>PTqCe#EJGNh}sT1qh5=%J>_>lwmFlD^^vuXUYajahAwDjbM#}@r_GxQ2Yb&w%M9` z=u&V}6+|rVfa`M5;;=`JWoHyY$Y&e2?I0>(~xfvVZ4 z*z{oj$k_oJ-do_t+UyPfnvBgPeLHNn`x_^OyWhZl6wW%(0*{7iZu=~NHA;+HqjqY+ z^%8bc4nH4zYn^LhyKUcNKNRAT8WOZ`A7U(2WVW9%1E+yrgV|^C5&_OXsTCq@ttOPb z7Xqo=p50K<;5H+r0ZT`bkDW9WyD&FyK0(@nvCa5@I{B(X1F zBqII&eSXvDv^nE5y2G{RIB1oj`2_jC=Oi54S_1hSQfqWyR3}@LB=XMnwLTTcj_)gM2#Dt;p`GOL5O&VR$4L_BPl%nKJv}>HGig44KR2> z4=hx1U{rnBNHJ8c65o{vLBdtx%|$%r&mb;+kgmqJj>qA0Kdct8Y5qQ^qZEk_c70;Hn}e_jV7$oig-z!D&$sg%S#P0& zW6UZa0$;zWF3+y_-M24P|TKaBWNV88oQRDON^ z`t|YS$KKxFckbK)^=m+8xH1l=iV($HQj;~;|Gyv??Vl^+P^3{9?f(#Pn$j)Yb<2eu zy1U=_3}Za^rZFDpO>Lv{OHqP+DdBXO?q4qI9@dM=3%Hnk2!wuiWelqw(o$2!&Rzud zMOf_3BfT+ReO2>SxD>`SQ&-WMDw93?trZU=_cze3bZ?bh>5|d5MEJjXb0e!Z-9h;1O@_5tgCDsAu^Yv*%>t7))QCr=^`9onA(%)!#)Y)PG_;Eixma1jRTpufanm$| zjm~4If+Yu+?sXoYnMCmwbV*dG8xa3e`yh$EjO(-?7YLl21bm#H7Ul$(<^MSpjO zZ$*O7 zgX?qAUp=5uhEDO6l?Bqi--7cKV)y9LhQ{Yr=_hTTrx|)_c#nMByyy{WK58p! z9je@6Qt1W6ca0K;%6Z&Q7?PUU^6QiwHqlBkerNYhL@&~T+iRhF>3C7L(rO*6gBdmE zX682^PttO@I7sE(pY+PT_=wMDY4v~VI_t0~n=kG^B9DMdhysGFtO6n>N=d_#3(_Hi z0)mB7A}l3{f&wZXqSD>n4Fb}=bVy6*B1pbxF`nQ1Uf1j8U+a4BJ!fXloSFHY@97ur z|CFl4&(g%g#B{lv>P>eMmhqHXs*2D8YDLp6M?NA?^~uT9!ezbkXZ#%dGeZ$%n;&H- zc>5nBS{SvTU5UOP)rfyBeI2Z!g+&Ab%C|F+{uH9X@)0(WFA5<`)H8FgXK&tkkYOT! 
zL5z|rX8UVlaq(Q4M$wB!iPZK>e(Y|!=jF|lMjlpD)k;aD-;bT49t|y?wVV9sT1qGx zX^6^cY2)v!?lh)~L(38Vfu5}{*qQalXfk3K%}e*n*bo~}yt++_^_Q!M9T*NGdoA7c zo6b9IB+qN9J>YTJTvD=hPZMOnF@s8N=T_=A$uxUE@~#wY(viDkPu0bo|Hl0!UXEt|D2B6i9v5Rcv$7$=MvWyw+NJ(hezLq&L*eMIrKei~mISt*ZOo~5VE#s377xbrDhg`<-=^L**>Qf#@Eq#_5PbIrNKj>9~2Kht)ul4%;2 zak7bBv)O?BXon*viC;(I>QDud|5}zeHA=K#tem5GawZ61x|L&6#=KMZuP<^-3Bf5h z;UTM1?;{jGKd18Z;wd!nceFVOAevqPOEDoq)#Q5QJlnle0J0tvSJO$-)D8}g$#q>y ze{XC`^~hWbYsqyDag_v6FyhW>k?FSP8^K|YKDYu>xKi#h?JVkEs$l{^xl%xtRwhXu z=m`LOE6*rUJxF{_k5>FQ4KS*8n-bPex6iti>EDaD#mhNdbbyE=LHDUv zdUKh~*tg^=)1#8o#DL6D)Fi+=HYt~bJ<0jP2a@D(80kPh0 zhImzvdjI=v!L|%v&V1&WixlxuHS&a9>VWOm3#z53L)=Ufk;I@I;QNJma;$;=vq4eN z43dI)H0tT3-KFBA8paR*VagRKF}iKk_UXbUYIwy1Ckw(Yio>?EY6A@f_AFka4H9#b zf}|-V#Y;KpU+5vjcgNq$p7vryy2s zm4x*ju4OT%l`VL>s23g{2E{Ao-9&Z&lhccsLA&R}#nzvueucD`PZ410cbXranpm8W3?&C4TXt zOBu|n*7QEN1UIQaKVd0kzjj}tKd^A~?$hcjl0i`oq>jQgzA#Wjz!rzYkHqY(I$cX=0l0v=P}rnyxYqY zd!XE!9t zWJF8Cq{tjv5?!}1gYYGkv0!yfXG-%W*8#T!}mr znk_$USpigGqZHXr z2u|>#?)!))OQuU=v9&7@)36(8X=(-n%BnaC;sF%8p|=;$cUd}0-%IiACxfeH;1?yV zT@EKR1+jF<)Y4LV_IE3vp6B1>Gpsm8rP3U?3;n{Ygvz{maJn};y0kUJ03QkNH*tya zhB5pY^cXTjR8L{f(2KwC`fY_yJ%Xsz8MN~t{f;PLu$Q$ux9|CQd&lo~x3nxKDp(u$ z?fTmEux63Sm3CkBnY8^!0?SZW8LpI71Kr8El zWq@LeQ@SIUX=%E(_H9#k6!ikU3@n5$jcL%WKo~vZw0_4Ia^}gChEpEx8jtc+>%^B^ z?NsB_|Mj-){2CsL8bh+3nY-Jo+7THk$>q_R1!MHk=DtR_wIwN8}SY+7&^E z+1Bb?$X{&(S8h3?(g2w!R)DK`yp;hb2`=waB#WXH!est zX*3}Y>*u}*3y|9+%dBPjn zYw4FdDdr+yTqv&pxJc2__wKH%Hy!z!>u*ZuZ>eU`|F%UteE)9dL;WSLX0*BJl4iE|L#6-xnuNe40^CPjQBYWQ^1;Q+UMa zu>j%Ws)JObd0^Tr8gn_z8&>3+8W(@TmZEbR4oTH;By3<%QLG;X{BJG5tH)YSZ3U8NwO#M8@b_NckJx%0u^^&QAev@$*sx195^1w^&?16WiN&AvKl zoy9z8# z#iHD88an1zADsHi`nxL@PnFQz+{TeYl$LFUOs+VWF_1I?#slCdj^9+j;#z8V{i;LD zS&Zr>rE|_#x!Kv+q6-2yy9NRLbH1-<*q!~L354f?j|EcjG%f=~7&jOC1M-Ch1R!7E zN0o_-O9gJ^_iL3u0xcPMXXwFw+-TKNZz0fx^0-!*@07B6M2#p*0S=7;Vbkx7MWC`C z254HP`DG_fKxzjpo()qM>2AZEx9t)~CDC}Iez|uSUdaV|G(-_g=B2QHrHty|dXhlq zaEg{z-Yiik=i$uAQDI#1(!v3s*-1cyy^c6UIEg;PL52X_<)bVnKv+<{4*0*vHyj;H 
z!@~4H|Idx|+(TpIEyz6p=uJ6Pz)y^Vnp&0tDPS1k3_2bqit|uy>Y!e?_S*fI%!>|< zfvmYsz?S7sxv3~ZgvVAQlggdwTIez}9_uAqz*`C+x)k|u-{Soa?OH-Z&FsPIO5HgR z(R`4lq0c2%qPKl7KX*V4KR1VNM@6`(l{`v7a28{V;K#Ha%){$(C%HP=f!N$|iLC+X zoq<&83?!Ue1hHw6(F;g0tgNrcaDwf|%fBdR)_nci80+01#%bIOgF1TAi;hpIpi4Zt zI}tL6*m%4Z=SC@V7qF{9pW(L*^kon;!=^^vo%6!rf1?^ntHXG^su_m0dEO2~Rl_@7 zqUa0Q)zKfI>91x{8E`)a5<&j^>ha-l z8pUAT6;E0Q)gng34=O5MN2sYk+>x{X-ipZQ@QlCtGk%iOb{OUw2KRS=={l$0g{H#X_0={nZee zxUM0Az5S-4zl8@FwxO=h-LDdKGjDwiyD_?I7K{qq-kEjI@9JRX;4tk@)>?46Z1XS% z%FOPsFayjLuB*KqOSW<~|!p5$1WR`t4sX~phVF7y=F2HCKykX~s z{kY=b%Cz5<$jHhsHYmOg+lPvOETIoP=g@)%U(-LGv?8h<~+e~2hl%1h?m*0N9|~S_@+U^J6Eg!8oQ8?{cn3XZF&n>+!+#As% zv3R=o<$U^TA_LBT%^j97H&=v4TmB8lkiMtqHs8HA4vR2NmSrJA9M zU;F_8cpS;pz*6R|oVVEg5R`o5NN?CjUK{hV>@F1Fc`l-Jcix)vUo39X9twJPxJ!kN zB+A#7OP< zWx6O*qW2l0(~VTx-*;G#T%Kdoz7v{6v&0;G0Mo0D5r@rX(5ax;&&+xkE`|@M@FyV@ zO#i?}ReY=L0`ptbpzsTPV&us^*VET0yH30)KWED=J||$7P_qe&eegVx>{I0u5%|nJ zVglR$x{@WlKO4|WLE*Q+_}~=T(mfb$^rO&(?LBVbYii%xRyNvk3I~&2)!j{1F0^R` zYR3cb9sM5U^q}79#Bugt8)!ILOWB&5hiuG#PHUZg1!v|DKHrN!OqLX-w+3!Ko5GBd z_`aM=Vl>|Csjo*zzX!I3z|+Nr0N(_>9gt`==re>3jdBTWL{ZPJ=;SoCwQ-4vg#N=Q zEG#T8&hSe;(FI9Nc@-r2#9BY_!I8Qo;uo7qEb=$C>_5BQo%5}u;@JBallVr6U@`2i z5UfO%*N*-D0wMn5O5#t$1+kX3Q{qi^MK0{A_!EKOZ*>j(s(bSA1rLoK%Q0uSJ~QhY zqjRSLv(8myl^znX_4(P5AS6@$naga-2+F~r?Z8UVZZIEyx3ja%p%>$^x6`{kVn&2_ zMTcK~CDf>ZV3gCHDbpyiY5Cqvi`Tvx6tf&x-41TfcQcBN4_A3g-zAjYo>Lc zird1v6=O`q?Vi3K1Qj{CG^hRD*kVw%;Ql+3Aj#E6_ZtYwNxIYxu8u<4>&qM>vQX@se{xEOHArZto!Grm1~E)4+#!u&z>!P5q4qqlSMi0FdmcKnC)VgKLAmMWWg+nmK5D?58WWYvD-Ft6c$(U zR{qt{#}pXV?2)wFaz7lV8Xv!>R*Izo0`iwmSaoyiFepzPKduhaC;*}bB|aJ|tQsuH zFv^%1ADXSMVOdyoBV9)zXu96Fx2MI(k(&+`?*A4{a|&K$=P+eAvuLlh9x1iV(2uif z0e-`}+70s@clDkL8uVofw{T#F5$B2v-SW>34g8Fs>d?t(%Tfx0y(EFz2By)X5RvVEh(QuKUq<;q;+{8#CYA6BL;CrGeE;1xDPP6yT*@q5v zSBmW2fnsWDAc^PXdFUBC+!DUm9V1s)bB>##V$c9t%Feq$hqQZ((fIn2wIL8>_hkOgyMb*s6JJ6W2){|l0n zL+U9Xu9=idyEb)Rb-7Fs6ydD@IebaVD=J@&`Oe@#zL{*Zlvz`Y7e#@h<8C_WBp&<- zVQIy8=;10TfrwLeuM)O=T!zC)Q_^tk?b%Yt%|cEQ=WYJ65B>}w&}R%ah5ZND_(5-~ 
zTp0AG9GsT!P^|uAyW~POPCB4xjk7T~&@0JzhCx)u{BUb7iLH&ii)hX3G`dRz}L0NSY{KQ6%ne zr-;H;@qg39I(?!itq?@;PRlfh?`b7rs~T4)c5dey-^qE1aj3fa39rY5wA^=8&3Whr zC3)@{sjqL$dxE^0CxiIMot}=EigW{urdGk%uLYX!u&z${tj}Suv0f45R7T*X;`VBe z(PP@F9XjBC{>V|WI7#8f{+t|Z*BfSicxF`vBr?*ENV(z0m3`0|ibiGj>;6Snb)Yks{C`nX z3nv>U8*Ty>jj_HH>N{E`g{V0SvEG9e0fD*5Y@0<6scN=`C+yK=Pgu$_3nI?ZFVq~@ zV$Z4CC2eXYi*z7k7^!j^8E}vQ?pOqi3bI`s7#S)(dhiag@ZX+`7<)k;(VHQsA!-^{ zV3)7d;IKTR_uz%oOgrSqVGJ{_o9W(v*wgwYdIwlToK1V=`4iNh8M@sw85QcYW&liS zg@gD}8ORN}x(msA&8cjl?-U6g;~{&TxTaUq zGg6MI01d!xCR-EM0UG9QOH1YxB_t;ml}s!TC=yjue?32zY8O}HJmh6Z*VKbLe}+vI-FZHq*T95 z2&h3aG8_Nad21D|O0jKsh+l^E1c$uo}ra^Si2RiJ0u_{k?hn@YW0E%s}M(`9--_Oa9Lw zhK?pS8$a_=K}?qfQ$T& zN@BIJRm$coAVajRdKa?CNjC3W9$2}djXJ8}0}h7o$I~+pr{S;2rB>B_^kx8F-QmyLRi#=NBB#PY z-!iU{9^%{(W;GmI(;H|q8fJw8eC5yM?j}kZl^8`(dVo0YU*`ik<A@T2p-~5v zny05HBg(0!-H*_K486G00^ zZBBMJrTB*B*q1jDI@xjm2~zB^7t$dvgTP8$T$~H7^~~1Rpy(O%-c)|`C!Pe@h)KDV z;T5zK5?C;DaPT85>xO1>*xkA&Li;C(G@XMgJWxP_4D?hKH~)#H{7*Gyf%AeKR2Xsx z?tXN16!y>QX&32A9X6hzP*sqDIr_I1h9KmW56UTo=L2Vtq1Zaw#1Dp&G+4H8WI&?bIDaDDYF%MSNe`pKBuiJfR`2~a6$Huf u_i$3cy!hkh@gE+}3;5*!^Wm(^KKZ$CldP3zEOY;XKk_ol(&>_SUH%VR8Gir( diff --git a/bitswap/docs/go-bitswap.puml b/bitswap/docs/go-bitswap.puml index 49da618b3..6a291dc35 100644 --- a/bitswap/docs/go-bitswap.puml +++ b/bitswap/docs/go-bitswap.puml @@ -3,15 +3,17 @@ node "Top Level Interface" { [Bitswap] } -node "Sending Blocks" { - + +node "Sending Blocks" { + [Bitswap] --* [Engine] [Engine] -left-* [Ledger] [Engine] -right-* [PeerTaskQueue] [Engine] --> [TaskWorker (workers.go)] } -[Bitswap] --* "Sending Blocks" + node "Requesting Blocks" { [Bitswap] --* [WantManager] + [WantManager] --> [BlockPresenceManager] [WantManager] --> [PeerManager] [PeerManager] --* [MessageQueue] } @@ -27,13 +29,16 @@ node "Finding Providers" { node "Sessions (smart requests)" { [Bitswap] --* [SessionManager] + [SessionManager] --> [SessionInterestManager] [SessionManager] --o [Session] - [SessionManager] --o [SessionPeerManager] - 
[SessionManager] --o [SessionRequestSplitter] + [Session] --* [sessionWantSender] [Session] --* [SessionPeerManager] - [Session] --* [SessionRequestSplitter] [Session] --> [WantManager] - [SessionPeerManager] --> [ProvideQueryManager] + [Session] --> [ProvideQueryManager] + [Session] --* [sessionWants] + [Session] --> [SessionInterestManager] + [sessionWantSender] --> [BlockPresenceManager] + [sessionWantSender] --> [PeerManager] } node "Network" { diff --git a/bitswap/docs/how-bitswap-works.md b/bitswap/docs/how-bitswap-works.md new file mode 100644 index 000000000..749a5a769 --- /dev/null +++ b/bitswap/docs/how-bitswap-works.md @@ -0,0 +1,142 @@ +How Bitswap Works +================= + +When a client requests blocks, Bitswap sends the CID of those blocks to its peers as "wants". When Bitswap receives a "want" from a peer, it responds with the corresponding block. + +### Requesting Blocks + +#### Sessions + +Bitswap Sessions allow the client to make related requests to the same group of peers. For example typically requests to fetch all the blocks in a file would be made with a single session. + +#### Discovery + +To discover which peers have a block, Bitswap broadcasts a `want-have` message to all peers it is connected to asking if they have the block. + +Any peers that have the block respond with a `HAVE` message. They are added to the Session. + +If no connected peers have the block, Bitswap queries the DHT to find peers that have the block. + +### Wants + +When the client requests a block, Bitswap sends a `want-have` message with the block CID to all peers in the Session to ask who has the block. + +Bitswap simultaneously sends a `want-block` message to one of the peers in the Session to request the block. If the peer does not have the block, it responds with a `DONT_HAVE` message. In that case Bitswap selects another peer and sends the `want-block` to that peer. 
+ +If no peers have the block, Bitswap broadcasts a `want-have` to all connected peers, and queries the DHT to find peers that have the block. + +#### Peer Selection + +Bitswap uses a probabilistic algorithm to select which peer to send `want-block` to, favouring peers that +- sent `HAVE` for the block +- were discovered as providers of the block in the DHT +- were first to send blocks to previous session requests + +The selection algorithm includes some randomness so as to allow peers that are discovered later, but are more responsive, to rise in the ranking. + +#### Periodic Search Widening + +Periodically the Bitswap Session selects a random CID from the list of "pending wants" (wants that have been sent but for which no block has been received). Bitswap broadcasts a `want-have` to all connected peers and queries the DHT for the CID. + +### Serving Blocks + +#### Processing Requests + +When Bitswap receives a `want-have` it checks if the block is in the local blockstore. + +If the block is in the local blockstore Bitswap responds with `HAVE`. If the block is small Bitswap sends the block itself instead of `HAVE`. + +If the block is not in the local blockstore, Bitswap checks the `send-dont-have` flag on the request. If `send-dont-have` is true, Bitswap sends `DONT_HAVE`. Otherwise it does not respond. + +#### Processing Incoming Blocks + +When Bitswap receives a block, it checks to see if any peers sent `want-have` or `want-block` for the block. If so it sends `HAVE` or the block itself to those peers. + +#### Priority + +Bitswap keeps requests from each peer in separate queues, ordered by the priority specified in the request message. + +To select which peer to send the next response to, Bitswap chooses the peer with the least amount of data in its send queue. That way it will tend to "keep peers busy" by always keeping some data in each peer's send queue. 
+ + +Implementation +============== + +![Bitswap Components](./docs/go-bitswap.png) + +### Bitswap + +The Bitswap class receives incoming messages and implements the Exchange API. + +When a message is received, Bitswap +- Records some statistics about the message +- Informs the Engine of any new wants + So that the Engine can send responses to the wants +- Informs the Engine of any received blocks + So that the Engine can send the received blocks to any peers that want them +- Informs the WantManager of received blocks, HAVEs and DONT_HAVEs + So that the WantManager can inform interested sessions + +When the client makes an API call, Bitswap creates a new Session and calls the corresponding method (eg `GetBlocks()`). + +### Sending Blocks + +When the Engine is informed of new wants it +- Adds the wants to the Ledger (peer A wants block with CID Qmhash...) +- Checks the blockstore for the corresponding blocks, and adds a task to the PeerTaskQueue + - If the blockstore does not have a wanted block, adds a `DONT_HAVE` task + - If the blockstore has the block + - for a `want-have` adds a `HAVE` task + - for a `want-block` adds a `block` task + +When the Engine is informed of new blocks it checks the Ledger to see if any peers want information about those blocks. +- For each block + - For each peer that sent a `want-have` for the corresponding block + Adds a `HAVE` task to the PeerTaskQueue + - For each peer that sent a `want-block` for the corresponding block + Adds a `block` task to the PeerTaskQueue + +The Engine periodically pops tasks off the PeerTaskQueue, and creates a message with `blocks`, `HAVEs` and `DONT_HAVEs`. +The PeerTaskQueue prioritizes tasks such that the peers with the least amount of data in their send queue are highest priority, so as to "keep peers busy". 
+ +### Requesting Blocks + +When the WantManager is informed of a new message, it +- informs the SessionManager + The SessionManager informs the Sessions that are interested in the received blocks and wants +- informs the PeerManager of received blocks + The PeerManager checks if any wants were sent to a peer for the received blocks. If so it sends a `CANCEL` message to those peers. + +### Sessions + +The Session starts in "discovery" mode. This means it doesn't have any peers yet, and needs to discover which peers have the blocks it wants. + +When the client initially requests blocks from a Session, the Session +- informs the SessionInterestManager that it is interested in the want +- informs the sessionWantManager of the want +- tells the WantManager to broadcast a `want-have` to all connected peers so as to discover which peers have the block +- queries the ProviderQueryManager to discover which peers have the block + +When the session receives a message with `HAVE` or a `block`, it informs the SessionPeerManager. The SessionPeerManager keeps track of all peers in the session. +When the session receives a message with a `block` it informs the SessionInterestManager. + +Once the session has peers it is no longer in "discovery" mode. When the client requests subsequent blocks the Session informs the sessionWantSender. The sessionWantSender tells the PeerManager to send `want-have` and `want-block` to peers in the session. + +For each block that the Session wants, the sessionWantSender decides which peer is most likely to have a block by checking with the BlockPresenceManager which peers have sent a `HAVE` for the block. If no peers or multiple peers have sent `HAVE`, a peer is chosen probabilistically according to how many times each peer was first to send a block in response to previous wants requested by the Session. The sessionWantSender sends a single "optimistic" `want-block` to the chosen peer, and sends `want-have` to all other peers in the Session.
+When a peer responds with `DONT_HAVE`, the Session sends `want-block` to the next best peer, and so on until the block is received. + +### PeerManager + +The PeerManager creates a MessageQueue for each peer that connects to Bitswap. It remembers which `want-have` / `want-block` has been sent to each peer, and directs any new wants to the correct peer. +The MessageQueue groups together wants into a message, and sends the message to the peer. It monitors for timeouts and simulates a `DONT_HAVE` response if a peer takes too long to respond. + +### Finding Providers + +When bitswap can't find a connected peer who already has the block it wants, it falls back to querying a content routing system (a DHT in IPFS's case) to try to locate a peer with the block. + +Bitswap routes these requests through the ProviderQueryManager system, which rate-limits these requests and also deduplicates in-process requests. + +### Providing + +As a bitswap client receives blocks, by default it announces them on the provided content routing system (again, a DHT in most cases). This behaviour can be disabled by passing `bitswap.ProvideEnabled(false)` as a parameter when initializing Bitswap. IPFS currently has its own experimental provider system ([go-ipfs-provider](https://github.com/ipfs/go-ipfs-provider)) which will eventually replace Bitswap's system entirely. 
+ From da9aad913b3e39ae94bd839296e1351cac749ca2 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Tue, 7 Apr 2020 16:59:22 -0400 Subject: [PATCH 0898/1035] fix: path to architecture diagram (#338) This commit was moved from ipfs/go-bitswap@38114a67942be255c23d8097f719aa05766d4dc4 --- bitswap/docs/how-bitswap-works.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/docs/how-bitswap-works.md b/bitswap/docs/how-bitswap-works.md index 749a5a769..4b6ab1a74 100644 --- a/bitswap/docs/how-bitswap-works.md +++ b/bitswap/docs/how-bitswap-works.md @@ -62,7 +62,7 @@ To select which peer to send the next response to, Bitswap chooses the peer with Implementation ============== -![Bitswap Components](./docs/go-bitswap.png) +![Bitswap Components](./go-bitswap.png) ### Bitswap From 4345843fb8f4e3397a348a560f98bbc971e5ae37 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 10 Apr 2020 06:57:30 -0700 Subject: [PATCH 0899/1035] fix: start score worker along with other engine workers (#344) This commit was moved from ipfs/go-bitswap@d44a5f6769f776fb041a99995e3f21dab3f0d88b --- bitswap/internal/decision/engine.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index b744cb543..4a49c2435 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -197,7 +197,6 @@ func newEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, peertaskqueue.OnPeerRemovedHook(e.onPeerRemoved), peertaskqueue.TaskMerger(newTaskMerger()), peertaskqueue.IgnoreFreezing(true)) - go e.scoreWorker(ctx) return e } @@ -215,6 +214,7 @@ func (e *Engine) SetSendDontHaves(send bool) { func (e *Engine) StartWorkers(ctx context.Context, px process.Process) { // Start up blockstore manager e.bsm.start(px) + px.Go(e.scoreWorker) for i := 0; i < e.taskWorkerCount; i++ { px.Go(func(px process.Process) { @@ -240,7 +240,7 @@ func (e *Engine) StartWorkers(ctx 
context.Context, px process.Process) { // To calculate the final score, we sum the short-term and long-term scores then // adjust it ±25% based on our debt ratio. Peers that have historically been // more useful to us than we are to them get the highest score. -func (e *Engine) scoreWorker(ctx context.Context) { +func (e *Engine) scoreWorker(px process.Process) { ticker := time.NewTicker(e.peerSampleInterval) defer ticker.Stop() @@ -257,7 +257,7 @@ func (e *Engine) scoreWorker(ctx context.Context) { var now time.Time select { case now = <-ticker.C: - case <-ctx.Done(): + case <-px.Closing(): return } From fd8891be62f9cae71679b4aa0a6a6443c0b5f709 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 10 Apr 2020 14:14:37 -0400 Subject: [PATCH 0900/1035] fix: in message queue only send cancel if want was sent This commit was moved from ipfs/go-bitswap@4800d07d7fd1d44d0fd7cef621bd7afe07747805 --- bitswap/internal/messagequeue/messagequeue.go | 100 ++++++++++++------ .../messagequeue/messagequeue_test.go | 57 +++++++--- .../sessionwantlist/sessionwantlist.go | 11 ++ 3 files changed, 123 insertions(+), 45 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index daf8664bf..ca6f7c3bc 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -9,6 +9,7 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" bsnet "github.com/ipfs/go-bitswap/network" + "github.com/ipfs/go-bitswap/wantlist" bswl "github.com/ipfs/go-bitswap/wantlist" cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" @@ -80,41 +81,44 @@ type MessageQueue struct { msg bsmsg.BitSwapMessage } -// recallWantlist keeps a list of pending wants, and a list of all wants that -// have ever been requested +// recallWantlist keeps a list of pending wants and a list of sent wants type recallWantlist struct { - // The list of all wants that have 
been requested, including wants that - // have been sent and wants that have not yet been sent - allWants *bswl.Wantlist // The list of wants that have not yet been sent pending *bswl.Wantlist + // The list of wants that have been sent + sent *bswl.Wantlist } func newRecallWantList() recallWantlist { return recallWantlist{ - allWants: bswl.New(), - pending: bswl.New(), + pending: bswl.New(), + sent: bswl.New(), } } -// Add want to both the pending list and the list of all wants +// Add want to the pending list func (r *recallWantlist) Add(c cid.Cid, priority int32, wtype pb.Message_Wantlist_WantType) { - r.allWants.Add(c, priority, wtype) r.pending.Add(c, priority, wtype) } -// Remove wants from both the pending list and the list of all wants +// Remove wants from both the pending list and the list of sent wants func (r *recallWantlist) Remove(c cid.Cid) { - r.allWants.Remove(c) + r.sent.Remove(c) r.pending.Remove(c) } -// Remove wants by type from both the pending list and the list of all wants +// Remove wants by type from both the pending list and the list of sent wants func (r *recallWantlist) RemoveType(c cid.Cid, wtype pb.Message_Wantlist_WantType) { - r.allWants.RemoveType(c, wtype) + r.sent.RemoveType(c, wtype) r.pending.RemoveType(c, wtype) } +// Sent moves the want from the pending to the sent list +func (r *recallWantlist) Sent(e bsmsg.Entry) { + r.pending.RemoveType(e.Cid, e.WantType) + r.sent.Add(e.Cid, e.Priority, e.WantType) +} + type peerConn struct { p peer.ID network MessageNetwork @@ -251,15 +255,29 @@ func (mq *MessageQueue) AddCancels(cancelKs []cid.Cid) { mq.wllock.Lock() defer mq.wllock.Unlock() + workReady := false + // Remove keys from broadcast and peer wants, and add to cancels for _, c := range cancelKs { + // Check if a want for the key was sent + _, wasSentBcst := mq.bcstWants.sent.Contains(c) + _, wasSentPeer := mq.peerWants.sent.Contains(c) + + // Remove the want from tracking wantlists mq.bcstWants.Remove(c) mq.peerWants.Remove(c) - 
mq.cancels.Add(c) + + // Only send a cancel if a want was sent + if wasSentBcst || wasSentPeer { + mq.cancels.Add(c) + workReady = true + } } // Schedule a message send - mq.signalWorkReady() + if workReady { + mq.signalWorkReady() + } } // SetRebroadcastInterval sets a new interval on which to rebroadcast the full wantlist @@ -366,13 +384,13 @@ func (mq *MessageQueue) transferRebroadcastWants() bool { defer mq.wllock.Unlock() // Check if there are any wants to rebroadcast - if mq.bcstWants.allWants.Len() == 0 && mq.peerWants.allWants.Len() == 0 { + if mq.bcstWants.sent.Len() == 0 && mq.peerWants.sent.Len() == 0 { return false } - // Copy all wants into pending wants lists - mq.bcstWants.pending.Absorb(mq.bcstWants.allWants) - mq.peerWants.pending.Absorb(mq.peerWants.allWants) + // Copy sent wants into pending wants lists + mq.bcstWants.pending.Absorb(mq.bcstWants.sent) + mq.peerWants.pending.Absorb(mq.peerWants.sent) return true } @@ -405,7 +423,7 @@ func (mq *MessageQueue) sendMessage() { mq.dhTimeoutMgr.Start() // Convert want lists to a Bitswap Message - message := mq.extractOutgoingMessage(mq.sender.SupportsHave()) + message, onSent := mq.extractOutgoingMessage(mq.sender.SupportsHave()) // After processing the message, clear out its fields to save memory defer mq.msg.Reset(false) @@ -421,7 +439,7 @@ func (mq *MessageQueue) sendMessage() { for i := 0; i < maxRetries; i++ { if mq.attemptSendAndRecovery(message) { // We were able to send successfully. 
- mq.onMessageSent(wantlist) + onSent(wantlist) mq.simulateDontHaveWithTimeout(wantlist) @@ -452,7 +470,7 @@ func (mq *MessageQueue) simulateDontHaveWithTimeout(wantlist []bsmsg.Entry) { // Unlikely, but just in case check that the block hasn't been // received in the interim c := entry.Cid - if _, ok := mq.peerWants.allWants.Contains(c); ok { + if _, ok := mq.peerWants.sent.Contains(c); ok { wants = append(wants, c) } } @@ -522,7 +540,7 @@ func (mq *MessageQueue) pendingWorkCount() int { } // Convert the lists of wants into a Bitswap message -func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) bsmsg.BitSwapMessage { +func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwapMessage, func([]bsmsg.Entry)) { mq.wllock.Lock() defer mq.wllock.Unlock() @@ -572,19 +590,35 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) bsmsg.BitSwapM mq.cancels.Remove(c) } - return mq.msg -} + // Called when the message has been successfully sent. + onMessageSent := func(wantlist []bsmsg.Entry) { + bcst := keysToSet(bcstEntries) + prws := keysToSet(peerEntries) -// Called when the message has been successfully sent. -func (mq *MessageQueue) onMessageSent(wantlist []bsmsg.Entry) { - // Remove the sent keys from the broadcast and regular wantlists. 
- mq.wllock.Lock() - defer mq.wllock.Unlock() + mq.wllock.Lock() + defer mq.wllock.Unlock() - for _, e := range wantlist { - mq.bcstWants.pending.Remove(e.Cid) - mq.peerWants.pending.RemoveType(e.Cid, e.WantType) + // Move the keys from pending to sent + for _, e := range wantlist { + if _, ok := bcst[e.Cid]; ok { + mq.bcstWants.Sent(e) + } + if _, ok := prws[e.Cid]; ok { + mq.peerWants.Sent(e) + } + } + } + + return mq.msg, onMessageSent +} + +// Convert wantlist entries into a set of cids +func keysToSet(wl []wantlist.Entry) map[cid.Cid]struct{} { + set := make(map[cid.Cid]struct{}, len(wl)) + for _, e := range wl { + set[e.Cid] = struct{}{} } + return set } func (mq *MessageQueue) initializeSender() error { diff --git a/bitswap/internal/messagequeue/messagequeue_test.go b/bitswap/internal/messagequeue/messagequeue_test.go index 059534057..49c1033d6 100644 --- a/bitswap/internal/messagequeue/messagequeue_test.go +++ b/bitswap/internal/messagequeue/messagequeue_test.go @@ -319,18 +319,22 @@ func TestCancelOverridesPendingWants(t *testing.T) { fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) + wantHaves := testutil.GenerateCids(2) wantBlocks := testutil.GenerateCids(2) + cancels := []cid.Cid{wantBlocks[0], wantHaves[0]} messageQueue.Startup() messageQueue.AddWants(wantBlocks, wantHaves) - messageQueue.AddCancels([]cid.Cid{wantBlocks[0], wantHaves[0]}) + messageQueue.AddCancels(cancels) messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) - if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks)-len(cancels) { t.Fatal("Wrong message count") } + // Cancelled 1 want-block and 1 want-have before they were sent + // so that leaves 1 want-block and 1 want-have wb, wh, cl := filterWantTypes(messages[0]) if len(wb) != 1 || !wb[0].Equals(wantBlocks[1]) { t.Fatal("Expected 1 
want-block") @@ -338,6 +342,20 @@ func TestCancelOverridesPendingWants(t *testing.T) { if len(wh) != 1 || !wh[0].Equals(wantHaves[1]) { t.Fatal("Expected 1 want-have") } + // Cancelled wants before they were sent, so no cancel should be sent + // to the network + if len(cl) != 0 { + t.Fatal("Expected no cancels") + } + + // Cancel the remaining want-blocks and want-haves + cancels = append(wantHaves, wantBlocks...) + messageQueue.AddCancels(cancels) + messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + // The remaining 2 cancels should be sent to the network as they are for + // wants that were sent to the network + _, _, cl = filterWantTypes(messages[0]) if len(cl) != 2 { t.Fatal("Expected 2 cancels") } @@ -353,26 +371,41 @@ func TestWantOverridesPendingCancels(t *testing.T) { fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - cancels := testutil.GenerateCids(3) + + cids := testutil.GenerateCids(3) + wantBlocks := cids[:1] + wantHaves := cids[1:] messageQueue.Startup() - messageQueue.AddCancels(cancels) - messageQueue.AddWants([]cid.Cid{cancels[0]}, []cid.Cid{cancels[1]}) + + // Add 1 want-block and 2 want-haves + messageQueue.AddWants(wantBlocks, wantHaves) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + if totalEntriesLength(messages) != len(wantBlocks)+len(wantHaves) { + t.Fatal("Wrong message count", totalEntriesLength(messages)) + } - if totalEntriesLength(messages) != len(cancels) { - t.Fatal("Wrong message count") + // Cancel existing wants + messageQueue.AddCancels(cids) + // Override one cancel with a want-block (before cancel is sent to network) + messageQueue.AddWants(cids[:1], []cid.Cid{}) + + messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + if totalEntriesLength(messages) != 3 { + t.Fatal("Wrong message count", totalEntriesLength(messages)) } + // Should send 1 want-block and 2 
cancels wb, wh, cl := filterWantTypes(messages[0]) - if len(wb) != 1 || !wb[0].Equals(cancels[0]) { + if len(wb) != 1 { t.Fatal("Expected 1 want-block") } - if len(wh) != 1 || !wh[0].Equals(cancels[1]) { - t.Fatal("Expected 1 want-have") + if len(wh) != 0 { + t.Fatal("Expected 0 want-have") } - if len(cl) != 1 || !cl[0].Equals(cancels[2]) { - t.Fatal("Expected 1 cancel") + if len(cl) != 2 { + t.Fatal("Expected 2 cancels") } } diff --git a/bitswap/internal/sessionwantlist/sessionwantlist.go b/bitswap/internal/sessionwantlist/sessionwantlist.go index d98147396..05c143367 100644 --- a/bitswap/internal/sessionwantlist/sessionwantlist.go +++ b/bitswap/internal/sessionwantlist/sessionwantlist.go @@ -6,6 +6,7 @@ import ( cid "github.com/ipfs/go-cid" ) +// The SessionWantList keeps track of which sessions want a CID type SessionWantlist struct { sync.RWMutex wants map[cid.Cid]map[uint64]struct{} @@ -17,6 +18,7 @@ func NewSessionWantlist() *SessionWantlist { } } +// The given session wants the keys func (swl *SessionWantlist) Add(ks []cid.Cid, ses uint64) { swl.Lock() defer swl.Unlock() @@ -29,6 +31,8 @@ func (swl *SessionWantlist) Add(ks []cid.Cid, ses uint64) { } } +// Remove the keys for all sessions. +// Called when blocks are received. func (swl *SessionWantlist) RemoveKeys(ks []cid.Cid) { swl.Lock() defer swl.Unlock() @@ -38,6 +42,8 @@ func (swl *SessionWantlist) RemoveKeys(ks []cid.Cid) { } } +// Remove the session's wants, and return wants that are no longer wanted by +// any session. 
func (swl *SessionWantlist) RemoveSession(ses uint64) []cid.Cid { swl.Lock() defer swl.Unlock() @@ -54,6 +60,7 @@ func (swl *SessionWantlist) RemoveSession(ses uint64) []cid.Cid { return deletedKs } +// Remove the session's wants func (swl *SessionWantlist) RemoveSessionKeys(ses uint64, ks []cid.Cid) { swl.Lock() defer swl.Unlock() @@ -68,6 +75,7 @@ func (swl *SessionWantlist) RemoveSessionKeys(ses uint64, ks []cid.Cid) { } } +// All keys wanted by all sessions func (swl *SessionWantlist) Keys() []cid.Cid { swl.RLock() defer swl.RUnlock() @@ -79,6 +87,7 @@ func (swl *SessionWantlist) Keys() []cid.Cid { return ks } +// All sessions that want the given keys func (swl *SessionWantlist) SessionsFor(ks []cid.Cid) []uint64 { swl.RLock() defer swl.RUnlock() @@ -97,6 +106,7 @@ func (swl *SessionWantlist) SessionsFor(ks []cid.Cid) []uint64 { return ses } +// Filter for keys that at least one session wants func (swl *SessionWantlist) Has(ks []cid.Cid) *cid.Set { swl.RLock() defer swl.RUnlock() @@ -110,6 +120,7 @@ func (swl *SessionWantlist) Has(ks []cid.Cid) *cid.Set { return has } +// Filter for keys that the given session wants func (swl *SessionWantlist) SessionHas(ses uint64, ks []cid.Cid) *cid.Set { swl.RLock() defer swl.RUnlock() From 3bee0227b8025dfb6651eecca1b6b423519ab171 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 10 Apr 2020 17:25:50 -0700 Subject: [PATCH 0901/1035] feat: prioritize more important wants In case we're sending a _lot_ of wants: * Prioritize cancels. * Then targeted wants. * Finally broadcast wants. 
This commit was moved from ipfs/go-bitswap@c444535ffe1e65676e9e90dd90677a81917fcd93 --- bitswap/internal/messagequeue/messagequeue.go | 40 ++++++++++--------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index ca6f7c3bc..4b3f090d7 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -551,19 +551,18 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap // Size of the message so far msgSize := 0 - // Add each broadcast want-have to the message - for i := 0; i < len(bcstEntries) && msgSize < mq.maxMessageSize; i++ { - // Broadcast wants are sent as want-have - wantType := pb.Message_Wantlist_Have + // Always prioritize cancels, then targeted, then broadcast. - // If the remote peer doesn't support HAVE / DONT_HAVE messages, - // send a want-block instead - if !supportsHave { - wantType = pb.Message_Wantlist_Block - } + // Add each cancel to the message + cancels := mq.cancels.Keys() + for i := 0; i < len(cancels) && msgSize < mq.maxMessageSize; i++ { + c := cancels[i] - e := bcstEntries[i] - msgSize += mq.msg.AddEntry(e.Cid, e.Priority, wantType, false) + msgSize += mq.msg.Cancel(c) + + // Clear the cancel - we make a best effort to let peers know about + // cancels but won't save them to resend if there's a failure. 
+ mq.cancels.Remove(c) } // Add each regular want-have / want-block to the message @@ -578,16 +577,19 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap } } - // Add each cancel to the message - cancels := mq.cancels.Keys() - for i := 0; i < len(cancels) && msgSize < mq.maxMessageSize; i++ { - c := cancels[i] + // Add each broadcast want-have to the message + for i := 0; i < len(bcstEntries) && msgSize < mq.maxMessageSize; i++ { + // Broadcast wants are sent as want-have + wantType := pb.Message_Wantlist_Have - msgSize += mq.msg.Cancel(c) + // If the remote peer doesn't support HAVE / DONT_HAVE messages, + // send a want-block instead + if !supportsHave { + wantType = pb.Message_Wantlist_Block + } - // Clear the cancel - we make a best effort to let peers know about - // cancels but won't save them to resend if there's a failure. - mq.cancels.Remove(c) + e := bcstEntries[i] + msgSize += mq.msg.AddEntry(e.Cid, e.Priority, wantType, false) } // Called when the message has been successfully sent. From 22cca928c2432294ce30184a2de64c209adbcc68 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 13 Apr 2020 07:23:51 -0700 Subject: [PATCH 0902/1035] fix: avoid allocating cids (#348) This commit was moved from ipfs/go-bitswap@906b2fb5c12f169ab2e2d2bc3afe6bb297884215 --- bitswap/message/pb/cid.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bitswap/message/pb/cid.go b/bitswap/message/pb/cid.go index 59e32bb27..34862b3d4 100644 --- a/bitswap/message/pb/cid.go +++ b/bitswap/message/pb/cid.go @@ -18,7 +18,8 @@ func (c Cid) Marshal() ([]byte, error) { } func (c *Cid) MarshalTo(data []byte) (int, error) { - return copy(data[:c.Size()], c.Cid.Bytes()), nil + // intentionally using KeyString here to avoid allocating. 
+ return copy(data[:c.Size()], c.Cid.KeyString()), nil } func (c *Cid) Unmarshal(data []byte) (err error) { From 51301c0013bdfcfd77e930f3ec383d164ad97888 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Mon, 13 Apr 2020 11:21:48 -0400 Subject: [PATCH 0903/1035] refactor: simplify messageQueue onSent This commit was moved from ipfs/go-bitswap@e06ac247eec2f6a98824a1fa3c27756ac86faa6c --- bitswap/internal/messagequeue/messagequeue.go | 34 +++++++------------ 1 file changed, 13 insertions(+), 21 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 4b3f090d7..8b106b0df 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -113,8 +113,8 @@ func (r *recallWantlist) RemoveType(c cid.Cid, wtype pb.Message_Wantlist_WantTyp r.pending.RemoveType(c, wtype) } -// Sent moves the want from the pending to the sent list -func (r *recallWantlist) Sent(e bsmsg.Entry) { +// MarkSent moves the want from the pending to the sent list +func (r *recallWantlist) MarkSent(e wantlist.Entry) { r.pending.RemoveType(e.Cid, e.WantType) r.sent.Add(e.Cid, e.Priority, e.WantType) } @@ -566,6 +566,7 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap } // Add each regular want-have / want-block to the message + peerSentCount := 0 for i := 0; i < len(peerEntries) && msgSize < mq.maxMessageSize; i++ { e := peerEntries[i] // If the remote peer doesn't support HAVE / DONT_HAVE messages, @@ -575,9 +576,12 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap } else { msgSize += mq.msg.AddEntry(e.Cid, e.Priority, e.WantType, true) } + + peerSentCount++ } // Add each broadcast want-have to the message + bcstSentCount := 0 for i := 0; i < len(bcstEntries) && msgSize < mq.maxMessageSize; i++ { // Broadcast wants are sent as want-have wantType := pb.Message_Wantlist_Have @@ -590,39 +594,27 @@ func (mq *MessageQueue) 
extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap e := bcstEntries[i] msgSize += mq.msg.AddEntry(e.Cid, e.Priority, wantType, false) + + bcstSentCount++ } // Called when the message has been successfully sent. onMessageSent := func(wantlist []bsmsg.Entry) { - bcst := keysToSet(bcstEntries) - prws := keysToSet(peerEntries) - mq.wllock.Lock() defer mq.wllock.Unlock() // Move the keys from pending to sent - for _, e := range wantlist { - if _, ok := bcst[e.Cid]; ok { - mq.bcstWants.Sent(e) - } - if _, ok := prws[e.Cid]; ok { - mq.peerWants.Sent(e) - } + for i := 0; i < bcstSentCount; i++ { + mq.bcstWants.MarkSent(bcstEntries[i]) + } + for i := 0; i < peerSentCount; i++ { + mq.peerWants.MarkSent(peerEntries[i]) } } return mq.msg, onMessageSent } -// Convert wantlist entries into a set of cids -func keysToSet(wl []wantlist.Entry) map[cid.Cid]struct{} { - set := make(map[cid.Cid]struct{}, len(wl)) - for _, e := range wl { - set[e.Cid] = struct{}{} - } - return set -} - func (mq *MessageQueue) initializeSender() error { if mq.sender != nil { return nil From b5f3c4f8b16691200d3e52bd2e4908c033003163 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Mon, 13 Apr 2020 11:23:44 -0400 Subject: [PATCH 0904/1035] refactor: save some vars This commit was moved from ipfs/go-bitswap@efd006e9a458492a18bae131fb88dc7c4d8c9f1a --- bitswap/internal/messagequeue/messagequeue.go | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 8b106b0df..4a16ee607 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -567,8 +567,8 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap // Add each regular want-have / want-block to the message peerSentCount := 0 - for i := 0; i < len(peerEntries) && msgSize < mq.maxMessageSize; i++ { - e := peerEntries[i] + for ; peerSentCount < 
len(peerEntries) && msgSize < mq.maxMessageSize; peerSentCount++ { + e := peerEntries[peerSentCount] // If the remote peer doesn't support HAVE / DONT_HAVE messages, // don't send want-haves (only send want-blocks) if !supportsHave && e.WantType == pb.Message_Wantlist_Have { @@ -576,13 +576,11 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap } else { msgSize += mq.msg.AddEntry(e.Cid, e.Priority, e.WantType, true) } - - peerSentCount++ } // Add each broadcast want-have to the message bcstSentCount := 0 - for i := 0; i < len(bcstEntries) && msgSize < mq.maxMessageSize; i++ { + for ; bcstSentCount < len(bcstEntries) && msgSize < mq.maxMessageSize; bcstSentCount++ { // Broadcast wants are sent as want-have wantType := pb.Message_Wantlist_Have @@ -592,10 +590,8 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap wantType = pb.Message_Wantlist_Block } - e := bcstEntries[i] + e := bcstEntries[bcstSentCount] msgSize += mq.msg.AddEntry(e.Cid, e.Priority, wantType, false) - - bcstSentCount++ } // Called when the message has been successfully sent. From 9e6faf70582653fdb95b1ce7d5cea27d6c54f92c Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Mon, 13 Apr 2020 11:26:20 -0400 Subject: [PATCH 0905/1035] refactor: remove unnecessary func param This commit was moved from ipfs/go-bitswap@6c4126051520a3c3fcf460896200342cf1b7b96c --- bitswap/internal/messagequeue/messagequeue.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 4a16ee607..ed43ec57c 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -439,7 +439,7 @@ func (mq *MessageQueue) sendMessage() { for i := 0; i < maxRetries; i++ { if mq.attemptSendAndRecovery(message) { // We were able to send successfully. 
- onSent(wantlist) + onSent() mq.simulateDontHaveWithTimeout(wantlist) @@ -540,7 +540,7 @@ func (mq *MessageQueue) pendingWorkCount() int { } // Convert the lists of wants into a Bitswap message -func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwapMessage, func([]bsmsg.Entry)) { +func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwapMessage, func()) { mq.wllock.Lock() defer mq.wllock.Unlock() @@ -595,7 +595,7 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap } // Called when the message has been successfully sent. - onMessageSent := func(wantlist []bsmsg.Entry) { + onMessageSent := func() { mq.wllock.Lock() defer mq.wllock.Unlock() From c7d4c482cefe7958e123309e307139c3199cbe7e Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Mon, 13 Apr 2020 12:08:26 -0400 Subject: [PATCH 0906/1035] fix: only mark sent wants as sent This commit was moved from ipfs/go-bitswap@b6a8a73a29063bd23a3dac7727a3b9bad6d7fe81 --- bitswap/internal/messagequeue/messagequeue.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index ed43ec57c..1a8c2d5a5 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -566,15 +566,16 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap } // Add each regular want-have / want-block to the message - peerSentCount := 0 - for ; peerSentCount < len(peerEntries) && msgSize < mq.maxMessageSize; peerSentCount++ { - e := peerEntries[peerSentCount] + peerSent := make([]wantlist.Entry, 0, len(peerEntries)) + for i := 0; i < len(peerEntries) && msgSize < mq.maxMessageSize; i++ { + e := peerEntries[i] // If the remote peer doesn't support HAVE / DONT_HAVE messages, // don't send want-haves (only send want-blocks) if !supportsHave && e.WantType == pb.Message_Wantlist_Have { 
mq.peerWants.RemoveType(e.Cid, pb.Message_Wantlist_Have) } else { msgSize += mq.msg.AddEntry(e.Cid, e.Priority, e.WantType, true) + peerSent = append(peerSent, e) } } @@ -603,8 +604,8 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap for i := 0; i < bcstSentCount; i++ { mq.bcstWants.MarkSent(bcstEntries[i]) } - for i := 0; i < peerSentCount; i++ { - mq.peerWants.MarkSent(peerEntries[i]) + for _, e := range peerSent { + mq.peerWants.MarkSent(e) } } From 4fffefd7642c30bb3871f280a6fbe050b5d97fd2 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 14 Apr 2020 07:01:48 -0700 Subject: [PATCH 0907/1035] feat: optimize message sending (#350) Instead of copying these slices, we can just reuse them. This commit was moved from ipfs/go-bitswap@ac68698bc98841fe2e781f380fa2fd39611b6430 --- bitswap/internal/messagequeue/messagequeue.go | 30 ++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 1a8c2d5a5..d42db10d6 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -555,9 +555,10 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap // Add each cancel to the message cancels := mq.cancels.Keys() - for i := 0; i < len(cancels) && msgSize < mq.maxMessageSize; i++ { - c := cancels[i] - + for _, c := range cancels { + if msgSize >= mq.maxMessageSize { + break + } msgSize += mq.msg.Cancel(c) // Clear the cancel - we make a best effort to let peers know about @@ -566,9 +567,12 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap } // Add each regular want-have / want-block to the message - peerSent := make([]wantlist.Entry, 0, len(peerEntries)) - for i := 0; i < len(peerEntries) && msgSize < mq.maxMessageSize; i++ { - e := peerEntries[i] + peerSent := peerEntries[:0] + for _, e := range peerEntries { + 
if msgSize >= mq.maxMessageSize { + break + } + // If the remote peer doesn't support HAVE / DONT_HAVE messages, // don't send want-haves (only send want-blocks) if !supportsHave && e.WantType == pb.Message_Wantlist_Have { @@ -580,8 +584,12 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap } // Add each broadcast want-have to the message - bcstSentCount := 0 - for ; bcstSentCount < len(bcstEntries) && msgSize < mq.maxMessageSize; bcstSentCount++ { + bcstSent := bcstEntries[:0] + for _, e := range bcstEntries { + if msgSize >= mq.maxMessageSize { + break + } + // Broadcast wants are sent as want-have wantType := pb.Message_Wantlist_Have @@ -591,8 +599,8 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap wantType = pb.Message_Wantlist_Block } - e := bcstEntries[bcstSentCount] msgSize += mq.msg.AddEntry(e.Cid, e.Priority, wantType, false) + bcstSent = append(bcstSent, e) } // Called when the message has been successfully sent. 
@@ -601,8 +609,8 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap defer mq.wllock.Unlock() // Move the keys from pending to sent - for i := 0; i < bcstSentCount; i++ { - mq.bcstWants.MarkSent(bcstEntries[i]) + for _, e := range bcstSent { + mq.bcstWants.MarkSent(e) } for _, e := range peerSent { mq.peerWants.MarkSent(e) From f155add67c8028ba48847cd82c61cdafa5aec5e2 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 15 Apr 2020 17:26:24 -0400 Subject: [PATCH 0908/1035] refactor: move connection management into networking layer This commit was moved from ipfs/go-bitswap@bfd6fe8e9f1d9e1ace617b1a390000614cf4f45e --- bitswap/internal/decision/engine.go | 21 +- bitswap/internal/decision/ledger.go | 4 - bitswap/internal/messagequeue/messagequeue.go | 117 +++-------- bitswap/internal/peermanager/peermanager.go | 61 ++---- bitswap/network/interface.go | 8 +- bitswap/network/ipfs_impl.go | 197 ++++++++++++++++-- bitswap/testnet/virtual.go | 2 +- 7 files changed, 246 insertions(+), 164 deletions(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 4a49c2435..620bb868c 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -745,32 +745,19 @@ func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) { func (e *Engine) PeerConnected(p peer.ID) { e.lock.Lock() defer e.lock.Unlock() - l, ok := e.ledgerMap[p] + + _, ok := e.ledgerMap[p] if !ok { - l = newLedger(p) - e.ledgerMap[p] = l + e.ledgerMap[p] = newLedger(p) } - - l.lk.Lock() - defer l.lk.Unlock() - l.ref++ } // PeerDisconnected is called when a peer disconnects. 
func (e *Engine) PeerDisconnected(p peer.ID) { e.lock.Lock() defer e.lock.Unlock() - l, ok := e.ledgerMap[p] - if !ok { - return - } - l.lk.Lock() - defer l.lk.Unlock() - l.ref-- - if l.ref <= 0 { - delete(e.ledgerMap, p) - } + delete(e.ledgerMap, p) } // If the want is a want-have, and it's below a certain size, send the full diff --git a/bitswap/internal/decision/ledger.go b/bitswap/internal/decision/ledger.go index 8f103bd46..87fedc458 100644 --- a/bitswap/internal/decision/ledger.go +++ b/bitswap/internal/decision/ledger.go @@ -43,10 +43,6 @@ type ledger struct { // wantList is a (bounded, small) set of keys that Partner desires. wantList *wl.Wantlist - // ref is the reference count for this ledger, its used to ensure we - // don't drop the reference to this ledger in multi-connection scenarios - ref int - lk sync.RWMutex } diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index d42db10d6..b08834f3d 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -25,7 +25,8 @@ const ( defaultRebroadcastInterval = 30 * time.Second // maxRetries is the number of times to attempt to send a message before // giving up - maxRetries = 10 + maxRetries = 3 + sendTimeout = 30 * time.Second // maxMessageSize is the maximum message size in bytes maxMessageSize = 1024 * 1024 * 2 // sendErrorBackoff is the time to wait before retrying to connect after @@ -46,7 +47,7 @@ const ( // sender. 
type MessageNetwork interface { ConnectTo(context.Context, peer.ID) error - NewMessageSender(context.Context, peer.ID) (bsnet.MessageSender, error) + NewMessageSender(context.Context, peer.ID, *bsnet.MessageSenderOpts) (bsnet.MessageSender, error) Latency(peer.ID) time.Duration Ping(context.Context, peer.ID) ping.Result Self() peer.ID @@ -409,12 +410,11 @@ func (mq *MessageQueue) sendIfReady() { } func (mq *MessageQueue) sendMessage() { - err := mq.initializeSender() + sender, err := mq.initializeSender() if err != nil { - log.Infof("cant open message sender to peer %s: %s", mq.p, err) - // TODO: cant connect, what now? - // TODO: should we stop using this connection and clear the want list - // to avoid using up memory? + // If we fail to initialize the sender, the networking layer will + // emit a Disconnect event and the MessageQueue will get cleaned up + log.Infof("Could not open message sender to peer %s: %s", mq.p, err) return } @@ -435,23 +435,24 @@ func (mq *MessageQueue) sendMessage() { wantlist := message.Wantlist() mq.logOutgoingMessage(wantlist) - // Try to send this message repeatedly - for i := 0; i < maxRetries; i++ { - if mq.attemptSendAndRecovery(message) { - // We were able to send successfully. - onSent() + if err := sender.SendMsg(mq.ctx, message); err != nil { + // If the message couldn't be sent, the networking layer will + // emit a Disconnect event and the MessageQueue will get cleaned up + log.Infof("Could not send message to peer %s: %s", mq.p, err) + return + } - mq.simulateDontHaveWithTimeout(wantlist) + // We were able to send successfully. + onSent() - // If the message was too big and only a subset of wants could be - // sent, schedule sending the rest of the wants in the next - // iteration of the event loop. 
- if mq.hasPendingWork() { - mq.signalWorkReady() - } + // Set a timer to wait for responses + mq.simulateDontHaveWithTimeout(wantlist) - return - } + // If the message was too big and only a subset of wants could be + // sent, schedule sending the rest of the wants in the next + // iteration of the event loop. + if mq.hasPendingWork() { + mq.signalWorkReady() } } @@ -620,69 +621,19 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap return mq.msg, onMessageSent } -func (mq *MessageQueue) initializeSender() error { - if mq.sender != nil { - return nil - } - nsender, err := openSender(mq.ctx, mq.network, mq.p) - if err != nil { - return err - } - mq.sender = nsender - return nil -} - -func (mq *MessageQueue) attemptSendAndRecovery(message bsmsg.BitSwapMessage) bool { - err := mq.sender.SendMsg(mq.ctx, message) - if err == nil { - return true - } - - log.Infof("bitswap send error: %s", err) - _ = mq.sender.Reset() - mq.sender = nil - - select { - case <-mq.done: - return true - case <-mq.ctx.Done(): - return true - case <-time.After(mq.sendErrorBackoff): - // wait 100ms in case disconnect notifications are still propagating - log.Warn("SendMsg errored but neither 'done' nor context.Done() were set") - } - - err = mq.initializeSender() - if err != nil { - log.Infof("couldnt open sender again after SendMsg(%s) failed: %s", mq.p, err) - return true - } - - // TODO: Is this the same instance for the remote peer? 
- // If its not, we should resend our entire wantlist to them - /* - if mq.sender.InstanceID() != mq.lastSeenInstanceID { - wlm = mq.getFullWantlistMessage() +func (mq *MessageQueue) initializeSender() (bsnet.MessageSender, error) { + if mq.sender == nil { + opts := &bsnet.MessageSenderOpts{ + MaxRetries: maxRetries, + SendTimeout: sendTimeout, + SendErrorBackoff: sendErrorBackoff, + } + nsender, err := mq.network.NewMessageSender(mq.ctx, mq.p, opts) + if err != nil { + return nil, err } - */ - return false -} - -func openSender(ctx context.Context, network MessageNetwork, p peer.ID) (bsnet.MessageSender, error) { - // allow ten minutes for connections this includes looking them up in the - // dht dialing them, and handshaking - conctx, cancel := context.WithTimeout(ctx, time.Minute*10) - defer cancel() - - err := network.ConnectTo(conctx, p) - if err != nil { - return nil, err - } - nsender, err := network.NewMessageSender(ctx, p) - if err != nil { - return nil, err + mq.sender = nsender } - - return nsender, nil + return mq.sender, nil } diff --git a/bitswap/internal/peermanager/peermanager.go b/bitswap/internal/peermanager/peermanager.go index c2159b198..0cf8b2e35 100644 --- a/bitswap/internal/peermanager/peermanager.go +++ b/bitswap/internal/peermanager/peermanager.go @@ -30,17 +30,12 @@ type Session interface { // PeerQueueFactory provides a function that will create a PeerQueue. type PeerQueueFactory func(ctx context.Context, p peer.ID) PeerQueue -type peerQueueInstance struct { - refcnt int - pq PeerQueue -} - // PeerManager manages a pool of peers and sends messages to peers in the pool. 
type PeerManager struct { // sync access to peerQueues and peerWantManager pqLk sync.RWMutex // peerQueues -- interact through internal utility functions get/set/remove/iterate - peerQueues map[peer.ID]*peerQueueInstance + peerQueues map[peer.ID]PeerQueue pwm *peerWantManager createPeerQueue PeerQueueFactory @@ -57,7 +52,7 @@ type PeerManager struct { func New(ctx context.Context, createPeerQueue PeerQueueFactory, self peer.ID) *PeerManager { wantGauge := metrics.NewCtx(ctx, "wantlist_total", "Number of items in wantlist.").Gauge() return &PeerManager{ - peerQueues: make(map[peer.ID]*peerQueueInstance), + peerQueues: make(map[peer.ID]PeerQueue), pwm: newPeerWantManager(wantGauge), createPeerQueue: createPeerQueue, ctx: ctx, @@ -92,19 +87,15 @@ func (pm *PeerManager) Connected(p peer.ID, initialWantHaves []cid.Cid) { defer pm.pqLk.Unlock() pq := pm.getOrCreate(p) - pq.refcnt++ - - // If this is the first connection to the peer - if pq.refcnt == 1 { - // Inform the peer want manager that there's a new peer - pm.pwm.addPeer(p) - // Record that the want-haves are being sent to the peer - _, wantHaves := pm.pwm.prepareSendWants(p, nil, initialWantHaves) - // Broadcast any live want-haves to the newly connected peers - pq.pq.AddBroadcastWantHaves(wantHaves) - // Inform the sessions that the peer has connected - pm.signalAvailability(p, true) - } + + // Inform the peer want manager that there's a new peer + pm.pwm.addPeer(p) + // Record that the want-haves are being sent to the peer + _, wantHaves := pm.pwm.prepareSendWants(p, nil, initialWantHaves) + // Broadcast any live want-haves to the newly connected peers + pq.AddBroadcastWantHaves(wantHaves) + // Inform the sessions that the peer has connected + pm.signalAvailability(p, true) } // Disconnected is called to remove a peer from the pool. 
@@ -118,17 +109,12 @@ func (pm *PeerManager) Disconnected(p peer.ID) { return } - pq.refcnt-- - if pq.refcnt > 0 { - return - } - // Inform the sessions that the peer has disconnected pm.signalAvailability(p, false) // Clean up the peer delete(pm.peerQueues, p) - pq.pq.Shutdown() + pq.Shutdown() pm.pwm.removePeer(p) } @@ -141,8 +127,8 @@ func (pm *PeerManager) BroadcastWantHaves(ctx context.Context, wantHaves []cid.C defer pm.pqLk.Unlock() for p, ks := range pm.pwm.prepareBroadcastWantHaves(wantHaves) { - if pqi, ok := pm.peerQueues[p]; ok { - pqi.pq.AddBroadcastWantHaves(ks) + if pq, ok := pm.peerQueues[p]; ok { + pq.AddBroadcastWantHaves(ks) } } } @@ -153,9 +139,9 @@ func (pm *PeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []ci pm.pqLk.Lock() defer pm.pqLk.Unlock() - if pqi, ok := pm.peerQueues[p]; ok { + if pq, ok := pm.peerQueues[p]; ok { wblks, whvs := pm.pwm.prepareSendWants(p, wantBlocks, wantHaves) - pqi.pq.AddWants(wblks, whvs) + pq.AddWants(wblks, whvs) } } @@ -167,8 +153,8 @@ func (pm *PeerManager) SendCancels(ctx context.Context, cancelKs []cid.Cid) { // Send a CANCEL to each peer that has been sent a want-block or want-have for p, ks := range pm.pwm.prepareSendCancels(cancelKs) { - if pqi, ok := pm.peerQueues[p]; ok { - pqi.pq.AddCancels(ks) + if pq, ok := pm.peerQueues[p]; ok { + pq.AddCancels(ks) } } } @@ -197,15 +183,14 @@ func (pm *PeerManager) CurrentWantHaves() []cid.Cid { return pm.pwm.getWantHaves() } -func (pm *PeerManager) getOrCreate(p peer.ID) *peerQueueInstance { - pqi, ok := pm.peerQueues[p] +func (pm *PeerManager) getOrCreate(p peer.ID) PeerQueue { + pq, ok := pm.peerQueues[p] if !ok { - pq := pm.createPeerQueue(pm.ctx, p) + pq = pm.createPeerQueue(pm.ctx, p) pq.Startup() - pqi = &peerQueueInstance{0, pq} - pm.peerQueues[p] = pqi + pm.peerQueues[p] = pq } - return pqi + return pq } // RegisterSession tells the PeerManager that the given session is interested diff --git a/bitswap/network/interface.go 
b/bitswap/network/interface.go index 6b2878e38..a350d5254 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -42,7 +42,7 @@ type BitSwapNetwork interface { ConnectTo(context.Context, peer.ID) error DisconnectFrom(context.Context, peer.ID) error - NewMessageSender(context.Context, peer.ID) (MessageSender, error) + NewMessageSender(context.Context, peer.ID, *MessageSenderOpts) (MessageSender, error) ConnectionManager() connmgr.ConnManager @@ -63,6 +63,12 @@ type MessageSender interface { SupportsHave() bool } +type MessageSenderOpts struct { + MaxRetries int + SendTimeout time.Duration + SendErrorBackoff time.Duration +} + // Receiver is an interface that can receive messages from the BitSwapNetwork. type Receiver interface { ReceiveMessage( diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index b5661408d..d626ad038 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "sync" "sync/atomic" "time" @@ -43,6 +44,8 @@ func NewFromIpfsHost(host host.Host, r routing.ContentRouting, opts ...NetOpt) B supportedProtocols: s.SupportedProtocols, } + bitswapNetwork.connectEvtMgr = newConnectEventManager(&bitswapNetwork) + return &bitswapNetwork } @@ -71,8 +74,9 @@ type impl struct { // alignment. 
stats Stats - host host.Host - routing routing.ContentRouting + host host.Host + routing routing.ContentRouting + connectEvtMgr *connectEventManager protocolBitswapNoVers protocol.ID protocolBitswapOneZero protocol.ID @@ -86,24 +90,93 @@ type impl struct { } type streamMessageSender struct { - s network.Stream - bsnet *impl + to peer.ID + stream network.Stream + bsnet *impl + opts *MessageSenderOpts } -func (s *streamMessageSender) Close() error { - return helpers.FullClose(s.s) +func (s *streamMessageSender) Connect(ctx context.Context) (stream network.Stream, err error) { + defer func() { + if err != nil { + s.bsnet.connectEvtMgr.MarkUnresponsive(s.to) + } + }() + + if s.stream != nil { + return s.stream, nil + } + + if err = s.bsnet.ConnectTo(ctx, s.to); err != nil { + return nil, err + } + + stream, err = s.bsnet.newStreamToPeer(ctx, s.to) + if err != nil { + s.stream = stream + return s.stream, nil + } + return nil, err } func (s *streamMessageSender) Reset() error { - return s.s.Reset() + err := s.stream.Reset() + s.stream = nil + return err } -func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { - return s.bsnet.msgToStream(ctx, s.s, msg) +func (s *streamMessageSender) Close() error { + return helpers.FullClose(s.stream) } func (s *streamMessageSender) SupportsHave() bool { - return s.bsnet.SupportsHave(s.s.Protocol()) + return s.bsnet.SupportsHave(s.stream.Protocol()) +} + +func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { + // Try to send the message repeatedly + var err error + for i := 0; i < s.opts.MaxRetries; i++ { + if err = s.attemptSend(ctx, msg); err == nil { + // Sent successfully + return nil + } + + // Failed to send so reset stream and try again + _ = s.Reset() + + if i == s.opts.MaxRetries { + s.bsnet.connectEvtMgr.MarkUnresponsive(s.to) + return err + } + + select { + case <-ctx.Done(): + return nil + case <-time.After(s.opts.SendErrorBackoff): + // wait a 
short time in case disconnect notifications are still propagating + log.Infof("send message to %s failed but context was not Done: %s", s.to, err) + } + } + return err +} + +func (s *streamMessageSender) attemptSend(ctx context.Context, msg bsmsg.BitSwapMessage) error { + sndctx, cancel := context.WithTimeout(ctx, s.opts.SendTimeout) + defer cancel() + + stream, err := s.Connect(sndctx) + if err != nil { + log.Infof("failed to open stream to %s: %s", s.to, err) + return err + } + + if err = s.bsnet.msgToStream(sndctx, stream, msg); err != nil { + log.Infof("failed to send message to %s: %s", s.to, err) + return err + } + + return nil } func (bsnet *impl) Self() peer.ID { @@ -164,17 +237,21 @@ func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg. return nil } -func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID) (MessageSender, error) { - s, err := bsnet.newStreamToPeer(ctx, p) - if err != nil { - return nil, err +func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID, opts *MessageSenderOpts) (MessageSender, error) { + sender := &streamMessageSender{ + to: p, + bsnet: bsnet, + opts: opts, } - return &streamMessageSender{s: s, bsnet: bsnet}, nil -} + conctx, cancel := context.WithTimeout(ctx, sender.opts.SendTimeout) + defer cancel() -func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (network.Stream, error) { - return bsnet.host.NewStream(ctx, p, bsnet.supportedProtocols...) + _, err := sender.Connect(conctx) + if err != nil { + return nil, err + } + return sender, nil } func (bsnet *impl) SendMessage( @@ -197,7 +274,10 @@ func (bsnet *impl) SendMessage( //nolint go helpers.AwaitEOF(s) return s.Close() +} +func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (network.Stream, error) { + return bsnet.host.NewStream(ctx, p, bsnet.supportedProtocols...) 
} func (bsnet *impl) SetDelegate(r Receiver) { @@ -268,6 +348,7 @@ func (bsnet *impl) handleNewStream(s network.Stream) { p := s.Conn().RemotePeer() ctx := context.Background() log.Debugf("bitswap net handleNewStream from %s", s.Conn().RemotePeer()) + bsnet.connectEvtMgr.OnMessage(s.Conn().RemotePeer()) bsnet.receiver.ReceiveMessage(ctx, p, received) atomic.AddUint64(&bsnet.stats.MessagesRecvd, 1) } @@ -284,6 +365,82 @@ func (bsnet *impl) Stats() Stats { } } +type connectEventManager struct { + bsnet *impl + lk sync.Mutex + conns map[peer.ID]*connState +} + +type connState struct { + refs int + responsive bool +} + +func newConnectEventManager(bsnet *impl) *connectEventManager { + return &connectEventManager{ + bsnet: bsnet, + conns: make(map[peer.ID]*connState), + } +} + +func (c *connectEventManager) Connected(p peer.ID) { + c.lk.Lock() + defer c.lk.Unlock() + + state, ok := c.conns[p] + if !ok { + state = &connState{responsive: true} + } + state.refs++ + + if state.refs == 1 && state.responsive { + c.bsnet.receiver.PeerConnected(p) + } +} + +func (c *connectEventManager) Disconnected(p peer.ID) { + c.lk.Lock() + defer c.lk.Unlock() + + state, ok := c.conns[p] + if !ok { + // Should never happen + return + } + state.refs-- + c.conns[p] = state + + if state.refs == 0 && state.responsive { + c.bsnet.receiver.PeerDisconnected(p) + } +} + +func (c *connectEventManager) MarkUnresponsive(p peer.ID) { + c.lk.Lock() + defer c.lk.Unlock() + + state, ok := c.conns[p] + if !ok { + return + } + state.responsive = false + c.conns[p] = state + + c.bsnet.receiver.PeerDisconnected(p) +} + +func (c *connectEventManager) OnMessage(p peer.ID) { + c.lk.Lock() + defer c.lk.Unlock() + + state, ok := c.conns[p] + if ok && !state.responsive { + state.responsive = true + c.conns[p] = state + c.bsnet.receiver.PeerConnected(p) + } +} + type netNotifiee impl func (nn *netNotifiee) impl() *impl { @@ -291,10 +448,10 @@ func (nn *netNotifiee) impl() *impl { } func (nn *netNotifiee) Connected(n 
network.Network, v network.Conn) { - nn.impl().receiver.PeerConnected(v.RemotePeer()) + nn.impl().connectEvtMgr.Connected(v.RemotePeer()) } func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) { - nn.impl().receiver.PeerDisconnected(v.RemotePeer()) + nn.impl().connectEvtMgr.Disconnected(v.RemotePeer()) } func (nn *netNotifiee) OpenedStream(n network.Network, s network.Stream) {} func (nn *netNotifiee) ClosedStream(n network.Network, v network.Stream) {} diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 1e472110f..c44b430db 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -284,7 +284,7 @@ func (mp *messagePasser) SupportsHave() bool { return false } -func (nc *networkClient) NewMessageSender(ctx context.Context, p peer.ID) (bsnet.MessageSender, error) { +func (nc *networkClient) NewMessageSender(ctx context.Context, p peer.ID, opts *bsnet.MessageSenderOpts) (bsnet.MessageSender, error) { return &messagePasser{ net: nc, target: p, From 68141ca8c4d7a74670f36cc2f39089aadebe352b Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 15 Apr 2020 17:36:42 -0400 Subject: [PATCH 0909/1035] fix: stop sender when message queue shut down This commit was moved from ipfs/go-bitswap@b097d7027049ac57d2a503fc3047edea0d4128d9 --- bitswap/network/ipfs_impl.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index d626ad038..8a02fcea5 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -94,6 +94,7 @@ type streamMessageSender struct { stream network.Stream bsnet *impl opts *MessageSenderOpts + done chan struct{} } func (s *streamMessageSender) Connect(ctx context.Context) (stream network.Stream, err error) { @@ -126,6 +127,7 @@ func (s *streamMessageSender) Reset() error { } func (s *streamMessageSender) Close() error { + close(s.done) return helpers.FullClose(s.stream) } @@ -142,6 +144,15 @@ func (s 
*streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMess return nil } + // If the sender has been closed or the context cancelled, just bail out + select { + case <-ctx.Done(): + return nil + case <-s.done: + return nil + default: + } + // Failed to send so reset stream and try again _ = s.Reset() @@ -153,6 +164,8 @@ func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMess select { case <-ctx.Done(): return nil + case <-s.done: + return nil case <-time.After(s.opts.SendErrorBackoff): // wait a short time in case disconnect notifications are still propagating log.Infof("send message to %s failed but context was not Done: %s", s.to, err) @@ -242,6 +255,7 @@ func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID, opts *Messag to: p, bsnet: bsnet, opts: opts, + done: make(chan struct{}), } conctx, cancel := context.WithTimeout(ctx, sender.opts.SendTimeout) From b1a67f71cf29873e0566e7a0920463acf1d7816d Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 15 Apr 2020 18:00:09 -0400 Subject: [PATCH 0910/1035] fix: tests This commit was moved from ipfs/go-bitswap@c1922c0d987d6df209d7afd613aa76ece93ebf4d --- .../messagequeue/messagequeue_test.go | 153 ++---------------- .../internal/peermanager/peermanager_test.go | 11 +- bitswap/network/ipfs_impl.go | 2 +- bitswap/network/ipfs_impl_test.go | 8 +- 4 files changed, 25 insertions(+), 149 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue_test.go b/bitswap/internal/messagequeue/messagequeue_test.go index 49c1033d6..38ffafa2b 100644 --- a/bitswap/internal/messagequeue/messagequeue_test.go +++ b/bitswap/internal/messagequeue/messagequeue_test.go @@ -2,7 +2,6 @@ package messagequeue import ( "context" - "errors" "fmt" "math" "math/rand" @@ -31,7 +30,7 @@ func (fmn *fakeMessageNetwork) ConnectTo(context.Context, peer.ID) error { return fmn.connectError } -func (fmn *fakeMessageNetwork) NewMessageSender(context.Context, peer.ID) (bsnet.MessageSender, error) { 
+func (fmn *fakeMessageNetwork) NewMessageSender(context.Context, peer.ID, *bsnet.MessageSenderOpts) (bsnet.MessageSender, error) { if fmn.messageSenderError == nil { return fmn.messageSender, nil } @@ -83,23 +82,19 @@ func (fp *fakeDontHaveTimeoutMgr) pendingCount() int { type fakeMessageSender struct { lk sync.Mutex - sendError error fullClosed chan<- struct{} reset chan<- struct{} messagesSent chan<- []bsmsg.Entry - sendErrors chan<- error supportsHave bool } -func newFakeMessageSender(sendError error, fullClosed chan<- struct{}, reset chan<- struct{}, - messagesSent chan<- []bsmsg.Entry, sendErrors chan<- error, supportsHave bool) *fakeMessageSender { +func newFakeMessageSender(fullClosed chan<- struct{}, reset chan<- struct{}, + messagesSent chan<- []bsmsg.Entry, supportsHave bool) *fakeMessageSender { return &fakeMessageSender{ - sendError: sendError, fullClosed: fullClosed, reset: reset, messagesSent: messagesSent, - sendErrors: sendErrors, supportsHave: supportsHave, } } @@ -108,19 +103,9 @@ func (fms *fakeMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMess fms.lk.Lock() defer fms.lk.Unlock() - if fms.sendError != nil { - fms.sendErrors <- fms.sendError - return fms.sendError - } fms.messagesSent <- msg.Wantlist() return nil } -func (fms *fakeMessageSender) clearSendError() { - fms.lk.Lock() - defer fms.lk.Unlock() - - fms.sendError = nil -} func (fms *fakeMessageSender) Close() error { fms.fullClosed <- struct{}{}; return nil } func (fms *fakeMessageSender) Reset() error { fms.reset <- struct{}{}; return nil } func (fms *fakeMessageSender) SupportsHave() bool { return fms.supportsHave } @@ -155,10 +140,9 @@ func totalEntriesLength(messages [][]bsmsg.Entry) int { func TestStartupAndShutdown(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) - sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(nil, fullClosedChan, 
resetChan, messagesSent, sendErrors, true) + fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -197,10 +181,9 @@ func TestStartupAndShutdown(t *testing.T) { func TestSendingMessagesDeduped(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) - sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) + fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -220,10 +203,9 @@ func TestSendingMessagesDeduped(t *testing.T) { func TestSendingMessagesPartialDupe(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) - sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) + fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -243,10 +225,9 @@ func TestSendingMessagesPartialDupe(t *testing.T) { func TestSendingMessagesPriority(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) - sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) + fakeSender := newFakeMessageSender(fullClosedChan, 
resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -312,10 +293,9 @@ func TestSendingMessagesPriority(t *testing.T) { func TestCancelOverridesPendingWants(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) - sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) + fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -364,10 +344,9 @@ func TestCancelOverridesPendingWants(t *testing.T) { func TestWantOverridesPendingCancels(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) - sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) + fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -412,10 +391,9 @@ func TestWantOverridesPendingCancels(t *testing.T) { func TestWantlistRebroadcast(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) - sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) + fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID 
:= testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -509,10 +487,9 @@ func TestWantlistRebroadcast(t *testing.T) { func TestSendingLargeMessages(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) - sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) + fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} dhtm := &fakeDontHaveTimeoutMgr{} peerID := testutil.GeneratePeers(1)[0] @@ -540,10 +517,9 @@ func TestSendingLargeMessages(t *testing.T) { func TestSendToPeerThatDoesntSupportHave(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) - sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, false) + fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, false) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] @@ -596,10 +572,9 @@ func TestSendToPeerThatDoesntSupportHave(t *testing.T) { func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) - sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, false) + fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, false) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] @@ -626,105 +601,6 @@ func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { } } -func 
TestResendAfterError(t *testing.T) { - ctx := context.Background() - messagesSent := make(chan []bsmsg.Entry) - sendErrors := make(chan error) - resetChan := make(chan struct{}, 1) - fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) - fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - dhtm := &fakeDontHaveTimeoutMgr{} - peerID := testutil.GeneratePeers(1)[0] - sendErrBackoff := 5 * time.Millisecond - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrBackoff, dhtm) - wantBlocks := testutil.GenerateCids(10) - wantHaves := testutil.GenerateCids(10) - - messageQueue.Startup() - - var errs []error - go func() { - // After the first error is received, clear sendError so that - // subsequent sends will not error - errs = append(errs, <-sendErrors) - fakeSender.clearSendError() - }() - - // Make the first send error out - fakeSender.sendError = errors.New("send err") - messageQueue.AddWants(wantBlocks, wantHaves) - messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) - - if len(errs) != 1 { - t.Fatal("Expected first send to error") - } - - if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { - t.Fatal("Expected subsequent send to succeed") - } -} - -func TestResendAfterMaxRetries(t *testing.T) { - ctx := context.Background() - messagesSent := make(chan []bsmsg.Entry) - sendErrors := make(chan error) - resetChan := make(chan struct{}, maxRetries*2) - fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) - fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - dhtm := &fakeDontHaveTimeoutMgr{} - peerID := testutil.GeneratePeers(1)[0] - sendErrBackoff := 2 * time.Millisecond - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrBackoff, dhtm) - wantBlocks := testutil.GenerateCids(10) - wantHaves := 
testutil.GenerateCids(10) - wantBlocks2 := testutil.GenerateCids(10) - wantHaves2 := testutil.GenerateCids(10) - - messageQueue.Startup() - - var lk sync.Mutex - var errs []error - go func() { - lk.Lock() - defer lk.Unlock() - for len(errs) < maxRetries { - err := <-sendErrors - errs = append(errs, err) - } - }() - - // Make the first group of send attempts error out - fakeSender.sendError = errors.New("send err") - messageQueue.AddWants(wantBlocks, wantHaves) - messages := collectMessages(ctx, t, messagesSent, 50*time.Millisecond) - - lk.Lock() - errCount := len(errs) - lk.Unlock() - if errCount != maxRetries { - t.Fatal("Expected maxRetries errors, got", len(errs)) - } - - // No successful send after max retries, so expect no messages sent - if totalEntriesLength(messages) != 0 { - t.Fatal("Expected no messages") - } - - // Clear sendError so that subsequent sends will not error - fakeSender.clearSendError() - - // Add a new batch of wants - messageQueue.AddWants(wantBlocks2, wantHaves2) - messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) - - // All wants from previous and new send should be sent - if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks)+len(wantHaves2)+len(wantBlocks2) { - t.Fatal("Expected subsequent send to send first and second batches of wants") - } -} - func filterWantTypes(wantlist []bsmsg.Entry) ([]cid.Cid, []cid.Cid, []cid.Cid) { var wbs []cid.Cid var whs []cid.Cid @@ -747,10 +623,9 @@ func BenchmarkMessageQueue(b *testing.B) { createQueue := func() *MessageQueue { messagesSent := make(chan []bsmsg.Entry) - sendErrors := make(chan error) resetChan := make(chan struct{}, 1) fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(nil, fullClosedChan, resetChan, messagesSent, sendErrors, true) + fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} dhtm := &fakeDontHaveTimeoutMgr{} peerID := 
testutil.GeneratePeers(1)[0] diff --git a/bitswap/internal/peermanager/peermanager_test.go b/bitswap/internal/peermanager/peermanager_test.go index 0305b9f90..f979b2c81 100644 --- a/bitswap/internal/peermanager/peermanager_test.go +++ b/bitswap/internal/peermanager/peermanager_test.go @@ -99,7 +99,7 @@ func TestAddingAndRemovingPeers(t *testing.T) { t.Fatal("Peers connected that shouldn't be connected") } - // removing a peer with only one reference + // disconnect a peer peerManager.Disconnected(peer1) connectedPeers = peerManager.ConnectedPeers() @@ -107,13 +107,12 @@ func TestAddingAndRemovingPeers(t *testing.T) { t.Fatal("Peer should have been disconnected but was not") } - // connecting a peer twice, then disconnecting once, should stay in queue - peerManager.Connected(peer2, nil) - peerManager.Disconnected(peer2) + // reconnect peer + peerManager.Connected(peer1, nil) connectedPeers = peerManager.ConnectedPeers() - if !testutil.ContainsPeer(connectedPeers, peer2) { - t.Fatal("Peer was disconnected but should not have been") + if !testutil.ContainsPeer(connectedPeers, peer1) { + t.Fatal("Peer should have been connected but was not") } } diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 8a02fcea5..7ca07dac9 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -113,7 +113,7 @@ func (s *streamMessageSender) Connect(ctx context.Context) (stream network.Strea } stream, err = s.bsnet.newStreamToPeer(ctx, s.to) - if err != nil { + if err == nil { s.stream = stream return s.stream, nil } diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index 5e0f512bc..96e14b993 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" - tn "github.com/ipfs/go-bitswap/testnet" bsmsg "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" bsnet "github.com/ipfs/go-bitswap/network" + tn 
"github.com/ipfs/go-bitswap/testnet" blocksutil "github.com/ipfs/go-ipfs-blocksutil" mockrouting "github.com/ipfs/go-ipfs-routing/mock" @@ -170,7 +170,7 @@ func TestSupportsHave(t *testing.T) { mr := mockrouting.NewServer() streamNet, err := tn.StreamNet(ctx, mn, mr) if err != nil { - t.Fatal("Unable to setup network") + t.Fatalf("Unable to setup network: %s", err) } type testCase struct { @@ -199,7 +199,9 @@ func TestSupportsHave(t *testing.T) { t.Fatal(err) } - senderCurrent, err := bsnet1.NewMessageSender(ctx, p2.ID()) + senderCurrent, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{ + SendTimeout: time.Second, + }) if err != nil { t.Fatal(err) } From 253abb395e3563269878ca264861ddef57e831fc Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 16 Apr 2020 10:27:27 -0400 Subject: [PATCH 0911/1035] fix: don't hang on to disconnected peer refs This commit was moved from ipfs/go-bitswap@ba4b52e7beb452c78df69dbf9c77d0fc0fa7ce5b --- bitswap/network/ipfs_impl.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 7ca07dac9..453160104 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -424,8 +424,11 @@ func (c *connectEventManager) Disconnected(p peer.ID) { state.refs-- c.conns[p] = state - if state.refs == 0 && state.responsive { - c.bsnet.receiver.PeerDisconnected(p) + if state.refs == 0 { + if state.responsive { + c.bsnet.receiver.PeerDisconnected(p) + } + delete(c.conns, p) } } From 4e3697fc689b6095fdd85237113ca117e3745437 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 16 Apr 2020 10:36:37 -0400 Subject: [PATCH 0912/1035] fix: shutdown message queue when there's a send error This commit was moved from ipfs/go-bitswap@189564eddc7650b7d715bb6a0d4885e5de1908ae --- bitswap/internal/messagequeue/messagequeue.go | 4 ++++ bitswap/network/ipfs_impl.go | 7 +++++++ 2 files changed, 11 insertions(+) diff --git 
a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index b08834f3d..c45a355ca 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -359,6 +359,8 @@ func (mq *MessageQueue) runQueue() { return case <-mq.ctx.Done(): if mq.sender != nil { + // TODO: should I call sender.Close() here also to stop + // and in progress connection? _ = mq.sender.Reset() } return @@ -415,6 +417,7 @@ func (mq *MessageQueue) sendMessage() { // If we fail to initialize the sender, the networking layer will // emit a Disconnect event and the MessageQueue will get cleaned up log.Infof("Could not open message sender to peer %s: %s", mq.p, err) + mq.Shutdown() return } @@ -439,6 +442,7 @@ func (mq *MessageQueue) sendMessage() { // If the message couldn't be sent, the networking layer will // emit a Disconnect event and the MessageQueue will get cleaned up log.Infof("Could not send message to peer %s: %s", mq.p, err) + mq.Shutdown() return } diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 453160104..bea3d6b09 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -112,6 +112,13 @@ func (s *streamMessageSender) Connect(ctx context.Context) (stream network.Strea return nil, err } + // Check if the sender has been closed + select { + case <-s.done: + return nil, nil + default: + } + stream, err = s.bsnet.newStreamToPeer(ctx, s.to) if err == nil { s.stream = stream From 9fe62d44fada90328d59167d183f5c8ec5c988b9 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 16 Apr 2020 11:13:42 -0400 Subject: [PATCH 0913/1035] refactor: extract Connection Event Manager to own file and add tests This commit was moved from ipfs/go-bitswap@37301bc32bee6fcade2267d7c34d3115158acc9e --- bitswap/network/connecteventmanager.go | 92 +++++++++++++ bitswap/network/connecteventmanager_test.go | 144 ++++++++++++++++++++ bitswap/network/ipfs_impl.go | 82 
+---------- 3 files changed, 237 insertions(+), 81 deletions(-) create mode 100644 bitswap/network/connecteventmanager.go create mode 100644 bitswap/network/connecteventmanager_test.go diff --git a/bitswap/network/connecteventmanager.go b/bitswap/network/connecteventmanager.go new file mode 100644 index 000000000..100b6f96f --- /dev/null +++ b/bitswap/network/connecteventmanager.go @@ -0,0 +1,92 @@ +package network + +import ( + "sync" + + "github.com/libp2p/go-libp2p-core/peer" +) + +type ConnectionListener interface { + PeerConnected(peer.ID) + PeerDisconnected(peer.ID) +} + +type connectEventManager struct { + connListener ConnectionListener + lk sync.Mutex + conns map[peer.ID]*connState +} + +type connState struct { + refs int + responsive bool +} + +func newConnectEventManager(connListener ConnectionListener) *connectEventManager { + return &connectEventManager{ + connListener: connListener, + conns: make(map[peer.ID]*connState), + } +} + +func (c *connectEventManager) Connected(p peer.ID) { + c.lk.Lock() + defer c.lk.Unlock() + + state, ok := c.conns[p] + if !ok { + state = &connState{responsive: true} + c.conns[p] = state + } + state.refs++ + + if state.refs == 1 && state.responsive { + c.connListener.PeerConnected(p) + } +} + +func (c *connectEventManager) Disconnected(p peer.ID) { + c.lk.Lock() + defer c.lk.Unlock() + + state, ok := c.conns[p] + if !ok { + // Should never happen + return + } + state.refs-- + c.conns[p] = state + + if state.refs == 0 { + if state.responsive { + c.connListener.PeerDisconnected(p) + } + delete(c.conns, p) + } +} + +func (c *connectEventManager) MarkUnresponsive(p peer.ID) { + c.lk.Lock() + defer c.lk.Unlock() + + state, ok := c.conns[p] + if !ok { + return + } + state.responsive = false + c.conns[p] = state + + c.connListener.PeerDisconnected(p) +} + +func (c *connectEventManager) OnMessage(p peer.ID) { + c.lk.Lock() + defer c.lk.Unlock() + + state, ok := c.conns[p] + if ok && !state.responsive { + state.responsive = true + 
c.conns[p] = state + c.connListener.PeerConnected(p) + } +} diff --git a/bitswap/network/connecteventmanager_test.go b/bitswap/network/connecteventmanager_test.go new file mode 100644 index 000000000..fb81abeec --- /dev/null +++ b/bitswap/network/connecteventmanager_test.go @@ -0,0 +1,144 @@ +package network + +import ( + "testing" + + "github.com/ipfs/go-bitswap/internal/testutil" + "github.com/libp2p/go-libp2p-core/peer" +) + +type mockConnListener struct { + conns map[peer.ID]int +} + +func newMockConnListener() *mockConnListener { + return &mockConnListener{ + conns: make(map[peer.ID]int), + } +} + +func (cl *mockConnListener) PeerConnected(p peer.ID) { + cl.conns[p]++ +} + +func (cl *mockConnListener) PeerDisconnected(p peer.ID) { + cl.conns[p]-- +} + +func TestConnectEventManagerConnectionCount(t *testing.T) { + connListener := newMockConnListener() + peers := testutil.GeneratePeers(2) + cem := newConnectEventManager(connListener) + + // Peer A: 1 Connection + cem.Connected(peers[0]) + if connListener.conns[peers[0]] != 1 { + t.Fatal("Expected Connected event") + } + + // Peer A: 2 Connections + cem.Connected(peers[0]) + if connListener.conns[peers[0]] != 1 { + t.Fatal("Unexpected no Connected event for the same peer") + } + + // Peer A: 2 Connections + // Peer B: 1 Connection + cem.Connected(peers[1]) + if connListener.conns[peers[1]] != 1 { + t.Fatal("Expected Connected event") + } + + // Peer A: 2 Connections + // Peer B: 0 Connections + cem.Disconnected(peers[1]) + if connListener.conns[peers[1]] != 0 { + t.Fatal("Expected Disconnected event") + } + + // Peer A: 1 Connection + // Peer B: 0 Connections + cem.Disconnected(peers[0]) + if connListener.conns[peers[0]] != 1 { + t.Fatal("Expected no Disconnected event for peer with one remaining conn") + } + + // Peer A: 0 Connections + // Peer B: 0 Connections + cem.Disconnected(peers[0]) + if connListener.conns[peers[0]] != 0 { + t.Fatal("Expected Disconnected event") + } +} + +func 
TestConnectEventManagerMarkUnresponsive(t *testing.T) { + connListener := newMockConnListener() + p := testutil.GeneratePeers(1)[0] + cem := newConnectEventManager(connListener) + + // Peer A: 1 Connection + cem.Connected(p) + if connListener.conns[p] != 1 { + t.Fatal("Expected Connected event") + } + + // Peer A: 1 Connection + cem.MarkUnresponsive(p) + if connListener.conns[p] != 0 { + t.Fatal("Expected Disconnected event") + } + + // Peer A: 2 Connections + cem.Connected(p) + if connListener.conns[p] != 0 { + t.Fatal("Expected no Connected event for unresponsive peer") + } + + // Peer A: 2 Connections + cem.OnMessage(p) + if connListener.conns[p] != 1 { + t.Fatal("Expected Connected event for newly responsive peer") + } + + // Peer A: 2 Connections + cem.OnMessage(p) + if connListener.conns[p] != 1 { + t.Fatal("Expected no further Connected event for subsequent messages") + } + + // Peer A: 1 Connection + cem.Disconnected(p) + if connListener.conns[p] != 1 { + t.Fatal("Expected no Disconnected event for peer with one remaining conn") + } + + // Peer A: 0 Connections + cem.Disconnected(p) + if connListener.conns[p] != 0 { + t.Fatal("Expected Disconnected event") + } +} + +func TestConnectEventManagerDisconnectAfterMarkUnresponsive(t *testing.T) { + connListener := newMockConnListener() + p := testutil.GeneratePeers(1)[0] + cem := newConnectEventManager(connListener) + + // Peer A: 1 Connection + cem.Connected(p) + if connListener.conns[p] != 1 { + t.Fatal("Expected Connected event") + } + + // Peer A: 1 Connection + cem.MarkUnresponsive(p) + if connListener.conns[p] != 0 { + t.Fatal("Expected Disconnected event") + } + + // Peer A: 0 Connections + cem.Disconnected(p) + if connListener.conns[p] != 0 { + t.Fatal("Expected not to receive a second Disconnected event") + } +} diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index bea3d6b09..acf605253 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -4,7 +4,6 @@ 
import ( "context" "fmt" "io" - "sync" "sync/atomic" "time" @@ -44,7 +43,6 @@ func NewFromIpfsHost(host host.Host, r routing.ContentRouting, opts ...NetOpt) B supportedProtocols: s.SupportedProtocols, } - bitswapNetwork.connectEvtMgr = newConnectEventManager(&bitswapNetwork) return &bitswapNetwork } @@ -303,6 +301,7 @@ func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (network.Stre func (bsnet *impl) SetDelegate(r Receiver) { bsnet.receiver = r + bsnet.connectEvtMgr = newConnectEventManager(r) for _, proto := range bsnet.supportedProtocols { bsnet.host.SetStreamHandler(proto, bsnet.handleNewStream) } @@ -386,85 +385,6 @@ func (bsnet *impl) Stats() Stats { } } -type connectEventManager struct { - bsnet *impl - lk sync.Mutex - conns map[peer.ID]*connState -} - -type connState struct { - refs int - responsive bool -} - -func newConnectEventManager(bsnet *impl) *connectEventManager { - return &connectEventManager{ - bsnet: bsnet, - conns: make(map[peer.ID]*connState), - } -} - -func (c *connectEventManager) Connected(p peer.ID) { - c.lk.Lock() - defer c.lk.Unlock() - - state, ok := c.conns[p] - if !ok { - state = &connState{responsive: true} - } - state.refs++ - - if state.refs == 1 && state.responsive { - c.bsnet.receiver.PeerConnected(p) - } -} - -func (c *connectEventManager) Disconnected(p peer.ID) { - c.lk.Lock() - defer c.lk.Unlock() - - state, ok := c.conns[p] - if !ok { - // Should never happen - return - } - state.refs-- - c.conns[p] = state - - if state.refs == 0 { - if state.responsive { - c.bsnet.receiver.PeerDisconnected(p) - } - delete(c.conns, p) - } -} - -func (c *connectEventManager) MarkUnresponsive(p peer.ID) { - c.lk.Lock() - defer c.lk.Unlock() - - state, ok := c.conns[p] - if !ok { - return - } - state.responsive = false - c.conns[p] = state - - c.bsnet.receiver.PeerDisconnected(p) -} - -func (c *connectEventManager) OnMessage(p peer.ID) { - c.lk.Lock() - defer c.lk.Unlock() - - state, ok := c.conns[p] - if ok && !state.responsive 
{ - state.responsive = true - c.conns[p] = state - c.bsnet.receiver.PeerConnected(p) - } -} - type netNotifiee impl func (nn *netNotifiee) impl() *impl { From 2b611253a5176721862afec80945b9450e4be4f0 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 16 Apr 2020 16:55:37 -0400 Subject: [PATCH 0914/1035] test: add more testing for ipfs_impl This commit was moved from ipfs/go-bitswap@b62e7fd0e103db39d54ca3c7a879729eae0a6bf5 --- bitswap/network/connecteventmanager.go | 2 +- bitswap/network/ipfs_impl.go | 83 +++++--- bitswap/network/ipfs_impl_test.go | 253 ++++++++++++++++++++++++- 3 files changed, 306 insertions(+), 32 deletions(-) diff --git a/bitswap/network/connecteventmanager.go b/bitswap/network/connecteventmanager.go index 100b6f96f..67082c4d7 100644 --- a/bitswap/network/connecteventmanager.go +++ b/bitswap/network/connecteventmanager.go @@ -70,7 +70,7 @@ func (c *connectEventManager) MarkUnresponsive(p peer.ID) { defer c.lk.Unlock() state, ok := c.conns[p] - if !ok { + if !ok || !state.responsive { return } state.responsive = false diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index acf605253..e3f6cc271 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -95,18 +95,13 @@ type streamMessageSender struct { done chan struct{} } -func (s *streamMessageSender) Connect(ctx context.Context) (stream network.Stream, err error) { - defer func() { - if err != nil { - s.bsnet.connectEvtMgr.MarkUnresponsive(s.to) - } - }() - +// Open a stream to the remote peer +func (s *streamMessageSender) Connect(ctx context.Context) (network.Stream, error) { if s.stream != nil { return s.stream, nil } - if err = s.bsnet.ConnectTo(ctx, s.to); err != nil { + if err := s.bsnet.ConnectTo(ctx, s.to); err != nil { return nil, err } @@ -117,38 +112,59 @@ func (s *streamMessageSender) Connect(ctx context.Context) (stream network.Strea default: } - stream, err = s.bsnet.newStreamToPeer(ctx, s.to) - if err == nil { - s.stream = stream 
- return s.stream, nil + stream, err := s.bsnet.newStreamToPeer(ctx, s.to) + if err != nil { + return nil, err } - return nil, err + + s.stream = stream + return s.stream, nil } +// Reset the stream func (s *streamMessageSender) Reset() error { - err := s.stream.Reset() - s.stream = nil - return err + if s.stream != nil { + err := s.stream.Reset() + s.stream = nil + return err + } + return nil } +// Close the stream func (s *streamMessageSender) Close() error { close(s.done) return helpers.FullClose(s.stream) } +// Indicates whether the peer supports HAVE / DONT_HAVE messages func (s *streamMessageSender) SupportsHave() bool { return s.bsnet.SupportsHave(s.stream.Protocol()) } +// Send a message to the peer, attempting multiple times func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { - // Try to send the message repeatedly + return s.multiAttempt(ctx, func(fnctx context.Context) error { + return s.send(fnctx, msg) + }) +} + +// Perform a function with multiple attempts, and a timeout +func (s *streamMessageSender) multiAttempt(ctx context.Context, fn func(context.Context) error) error { + // Try to call the function repeatedly var err error for i := 0; i < s.opts.MaxRetries; i++ { - if err = s.attemptSend(ctx, msg); err == nil { - // Sent successfully + deadline := time.Now().Add(s.opts.SendTimeout) + sndctx, cancel := context.WithDeadline(ctx, deadline) + + if err = fn(sndctx); err == nil { + cancel() + // Attempt was successful return nil } + cancel() + // Attempt failed. 
// If the sender has been closed or the context cancelled, just bail out select { case <-ctx.Done(): @@ -161,6 +177,7 @@ func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMess // Failed to send so reset stream and try again _ = s.Reset() + // Failed too many times so mark the peer as unresponsive and return an error if i == s.opts.MaxRetries { s.bsnet.connectEvtMgr.MarkUnresponsive(s.to) return err @@ -179,17 +196,15 @@ func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMess return err } -func (s *streamMessageSender) attemptSend(ctx context.Context, msg bsmsg.BitSwapMessage) error { - sndctx, cancel := context.WithTimeout(ctx, s.opts.SendTimeout) - defer cancel() - - stream, err := s.Connect(sndctx) +// Send a message to the peer +func (s *streamMessageSender) send(ctx context.Context, msg bsmsg.BitSwapMessage) error { + stream, err := s.Connect(ctx) if err != nil { log.Infof("failed to open stream to %s: %s", s.to, err) return err } - if err = s.bsnet.msgToStream(sndctx, stream, msg); err != nil { + if err = s.bsnet.msgToStream(ctx, stream, msg); err != nil { log.Infof("failed to send message to %s: %s", s.to, err) return err } @@ -256,6 +271,16 @@ func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg. 
} func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID, opts *MessageSenderOpts) (MessageSender, error) { + if opts.MaxRetries == 0 { + opts.MaxRetries = 3 + } + if opts.SendTimeout == 0 { + opts.SendTimeout = sendMessageTimeout + } + if opts.SendErrorBackoff == 0 { + opts.SendErrorBackoff = 100 * time.Millisecond + } + sender := &streamMessageSender{ to: p, bsnet: bsnet, @@ -263,13 +288,15 @@ func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID, opts *Messag done: make(chan struct{}), } - conctx, cancel := context.WithTimeout(ctx, sender.opts.SendTimeout) - defer cancel() + err := sender.multiAttempt(ctx, func(fnctx context.Context) error { + _, err := sender.Connect(fnctx) + return err + }) - _, err := sender.Connect(conctx) if err != nil { return nil, err } + return sender, nil } diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index 96e14b993..6311c63dd 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -2,6 +2,8 @@ package network_test import ( "context" + "fmt" + "sync" "testing" "time" @@ -9,9 +11,12 @@ import ( pb "github.com/ipfs/go-bitswap/message/pb" bsnet "github.com/ipfs/go-bitswap/network" tn "github.com/ipfs/go-bitswap/testnet" + ds "github.com/ipfs/go-datastore" blocksutil "github.com/ipfs/go-ipfs-blocksutil" mockrouting "github.com/ipfs/go-ipfs-routing/mock" + "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/protocol" tnet "github.com/libp2p/go-libp2p-testing/net" @@ -60,6 +65,90 @@ func (r *receiver) PeerDisconnected(p peer.ID) { r.connectionEvent <- struct{}{} } +var mockNetErr = fmt.Errorf("network err") + +type ErrStream struct { + network.Stream + lk sync.Mutex + err bool + timingOut bool +} + +type ErrHost struct { + host.Host + lk sync.Mutex + err bool + timingOut bool + streams []*ErrStream +} + +func (es *ErrStream) Write(b []byte) 
(int, error) { + es.lk.Lock() + defer es.lk.Unlock() + + if es.err { + return 0, mockNetErr + } + if es.timingOut { + return 0, context.DeadlineExceeded + } + return es.Stream.Write(b) +} + +func (eh *ErrHost) Connect(ctx context.Context, pi peer.AddrInfo) error { + eh.lk.Lock() + defer eh.lk.Unlock() + + if eh.err { + return mockNetErr + } + if eh.timingOut { + return context.DeadlineExceeded + } + return eh.Host.Connect(ctx, pi) +} + +func (eh *ErrHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (network.Stream, error) { + eh.lk.Lock() + defer eh.lk.Unlock() + + if eh.err { + return nil, mockNetErr + } + if eh.timingOut { + return nil, context.DeadlineExceeded + } + stream, err := eh.Host.NewStream(ctx, p, pids...) + estrm := &ErrStream{Stream: stream, err: eh.err, timingOut: eh.timingOut} + + eh.streams = append(eh.streams, estrm) + return estrm, err +} + +func (eh *ErrHost) setErrorState(erroring bool) { + eh.lk.Lock() + defer eh.lk.Unlock() + + eh.err = erroring + for _, s := range eh.streams { + s.lk.Lock() + s.err = erroring + s.lk.Unlock() + } +} + +func (eh *ErrHost) setTimeoutState(timingOut bool) { + eh.lk.Lock() + defer eh.lk.Unlock() + + eh.timingOut = timingOut + for _, s := range eh.streams { + s.lk.Lock() + s.timingOut = timingOut + s.lk.Unlock() + } +} + func TestMessageSendAndReceive(t *testing.T) { // create network ctx := context.Background() @@ -164,6 +253,166 @@ func TestMessageSendAndReceive(t *testing.T) { } } +func TestMessageResendAfterError(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + // create network + mn := mocknet.New(ctx) + mr := mockrouting.NewServer() + streamNet, err := tn.StreamNet(ctx, mn, mr) + if err != nil { + t.Fatal("Unable to setup network") + } + p1 := tnet.RandIdentityOrFatal(t) + p2 := tnet.RandIdentityOrFatal(t) + + h1, err := mn.AddPeer(p1.PrivateKey(), p1.Address()) + if err != nil { + t.Fatal(err) + } + + // Create a special host 
that we can force to start returning errors + eh := &ErrHost{ + Host: h1, + err: false, + } + routing := mr.ClientWithDatastore(context.TODO(), p1, ds.NewMapDatastore()) + bsnet1 := bsnet.NewFromIpfsHost(eh, routing) + + bsnet2 := streamNet.Adapter(p2) + r1 := newReceiver() + r2 := newReceiver() + bsnet1.SetDelegate(r1) + bsnet2.SetDelegate(r2) + + err = mn.LinkAll() + if err != nil { + t.Fatal(err) + } + err = bsnet1.ConnectTo(ctx, p2.ID()) + if err != nil { + t.Fatal(err) + } + err = bsnet2.ConnectTo(ctx, p1.ID()) + if err != nil { + t.Fatal(err) + } + + blockGenerator := blocksutil.NewBlockGenerator() + block1 := blockGenerator.Next() + msg := bsmsg.New(false) + msg.AddEntry(block1.Cid(), 1, pb.Message_Wantlist_Block, true) + + testSendErrorBackoff := 100 * time.Millisecond + ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{ + MaxRetries: 3, + SendTimeout: 100 * time.Millisecond, + SendErrorBackoff: testSendErrorBackoff, + }) + if err != nil { + t.Fatal(err) + } + + <-r1.connectionEvent + + // Return an error from the networking layer the next time we try to send + // a message + eh.setErrorState(true) + + go func() { + time.Sleep(testSendErrorBackoff / 2) + // Stop throwing errors so that the following attempt to send succeeds + eh.setErrorState(false) + }() + + // Send message with retries, first one should fail, then subsequent + // message should succeed + err = ms.SendMsg(ctx, msg) + if err != nil { + t.Fatal(err) + } + + select { + case <-ctx.Done(): + t.Fatal("did not receive message sent") + case <-r2.messageReceived: + } +} + +func TestMessageSendTimeout(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + // create network + mn := mocknet.New(ctx) + mr := mockrouting.NewServer() + streamNet, err := tn.StreamNet(ctx, mn, mr) + if err != nil { + t.Fatal("Unable to setup network") + } + p1 := tnet.RandIdentityOrFatal(t) + p2 := tnet.RandIdentityOrFatal(t) + + h1, err := 
mn.AddPeer(p1.PrivateKey(), p1.Address()) + if err != nil { + t.Fatal(err) + } + + // Create a special host that we can force to start timing out + eh := &ErrHost{ + Host: h1, + err: false, + } + routing := mr.ClientWithDatastore(context.TODO(), p1, ds.NewMapDatastore()) + bsnet1 := bsnet.NewFromIpfsHost(eh, routing) + + bsnet2 := streamNet.Adapter(p2) + r1 := newReceiver() + r2 := newReceiver() + bsnet1.SetDelegate(r1) + bsnet2.SetDelegate(r2) + + err = mn.LinkAll() + if err != nil { + t.Fatal(err) + } + err = bsnet1.ConnectTo(ctx, p2.ID()) + if err != nil { + t.Fatal(err) + } + err = bsnet2.ConnectTo(ctx, p1.ID()) + if err != nil { + t.Fatal(err) + } + + blockGenerator := blocksutil.NewBlockGenerator() + block1 := blockGenerator.Next() + msg := bsmsg.New(false) + msg.AddEntry(block1.Cid(), 1, pb.Message_Wantlist_Block, true) + + ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{ + MaxRetries: 3, + SendTimeout: 100 * time.Millisecond, + SendErrorBackoff: 100 * time.Millisecond, + }) + if err != nil { + t.Fatal(err) + } + <-r1.connectionEvent + + // Return a DeadlineExceeded error from the networking layer the next time we try to + // send a message + eh.setTimeoutState(true) + + // Send message with retries, first one should fail, then subsequent + // message should succeed + err = ms.SendMsg(ctx, msg) + if err == nil { + t.Fatal("Expected error from SednMsg") + } +} + func TestSupportsHave(t *testing.T) { ctx := context.Background() mn := mocknet.New(ctx) @@ -199,9 +448,7 @@ func TestSupportsHave(t *testing.T) { t.Fatal(err) } - senderCurrent, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{ - SendTimeout: time.Second, - }) + senderCurrent, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{}) if err != nil { t.Fatal(err) } From 26155b5db37bccba24f30ee011c769417bf9810d Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 17 Apr 2020 11:49:34 -0400 Subject: [PATCH 0915/1035] feat: dont retry if 
connect error is multistream.ErrNotSupported This commit was moved from ipfs/go-bitswap@3b40d49d0fdacdfb12fe4e431e3724ad0749b7e9 --- bitswap/network/ipfs_impl.go | 13 ++- bitswap/network/ipfs_impl_test.go | 141 ++++++++++++++++++++++++------ 2 files changed, 123 insertions(+), 31 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index e3f6cc271..cc1d0fd1f 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -2,6 +2,7 @@ package network import ( "context" + "errors" "fmt" "io" "sync/atomic" @@ -22,6 +23,7 @@ import ( "github.com/libp2p/go-libp2p/p2p/protocol/ping" msgio "github.com/libp2p/go-msgio" ma "github.com/multiformats/go-multiaddr" + "github.com/multiformats/go-multistream" ) var log = logging.Logger("bitswap_network") @@ -164,7 +166,8 @@ func (s *streamMessageSender) multiAttempt(ctx context.Context, fn func(context. } cancel() - // Attempt failed. + // Attempt failed + // If the sender has been closed or the context cancelled, just bail out select { case <-ctx.Done(): @@ -174,11 +177,17 @@ func (s *streamMessageSender) multiAttempt(ctx context.Context, fn func(context. 
default: } + // Protocol is not supported, so no need to try multiple times + if errors.Is(err, multistream.ErrNotSupported) { + s.bsnet.connectEvtMgr.MarkUnresponsive(s.to) + return err + } + // Failed to send so reset stream and try again _ = s.Reset() // Failed too many times so mark the peer as unresponsive and return an error - if i == s.opts.MaxRetries { + if i == s.opts.MaxRetries-1 { s.bsnet.connectEvtMgr.MarkUnresponsive(s.to) return err } diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index 6311c63dd..454bb4109 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -14,6 +14,7 @@ import ( ds "github.com/ipfs/go-datastore" blocksutil "github.com/ipfs/go-ipfs-blocksutil" mockrouting "github.com/ipfs/go-ipfs-routing/mock" + "github.com/multiformats/go-multistream" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/network" @@ -27,7 +28,7 @@ import ( type receiver struct { peers map[peer.ID]struct{} messageReceived chan struct{} - connectionEvent chan struct{} + connectionEvent chan bool lastMessage bsmsg.BitSwapMessage lastSender peer.ID } @@ -36,7 +37,7 @@ func newReceiver() *receiver { return &receiver{ peers: make(map[peer.ID]struct{}), messageReceived: make(chan struct{}), - connectionEvent: make(chan struct{}, 1), + connectionEvent: make(chan bool, 1), } } @@ -57,12 +58,12 @@ func (r *receiver) ReceiveError(err error) { func (r *receiver) PeerConnected(p peer.ID) { r.peers[p] = struct{}{} - r.connectionEvent <- struct{}{} + r.connectionEvent <- true } func (r *receiver) PeerDisconnected(p peer.ID) { delete(r.peers, p) - r.connectionEvent <- struct{}{} + r.connectionEvent <- false } var mockNetErr = fmt.Errorf("network err") @@ -70,14 +71,14 @@ var mockNetErr = fmt.Errorf("network err") type ErrStream struct { network.Stream lk sync.Mutex - err bool + err error timingOut bool } type ErrHost struct { host.Host lk sync.Mutex - err bool + err error timingOut bool 
streams []*ErrStream } @@ -86,8 +87,8 @@ func (es *ErrStream) Write(b []byte) (int, error) { es.lk.Lock() defer es.lk.Unlock() - if es.err { - return 0, mockNetErr + if es.err != nil { + return 0, es.err } if es.timingOut { return 0, context.DeadlineExceeded @@ -99,8 +100,8 @@ func (eh *ErrHost) Connect(ctx context.Context, pi peer.AddrInfo) error { eh.lk.Lock() defer eh.lk.Unlock() - if eh.err { - return mockNetErr + if eh.err != nil { + return eh.err } if eh.timingOut { return context.DeadlineExceeded @@ -112,7 +113,7 @@ func (eh *ErrHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID eh.lk.Lock() defer eh.lk.Unlock() - if eh.err { + if eh.err != nil { return nil, mockNetErr } if eh.timingOut { @@ -125,14 +126,14 @@ func (eh *ErrHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID return estrm, err } -func (eh *ErrHost) setErrorState(erroring bool) { +func (eh *ErrHost) setError(err error) { eh.lk.Lock() defer eh.lk.Unlock() - eh.err = erroring + eh.err = err for _, s := range eh.streams { s.lk.Lock() - s.err = erroring + s.err = err s.lk.Unlock() } } @@ -273,10 +274,7 @@ func TestMessageResendAfterError(t *testing.T) { } // Create a special host that we can force to start returning errors - eh := &ErrHost{ - Host: h1, - err: false, - } + eh := &ErrHost{Host: h1} routing := mr.ClientWithDatastore(context.TODO(), p1, ds.NewMapDatastore()) bsnet1 := bsnet.NewFromIpfsHost(eh, routing) @@ -294,6 +292,11 @@ func TestMessageResendAfterError(t *testing.T) { if err != nil { t.Fatal(err) } + isConnected := <-r1.connectionEvent + if !isConnected { + t.Fatal("Expected connect event") + } + err = bsnet2.ConnectTo(ctx, p1.ID()) if err != nil { t.Fatal(err) @@ -314,16 +317,14 @@ func TestMessageResendAfterError(t *testing.T) { t.Fatal(err) } - <-r1.connectionEvent - // Return an error from the networking layer the next time we try to send // a message - eh.setErrorState(true) + eh.setError(mockNetErr) go func() { time.Sleep(testSendErrorBackoff / 
2) // Stop throwing errors so that the following attempt to send succeeds - eh.setErrorState(false) + eh.setError(nil) }() // Send message with retries, first one should fail, then subsequent @@ -360,10 +361,7 @@ func TestMessageSendTimeout(t *testing.T) { } // Create a special host that we can force to start timing out - eh := &ErrHost{ - Host: h1, - err: false, - } + eh := &ErrHost{Host: h1} routing := mr.ClientWithDatastore(context.TODO(), p1, ds.NewMapDatastore()) bsnet1 := bsnet.NewFromIpfsHost(eh, routing) @@ -381,6 +379,11 @@ func TestMessageSendTimeout(t *testing.T) { if err != nil { t.Fatal(err) } + isConnected := <-r1.connectionEvent + if !isConnected { + t.Fatal("Expected connect event") + } + err = bsnet2.ConnectTo(ctx, p1.ID()) if err != nil { t.Fatal(err) @@ -399,18 +402,98 @@ func TestMessageSendTimeout(t *testing.T) { if err != nil { t.Fatal(err) } - <-r1.connectionEvent // Return a DeadlineExceeded error from the networking layer the next time we try to // send a message eh.setTimeoutState(true) - // Send message with retries, first one should fail, then subsequent - // message should succeed + // Send message with retries, all attempts should fail err = ms.SendMsg(ctx, msg) if err == nil { t.Fatal("Expected error from SednMsg") } + + select { + case <-time.After(500 * time.Millisecond): + t.Fatal("Did not receive disconnect event") + case isConnected = <-r1.connectionEvent: + if isConnected { + t.Fatal("Expected disconnect event (got connect event)") + } + } +} + +func TestMessageSendNotSupportedResponse(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + // create network + mn := mocknet.New(ctx) + mr := mockrouting.NewServer() + streamNet, err := tn.StreamNet(ctx, mn, mr) + if err != nil { + t.Fatal("Unable to setup network") + } + p1 := tnet.RandIdentityOrFatal(t) + p2 := tnet.RandIdentityOrFatal(t) + + h1, err := mn.AddPeer(p1.PrivateKey(), p1.Address()) + if err != nil { + t.Fatal(err) 
+ } + + // Create a special host that responds with ErrNotSupported + eh := &ErrHost{Host: h1} + routing := mr.ClientWithDatastore(context.TODO(), p1, ds.NewMapDatastore()) + bsnet1 := bsnet.NewFromIpfsHost(eh, routing) + + bsnet2 := streamNet.Adapter(p2) + r1 := newReceiver() + r2 := newReceiver() + bsnet1.SetDelegate(r1) + bsnet2.SetDelegate(r2) + + err = mn.LinkAll() + if err != nil { + t.Fatal(err) + } + err = bsnet1.ConnectTo(ctx, p2.ID()) + if err != nil { + t.Fatal(err) + } + isConnected := <-r1.connectionEvent + if !isConnected { + t.Fatal("Expected connect event") + } + + err = bsnet2.ConnectTo(ctx, p1.ID()) + if err != nil { + t.Fatal(err) + } + + blockGenerator := blocksutil.NewBlockGenerator() + block1 := blockGenerator.Next() + msg := bsmsg.New(false) + msg.AddEntry(block1.Cid(), 1, pb.Message_Wantlist_Block, true) + + eh.setError(multistream.ErrNotSupported) + _, err = bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{ + MaxRetries: 3, + SendTimeout: 100 * time.Millisecond, + SendErrorBackoff: 100 * time.Millisecond, + }) + if err == nil { + t.Fatal("Expected ErrNotSupported") + } + + select { + case <-time.After(500 * time.Millisecond): + t.Fatal("Did not receive disconnect event") + case isConnected = <-r1.connectionEvent: + if isConnected { + t.Fatal("Expected disconnect event (got connect event)") + } + } } func TestSupportsHave(t *testing.T) { From 0819d40dfc3f3c7a843686b4f77ea46c791f99fd Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 17 Apr 2020 11:56:14 -0400 Subject: [PATCH 0916/1035] fix: copy opts in ipfs_impl This commit was moved from ipfs/go-bitswap@59e7aa4226fabeb9ad69d3c3be2e71b70d709b97 --- bitswap/network/ipfs_impl.go | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index cc1d0fd1f..94afd61e1 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -280,15 +280,7 @@ func (bsnet *impl) 
msgToStream(ctx context.Context, s network.Stream, msg bsmsg. } func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID, opts *MessageSenderOpts) (MessageSender, error) { - if opts.MaxRetries == 0 { - opts.MaxRetries = 3 - } - if opts.SendTimeout == 0 { - opts.SendTimeout = sendMessageTimeout - } - if opts.SendErrorBackoff == 0 { - opts.SendErrorBackoff = 100 * time.Millisecond - } + opts = setDefaultOpts(opts) sender := &streamMessageSender{ to: p, @@ -309,6 +301,20 @@ func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID, opts *Messag return sender, nil } +func setDefaultOpts(opts *MessageSenderOpts) *MessageSenderOpts { + copy := *opts + if opts.MaxRetries == 0 { + copy.MaxRetries = 3 + } + if opts.SendTimeout == 0 { + copy.SendTimeout = sendMessageTimeout + } + if opts.SendErrorBackoff == 0 { + copy.SendErrorBackoff = 100 * time.Millisecond + } + return &copy +} + func (bsnet *impl) SendMessage( ctx context.Context, p peer.ID, From 3f521fe2619a307691f6de2fdaee3153914eaa01 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 17 Apr 2020 11:58:35 -0400 Subject: [PATCH 0917/1035] fix: remove extraneous map writes in connectionEventManager This commit was moved from ipfs/go-bitswap@c233956cc9f9f0f7142235a9f15850cca730d043 --- bitswap/network/connecteventmanager.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/bitswap/network/connecteventmanager.go b/bitswap/network/connecteventmanager.go index 67082c4d7..e86d6839d 100644 --- a/bitswap/network/connecteventmanager.go +++ b/bitswap/network/connecteventmanager.go @@ -55,7 +55,6 @@ func (c *connectEventManager) Disconnected(p peer.ID) { return } state.refs-- - c.conns[p] = state if state.refs == 0 { if state.responsive { @@ -74,7 +73,6 @@ func (c *connectEventManager) MarkUnresponsive(p peer.ID) { return } state.responsive = false - c.conns[p] = state c.connListener.PeerDisconnected(p) } @@ -86,7 +84,6 @@ func (c *connectEventManager) OnMessage(p peer.ID) { state, ok := c.conns[p] if ok && 
!state.responsive { state.responsive = true - c.conns[p] = state c.connListener.PeerConnected(p) } } From 486085fea3317d8bcde246175f094e97dc832f57 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 17 Apr 2020 12:06:13 -0400 Subject: [PATCH 0918/1035] fix: perf improvement for connectEventManager This commit was moved from ipfs/go-bitswap@c26bd59db63f49c3b3d21c4e31bcc861bc0312dc --- bitswap/network/connecteventmanager.go | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/bitswap/network/connecteventmanager.go b/bitswap/network/connecteventmanager.go index e86d6839d..b28e8e5b8 100644 --- a/bitswap/network/connecteventmanager.go +++ b/bitswap/network/connecteventmanager.go @@ -13,7 +13,7 @@ type ConnectionListener interface { type connectEventManager struct { connListener ConnectionListener - lk sync.Mutex + lk sync.RWMutex conns map[peer.ID]*connState } @@ -78,12 +78,28 @@ func (c *connectEventManager) MarkUnresponsive(p peer.ID) { } func (c *connectEventManager) OnMessage(p peer.ID) { + // This is a frequent operation so to avoid different message arrivals + // getting blocked by a write lock, first take a read lock to check if + // we need to modify state + c.lk.RLock() + state, ok := c.conns[p] + c.lk.RUnlock() + + if !ok || state.responsive { + return + } + + // We need to make a modification so now take a write lock c.lk.Lock() defer c.lk.Unlock() - state, ok := c.conns[p] - if ok && !state.responsive { - state.responsive = true - c.connListener.PeerConnected(p) + // Note: state may have changed in the time between when read lock + // was released and write lock taken, so check again + state, ok = c.conns[p] + if !ok || state.responsive { + return } + + state.responsive = true + c.connListener.PeerConnected(p) } From a67c55a791622a11b5e384a2cd279e26ea26576f Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 17 Apr 2020 14:57:30 -0400 Subject: [PATCH 0919/1035] fix: simplify message queue shutdown This 
commit was moved from ipfs/go-bitswap@bdccb20e6aebd2f2343b860b51a1b9f2062e9e8b --- bitswap/internal/messagequeue/messagequeue.go | 22 ++++------ .../messagequeue/messagequeue_test.go | 43 ++++++------------- bitswap/network/ipfs_impl.go | 14 ------ 3 files changed, 23 insertions(+), 56 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index c45a355ca..2fb196650 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -56,6 +56,7 @@ type MessageNetwork interface { // MessageQueue implements queue of want messages to send to peers. type MessageQueue struct { ctx context.Context + shutdown func() p peer.ID network MessageNetwork dhTimeoutMgr DontHaveTimeoutManager @@ -63,7 +64,6 @@ type MessageQueue struct { sendErrorBackoff time.Duration outgoingWork chan time.Time - done chan struct{} // Take lock whenever any of these variables are modified wllock sync.Mutex @@ -170,8 +170,10 @@ func New(ctx context.Context, p peer.ID, network MessageNetwork, onDontHaveTimeo func newMessageQueue(ctx context.Context, p peer.ID, network MessageNetwork, maxMsgSize int, sendErrorBackoff time.Duration, dhTimeoutMgr DontHaveTimeoutManager) *MessageQueue { + ctx, cancel := context.WithCancel(ctx) mq := &MessageQueue{ ctx: ctx, + shutdown: cancel, p: p, network: network, dhTimeoutMgr: dhTimeoutMgr, @@ -180,7 +182,6 @@ func newMessageQueue(ctx context.Context, p peer.ID, network MessageNetwork, peerWants: newRecallWantList(), cancels: cid.NewSet(), outgoingWork: make(chan time.Time, 1), - done: make(chan struct{}), rebroadcastInterval: defaultRebroadcastInterval, sendErrorBackoff: sendErrorBackoff, priority: maxPriority, @@ -301,12 +302,17 @@ func (mq *MessageQueue) Startup() { // Shutdown stops the processing of messages for a message queue. 
func (mq *MessageQueue) Shutdown() { - close(mq.done) + mq.shutdown() } func (mq *MessageQueue) onShutdown() { // Shut down the DONT_HAVE timeout manager mq.dhTimeoutMgr.Shutdown() + + // Reset the streamMessageSender + if mq.sender != nil { + _ = mq.sender.Reset() + } } func (mq *MessageQueue) runQueue() { @@ -352,17 +358,7 @@ func (mq *MessageQueue) runQueue() { // in sendMessageDebounce. Send immediately. workScheduled = time.Time{} mq.sendIfReady() - case <-mq.done: - if mq.sender != nil { - mq.sender.Close() - } - return case <-mq.ctx.Done(): - if mq.sender != nil { - // TODO: should I call sender.Close() here also to stop - // and in progress connection? - _ = mq.sender.Reset() - } return } } diff --git a/bitswap/internal/messagequeue/messagequeue_test.go b/bitswap/internal/messagequeue/messagequeue_test.go index 38ffafa2b..344da41a5 100644 --- a/bitswap/internal/messagequeue/messagequeue_test.go +++ b/bitswap/internal/messagequeue/messagequeue_test.go @@ -82,17 +82,15 @@ func (fp *fakeDontHaveTimeoutMgr) pendingCount() int { type fakeMessageSender struct { lk sync.Mutex - fullClosed chan<- struct{} reset chan<- struct{} messagesSent chan<- []bsmsg.Entry supportsHave bool } -func newFakeMessageSender(fullClosed chan<- struct{}, reset chan<- struct{}, +func newFakeMessageSender(reset chan<- struct{}, messagesSent chan<- []bsmsg.Entry, supportsHave bool) *fakeMessageSender { return &fakeMessageSender{ - fullClosed: fullClosed, reset: reset, messagesSent: messagesSent, supportsHave: supportsHave, @@ -106,7 +104,7 @@ func (fms *fakeMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMess fms.messagesSent <- msg.Wantlist() return nil } -func (fms *fakeMessageSender) Close() error { fms.fullClosed <- struct{}{}; return nil } +func (fms *fakeMessageSender) Close() error { return nil } func (fms *fakeMessageSender) Reset() error { fms.reset <- struct{}{}; return nil } func (fms *fakeMessageSender) SupportsHave() bool { return fms.supportsHave } @@ -141,8 
+139,7 @@ func TestStartupAndShutdown(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) resetChan := make(chan struct{}, 1) - fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, true) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -170,11 +167,9 @@ func TestStartupAndShutdown(t *testing.T) { timeoutctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) defer cancel() select { - case <-fullClosedChan: case <-resetChan: - t.Fatal("message sender should have been closed but was reset") case <-timeoutctx.Done(): - t.Fatal("message sender should have been closed but wasn't") + t.Fatal("message sender should have been reset but wasn't") } } @@ -182,8 +177,7 @@ func TestSendingMessagesDeduped(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) resetChan := make(chan struct{}, 1) - fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, true) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -204,8 +198,7 @@ func TestSendingMessagesPartialDupe(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) resetChan := make(chan struct{}, 1) - fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, true) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -226,8 +219,7 @@ func 
TestSendingMessagesPriority(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) resetChan := make(chan struct{}, 1) - fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, true) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -294,8 +286,7 @@ func TestCancelOverridesPendingWants(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) resetChan := make(chan struct{}, 1) - fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, true) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -345,8 +336,7 @@ func TestWantOverridesPendingCancels(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) resetChan := make(chan struct{}, 1) - fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, true) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -392,8 +382,7 @@ func TestWantlistRebroadcast(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) resetChan := make(chan struct{}, 1) - fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, true) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] 
messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) @@ -488,8 +477,7 @@ func TestSendingLargeMessages(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) resetChan := make(chan struct{}, 1) - fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, true) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} dhtm := &fakeDontHaveTimeoutMgr{} peerID := testutil.GeneratePeers(1)[0] @@ -518,8 +506,7 @@ func TestSendToPeerThatDoesntSupportHave(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) resetChan := make(chan struct{}, 1) - fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, false) + fakeSender := newFakeMessageSender(resetChan, messagesSent, false) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] @@ -573,8 +560,7 @@ func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) resetChan := make(chan struct{}, 1) - fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, false) + fakeSender := newFakeMessageSender(resetChan, messagesSent, false) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] @@ -624,8 +610,7 @@ func BenchmarkMessageQueue(b *testing.B) { createQueue := func() *MessageQueue { messagesSent := make(chan []bsmsg.Entry) resetChan := make(chan struct{}, 1) - fullClosedChan := make(chan struct{}, 1) - fakeSender := newFakeMessageSender(fullClosedChan, resetChan, messagesSent, true) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} dhtm := &fakeDontHaveTimeoutMgr{} peerID := 
testutil.GeneratePeers(1)[0] diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 94afd61e1..6fa2f5357 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -94,7 +94,6 @@ type streamMessageSender struct { stream network.Stream bsnet *impl opts *MessageSenderOpts - done chan struct{} } // Open a stream to the remote peer @@ -107,13 +106,6 @@ func (s *streamMessageSender) Connect(ctx context.Context) (network.Stream, erro return nil, err } - // Check if the sender has been closed - select { - case <-s.done: - return nil, nil - default: - } - stream, err := s.bsnet.newStreamToPeer(ctx, s.to) if err != nil { return nil, err @@ -135,7 +127,6 @@ func (s *streamMessageSender) Reset() error { // Close the stream func (s *streamMessageSender) Close() error { - close(s.done) return helpers.FullClose(s.stream) } @@ -172,8 +163,6 @@ func (s *streamMessageSender) multiAttempt(ctx context.Context, fn func(context. select { case <-ctx.Done(): return nil - case <-s.done: - return nil default: } @@ -195,8 +184,6 @@ func (s *streamMessageSender) multiAttempt(ctx context.Context, fn func(context. select { case <-ctx.Done(): return nil - case <-s.done: - return nil case <-time.After(s.opts.SendErrorBackoff): // wait a short time in case disconnect notifications are still propagating log.Infof("send message to %s failed but context was not Done: %s", s.to, err) @@ -286,7 +273,6 @@ func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID, opts *Messag to: p, bsnet: bsnet, opts: opts, - done: make(chan struct{}), } err := sender.multiAttempt(ctx, func(fnctx context.Context) error { From 5242e59cd89a6919dde0254d348a68c319b2fd2a Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 17 Apr 2020 12:01:09 -0700 Subject: [PATCH 0920/1035] fix: avoid goroutine when receiving an error (#353) There's no reason to launch this async. 
This commit was moved from ipfs/go-bitswap@9cafdc24fbe94164912085aaba168c59f83ffbc0 --- bitswap/network/ipfs_impl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index b5661408d..890419bb9 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -259,7 +259,7 @@ func (bsnet *impl) handleNewStream(s network.Stream) { if err != nil { if err != io.EOF { _ = s.Reset() - go bsnet.receiver.ReceiveError(err) + bsnet.receiver.ReceiveError(err) log.Debugf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err) } return From 24a6ad83d895b58c6addea54c15c1d61ef92bc77 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 17 Apr 2020 15:38:12 -0400 Subject: [PATCH 0921/1035] fix: use explicit connected bool for streamMessageSender This commit was moved from ipfs/go-bitswap@a8ed651525f3feec12f5e69344eddc368eaca762 --- bitswap/network/ipfs_impl.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 6fa2f5357..daad69be1 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -90,15 +90,16 @@ type impl struct { } type streamMessageSender struct { - to peer.ID - stream network.Stream - bsnet *impl - opts *MessageSenderOpts + to peer.ID + stream network.Stream + connected bool + bsnet *impl + opts *MessageSenderOpts } // Open a stream to the remote peer func (s *streamMessageSender) Connect(ctx context.Context) (network.Stream, error) { - if s.stream != nil { + if s.connected { return s.stream, nil } @@ -112,6 +113,7 @@ func (s *streamMessageSender) Connect(ctx context.Context) (network.Stream, erro } s.stream = stream + s.connected = true return s.stream, nil } @@ -119,7 +121,7 @@ func (s *streamMessageSender) Connect(ctx context.Context) (network.Stream, erro func (s *streamMessageSender) Reset() error { if s.stream != nil { err := 
s.stream.Reset() - s.stream = nil + s.connected = false return err } return nil From 90dca276cd4ff08a940452a5f5b27eb20939e30a Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 17 Apr 2020 16:21:02 -0400 Subject: [PATCH 0922/1035] fix: ipfs_impl error handling This commit was moved from ipfs/go-bitswap@8894bb6a26765da19ee61510b415d660e6e59df6 --- bitswap/network/ipfs_impl.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index daad69be1..e57d37ce8 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -164,7 +164,7 @@ func (s *streamMessageSender) multiAttempt(ctx context.Context, fn func(context. // If the sender has been closed or the context cancelled, just bail out select { case <-ctx.Done(): - return nil + return ctx.Err() default: } @@ -185,7 +185,7 @@ func (s *streamMessageSender) multiAttempt(ctx context.Context, fn func(context. select { case <-ctx.Done(): - return nil + return ctx.Err() case <-time.After(s.opts.SendErrorBackoff): // wait a short time in case disconnect notifications are still propagating log.Infof("send message to %s failed but context was not Done: %s", s.to, err) From 386f87d845d67fef7246945387f5312f965846c2 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 17 Apr 2020 15:59:10 -0400 Subject: [PATCH 0923/1035] fix: mark wants sent when they are added to a message to be sent This commit was moved from ipfs/go-bitswap@e6bf8af372ac2d6ec48366c277d2957c93a82029 --- bitswap/internal/messagequeue/messagequeue.go | 31 +++++-------------- 1 file changed, 8 insertions(+), 23 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 2fb196650..9fcab6d31 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -422,7 +422,7 @@ func (mq *MessageQueue) sendMessage() { mq.dhTimeoutMgr.Start() // Convert want 
lists to a Bitswap Message - message, onSent := mq.extractOutgoingMessage(mq.sender.SupportsHave()) + message := mq.extractOutgoingMessage(mq.sender.SupportsHave()) // After processing the message, clear out its fields to save memory defer mq.msg.Reset(false) @@ -442,9 +442,6 @@ func (mq *MessageQueue) sendMessage() { return } - // We were able to send successfully. - onSent() - // Set a timer to wait for responses mq.simulateDontHaveWithTimeout(wantlist) @@ -541,7 +538,7 @@ func (mq *MessageQueue) pendingWorkCount() int { } // Convert the lists of wants into a Bitswap message -func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwapMessage, func()) { +func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) bsmsg.BitSwapMessage { mq.wllock.Lock() defer mq.wllock.Unlock() @@ -568,7 +565,6 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap } // Add each regular want-have / want-block to the message - peerSent := peerEntries[:0] for _, e := range peerEntries { if msgSize >= mq.maxMessageSize { break @@ -580,12 +576,13 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap mq.peerWants.RemoveType(e.Cid, pb.Message_Wantlist_Have) } else { msgSize += mq.msg.AddEntry(e.Cid, e.Priority, e.WantType, true) - peerSent = append(peerSent, e) + + // Move the key from pending to sent + mq.peerWants.MarkSent(e) } } // Add each broadcast want-have to the message - bcstSent := bcstEntries[:0] for _, e := range bcstEntries { if msgSize >= mq.maxMessageSize { break @@ -601,24 +598,12 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap } msgSize += mq.msg.AddEntry(e.Cid, e.Priority, wantType, false) - bcstSent = append(bcstSent, e) - } - // Called when the message has been successfully sent. 
- onMessageSent := func() { - mq.wllock.Lock() - defer mq.wllock.Unlock() - - // Move the keys from pending to sent - for _, e := range bcstSent { - mq.bcstWants.MarkSent(e) - } - for _, e := range peerSent { - mq.peerWants.MarkSent(e) - } + // Move the key from pending to sent + mq.bcstWants.MarkSent(e) } - return mq.msg, onMessageSent + return mq.msg } func (mq *MessageQueue) initializeSender() (bsnet.MessageSender, error) { From df3881e4ccc44358e0e17940f0523640d886fea7 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 17 Apr 2020 17:28:04 -0400 Subject: [PATCH 0924/1035] feat: optimize entry sorting in MessageQueue This commit was moved from ipfs/go-bitswap@2fe1405be75ba40100aee7cf3a41ab85becdd065 --- bitswap/internal/messagequeue/messagequeue.go | 25 ++++++++++++++++--- bitswap/message/message.go | 19 ++++++++++++++ 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index d42db10d6..4e245095d 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -544,9 +544,28 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap mq.wllock.Lock() defer mq.wllock.Unlock() - // Get broadcast and regular wantlist entries - bcstEntries := mq.bcstWants.pending.SortedEntries() - peerEntries := mq.peerWants.pending.SortedEntries() + // Get broadcast and regular wantlist entries. + // SortedEntries() slows down the MessageQueue a lot, and entries only need + // to be sorted if the number of wants will overflow the size of the + // message (to make sure that the highest priority wants are sent in the + // first message). + // We prioritize cancels, then regular wants, then broadcast wants. 
+ var peerEntries []bswl.Entry + var bcstEntries []bswl.Entry + maxCancelsSize := mq.cancels.Len() * bsmsg.MaxEntrySize + maxPeerSize := mq.peerWants.pending.Len() * bsmsg.MaxEntrySize + maxBcstSize := mq.bcstWants.pending.Len() * bsmsg.MaxEntrySize + + if maxCancelsSize+maxPeerSize < mq.maxMessageSize { + peerEntries = mq.peerWants.pending.Entries() + } else { + peerEntries = mq.peerWants.pending.SortedEntries() + } + if maxCancelsSize+maxPeerSize+maxBcstSize < mq.maxMessageSize { + bcstEntries = mq.bcstWants.pending.Entries() + } else { + bcstEntries = mq.bcstWants.pending.SortedEntries() + } // Size of the message so far msgSize := 0 diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 8377ea733..f820c9dc7 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -13,6 +13,7 @@ import ( pool "github.com/libp2p/go-buffer-pool" msgio "github.com/libp2p/go-msgio" + u "github.com/ipfs/go-ipfs-util" "github.com/libp2p/go-libp2p-core/network" ) @@ -118,6 +119,24 @@ func (e *Entry) ToPB() pb.Message_Wantlist_Entry { } } +var MaxEntrySize = maxEntrySize() + +func maxEntrySize() int { + var maxInt32 int32 = (1 << 31) - 1 + + c := cid.NewCidV0(u.Hash([]byte("cid"))) + e := Entry{ + Entry: wantlist.Entry{ + Cid: c, + Priority: maxInt32, + WantType: pb.Message_Wantlist_Have, + }, + SendDontHave: true, // true takes up more space than false + Cancel: true, + } + return e.Size() +} + type impl struct { full bool wantlist map[cid.Cid]*Entry From 9fdb2ae362cdeedecd9992604ad1cfbd0a683cf1 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 21 Apr 2020 10:27:44 -0700 Subject: [PATCH 0925/1035] fix: minimize time holding wantlist lock (#361) * fix: minimize time holding wantlist lock Instead of holding the lock the entire time we prepare a message, hold the lock while we retrieve the wantlist entries, process the entries without the lock, retake the lock, then mark entries as sent. This means: 1. 
We never sort entries while holding the lock. 2. We allocate exactly three times while holding the lock (once per entry list). * fix: address code review This commit was moved from ipfs/go-bitswap@9fc4a36823cdbe12e06f5c2743dd158b482289b1 --- bitswap/internal/decision/engine.go | 8 +- bitswap/internal/messagequeue/messagequeue.go | 142 +++++++++++------- bitswap/message/message.go | 8 + bitswap/wantlist/wantlist.go | 12 +- bitswap/wantlist/wantlist_test.go | 6 +- 5 files changed, 113 insertions(+), 63 deletions(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 620bb868c..81ef9b9e5 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -337,9 +337,13 @@ func (e *Engine) onPeerRemoved(p peer.ID) { // WantlistForPeer returns the currently understood want list for a given peer func (e *Engine) WantlistForPeer(p peer.ID) (out []wl.Entry) { partner := e.findOrCreate(p) + partner.lk.Lock() - defer partner.lk.Unlock() - return partner.wantList.SortedEntries() + entries := partner.wantList.Entries() + partner.lk.Unlock() + + wl.SortEntries(entries) + return } // LedgerForPeer returns aggregated data about blocks swapped and communication diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index b8323a779..7bcc087f1 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -115,9 +115,15 @@ func (r *recallWantlist) RemoveType(c cid.Cid, wtype pb.Message_Wantlist_WantTyp } // MarkSent moves the want from the pending to the sent list -func (r *recallWantlist) MarkSent(e wantlist.Entry) { - r.pending.RemoveType(e.Cid, e.WantType) +// +// Returns true if the want was marked as sent. Returns false if the want wasn't +// pending. 
+func (r *recallWantlist) MarkSent(e wantlist.Entry) bool { + if !r.pending.RemoveType(e.Cid, e.WantType) { + return false + } r.sent.Add(e.Cid, e.Priority, e.WantType) + return true } type peerConn struct { @@ -539,74 +545,77 @@ func (mq *MessageQueue) pendingWorkCount() int { // Convert the lists of wants into a Bitswap message func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) bsmsg.BitSwapMessage { - mq.wllock.Lock() - defer mq.wllock.Unlock() - // Get broadcast and regular wantlist entries. - // SortedEntries() slows down the MessageQueue a lot, and entries only need - // to be sorted if the number of wants will overflow the size of the - // message (to make sure that the highest priority wants are sent in the - // first message). - // We prioritize cancels, then regular wants, then broadcast wants. - var peerEntries []bswl.Entry - var bcstEntries []bswl.Entry - maxCancelsSize := mq.cancels.Len() * bsmsg.MaxEntrySize - maxPeerSize := mq.peerWants.pending.Len() * bsmsg.MaxEntrySize - maxBcstSize := mq.bcstWants.pending.Len() * bsmsg.MaxEntrySize - - if maxCancelsSize+maxPeerSize < mq.maxMessageSize { - peerEntries = mq.peerWants.pending.Entries() - } else { - peerEntries = mq.peerWants.pending.SortedEntries() - } - if maxCancelsSize+maxPeerSize+maxBcstSize < mq.maxMessageSize { - bcstEntries = mq.bcstWants.pending.Entries() - } else { - bcstEntries = mq.bcstWants.pending.SortedEntries() + mq.wllock.Lock() + peerEntries := mq.peerWants.pending.Entries() + bcstEntries := mq.bcstWants.pending.Entries() + cancels := mq.cancels.Keys() + if !supportsHave { + filteredPeerEntries := peerEntries[:0] + // If the remote peer doesn't support HAVE / DONT_HAVE messages, + // don't send want-haves (only send want-blocks) + // + // Doing this here under the lock makes everything else in this + // function simpler. + // + // TODO: We should _try_ to avoid recording these in the first + // place if possible. 
+ for _, e := range peerEntries { + if e.WantType == pb.Message_Wantlist_Have { + mq.peerWants.RemoveType(e.Cid, pb.Message_Wantlist_Have) + } else { + filteredPeerEntries = append(filteredPeerEntries, e) + } + } + peerEntries = filteredPeerEntries } + mq.wllock.Unlock() - // Size of the message so far - msgSize := 0 + // We prioritize cancels, then regular wants, then broadcast wants. - // Always prioritize cancels, then targeted, then broadcast. + var ( + msgSize = 0 // size of message so far + sentCancels = 0 // number of cancels in message + sentPeerEntries = 0 // number of peer entries in message + sentBcstEntries = 0 // number of broadcast entries in message + ) // Add each cancel to the message - cancels := mq.cancels.Keys() for _, c := range cancels { + msgSize += mq.msg.Cancel(c) + sentCancels++ + if msgSize >= mq.maxMessageSize { - break + goto FINISH } - msgSize += mq.msg.Cancel(c) + } - // Clear the cancel - we make a best effort to let peers know about - // cancels but won't save them to resend if there's a failure. - mq.cancels.Remove(c) + // Next, add the wants. If we have too many entries to fit into a single + // message, sort by priority and include the high priority ones first. + // However, avoid sorting till we really need to as this code is a + // called frequently. + + // Add each regular want-have / want-block to the message. 
+ if msgSize+(len(peerEntries)*bsmsg.MaxEntrySize) > mq.maxMessageSize { + bswl.SortEntries(peerEntries) } - // Add each regular want-have / want-block to the message for _, e := range peerEntries { + msgSize += mq.msg.AddEntry(e.Cid, e.Priority, e.WantType, true) + sentPeerEntries++ + if msgSize >= mq.maxMessageSize { - break + goto FINISH } + } - // If the remote peer doesn't support HAVE / DONT_HAVE messages, - // don't send want-haves (only send want-blocks) - if !supportsHave && e.WantType == pb.Message_Wantlist_Have { - mq.peerWants.RemoveType(e.Cid, pb.Message_Wantlist_Have) - } else { - msgSize += mq.msg.AddEntry(e.Cid, e.Priority, e.WantType, true) - - // Move the key from pending to sent - mq.peerWants.MarkSent(e) - } + // Add each broadcast want-have to the message. + if msgSize+(len(bcstEntries)*bsmsg.MaxEntrySize) > mq.maxMessageSize { + bswl.SortEntries(bcstEntries) } // Add each broadcast want-have to the message for _, e := range bcstEntries { - if msgSize >= mq.maxMessageSize { - break - } - // Broadcast wants are sent as want-have wantType := pb.Message_Wantlist_Have @@ -617,11 +626,40 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) bsmsg.BitSwapM } msgSize += mq.msg.AddEntry(e.Cid, e.Priority, wantType, false) + sentBcstEntries++ - // Move the key from pending to sent - mq.bcstWants.MarkSent(e) + if msgSize >= mq.maxMessageSize { + goto FINISH + } } +FINISH: + + // Finally, re-take the lock, mark sent and remove any entries from our + // message that we've decided to cancel at the last minute. + mq.wllock.Lock() + for _, e := range peerEntries[:sentPeerEntries] { + if !mq.peerWants.MarkSent(e) { + // It changed. 
+ mq.msg.Remove(e.Cid) + } + } + + for _, e := range bcstEntries[:sentBcstEntries] { + if !mq.bcstWants.MarkSent(e) { + mq.msg.Remove(e.Cid) + } + } + + for _, c := range cancels[:sentCancels] { + if !mq.cancels.Has(c) { + mq.msg.Remove(c) + } else { + mq.cancels.Remove(c) + } + } + mq.wllock.Unlock() + return mq.msg } diff --git a/bitswap/message/message.go b/bitswap/message/message.go index f820c9dc7..88c3f7d41 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -44,6 +44,10 @@ type BitSwapMessage interface { // Returns the size of the CANCEL entry in the protobuf Cancel(key cid.Cid) int + // Remove removes any entries for the given CID. Useful when the want + // status for the CID changes when preparing a message. + Remove(key cid.Cid) + // Empty indicates whether the message has any information Empty() bool // Size returns the size of the message in bytes @@ -298,6 +302,10 @@ func (m *impl) SetPendingBytes(pendingBytes int32) { m.pendingBytes = pendingBytes } +func (m *impl) Remove(k cid.Cid) { + delete(m.wantlist, k) +} + func (m *impl) Cancel(k cid.Cid) int { return m.addEntry(k, 0, true, pb.Message_Wantlist_Block, false) } diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index e18567dbf..555c293e6 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -111,16 +111,14 @@ func (w *Wantlist) Entries() []Entry { return es } -// SortedEntries returns wantlist entries ordered by priority. -func (w *Wantlist) SortedEntries() []Entry { - es := w.Entries() - sort.Sort(entrySlice(es)) - return es -} - // Absorb all the entries in other into this want list func (w *Wantlist) Absorb(other *Wantlist) { for _, e := range other.Entries() { w.Add(e.Cid, e.Priority, e.WantType) } } + +// SortEntries sorts the list of entries by priority. 
+func SortEntries(es []Entry) { + sort.Sort(entrySlice(es)) +} diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/wantlist/wantlist_test.go index 1139e87ae..49dc55905 100644 --- a/bitswap/wantlist/wantlist_test.go +++ b/bitswap/wantlist/wantlist_test.go @@ -203,14 +203,16 @@ func TestAbsort(t *testing.T) { } } -func TestSortedEntries(t *testing.T) { +func TestSortEntries(t *testing.T) { wl := New() wl.Add(testcids[0], 3, pb.Message_Wantlist_Block) wl.Add(testcids[1], 5, pb.Message_Wantlist_Have) wl.Add(testcids[2], 4, pb.Message_Wantlist_Have) - entries := wl.SortedEntries() + entries := wl.Entries() + SortEntries(entries) + if !entries[0].Cid.Equals(testcids[1]) || !entries[1].Cid.Equals(testcids[2]) || !entries[2].Cid.Equals(testcids[0]) { From 53302ba5b9581f9c9f07d0c0cfbb52a0ce0f8629 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 21 Apr 2020 11:48:45 -0700 Subject: [PATCH 0926/1035] fix: ensure we shutdown the message queue asap (#362) This commit was moved from ipfs/go-bitswap@824f7264ea9289fac57e598906eecfeb3bc42d6e --- bitswap/internal/messagequeue/messagequeue.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 7bcc087f1..ad85e5234 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -333,7 +333,7 @@ func (mq *MessageQueue) runQueue() { } var workScheduled time.Time - for { + for mq.ctx.Err() == nil { select { case <-mq.rebroadcastTimer.C: mq.rebroadcastWantlist() From b05e6c3d0699d9f7b1a814f4b30bf1096e93cd50 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Tue, 21 Apr 2020 16:30:03 -0400 Subject: [PATCH 0927/1035] refactor: add reverse index to peerWantManager to speed up cancels (#364) * refactor: add reverse index to peerWantManager to speed up cancels * refactor: in peerWantManager use ForEach instead of allocating lists This commit was moved from 
ipfs/go-bitswap@4ce7de9600a181e92684a618f012469d78faa4f9 --- .../internal/peermanager/peerwantmanager.go | 86 +++++++++++++++---- 1 file changed, 71 insertions(+), 15 deletions(-) diff --git a/bitswap/internal/peermanager/peerwantmanager.go b/bitswap/internal/peermanager/peerwantmanager.go index 08914bbca..1928966ca 100644 --- a/bitswap/internal/peermanager/peerwantmanager.go +++ b/bitswap/internal/peermanager/peerwantmanager.go @@ -20,6 +20,9 @@ type Gauge interface { // sent to each peer, so that the PeerManager doesn't send duplicates. type peerWantManager struct { peerWants map[peer.ID]*peerWant + // Reverse index mapping wants to the peers that sent them. This is used + // to speed up cancels + wantPeers map[cid.Cid]map[peer.ID]struct{} // Keeps track of the number of active want-blocks wantBlockGauge Gauge } @@ -34,6 +37,7 @@ type peerWant struct { func newPeerWantManager(wantBlockGauge Gauge) *peerWantManager { return &peerWantManager{ peerWants: make(map[peer.ID]*peerWant), + wantPeers: make(map[cid.Cid]map[peer.ID]struct{}), wantBlockGauge: wantBlockGauge, } } @@ -55,10 +59,19 @@ func (pwm *peerWantManager) removePeer(p peer.ID) { return } - // Decrement the gauge by the number of pending want-blocks to the peer - for range pws.wantBlocks.Keys() { + pws.wantBlocks.ForEach(func(c cid.Cid) error { + // Decrement the gauge by the number of pending want-blocks to the peer pwm.wantBlockGauge.Dec() - } + // Clean up want-blocks from the reverse index + pwm.reverseIndexRemove(c, p) + return nil + }) + + // Clean up want-haves from the reverse index + pws.wantHaves.ForEach(func(c cid.Cid) error { + pwm.reverseIndexRemove(c, p) + return nil + }) delete(pwm.peerWants, p) } @@ -77,6 +90,9 @@ func (pwm *peerWantManager) prepareBroadcastWantHaves(wantHaves []cid.Cid) map[p // Record that the CID has been sent as a want-have pws.wantHaves.Add(c) + // Update the reverse index + pwm.reverseIndexAdd(c, p) + // Add the CID to the results if _, ok := res[p]; !ok { res[p] = 
make([]cid.Cid, 0, 1) @@ -114,6 +130,9 @@ func (pwm *peerWantManager) prepareSendWants(p peer.ID, wantBlocks []cid.Cid, wa // Record that the CID was sent as a want-block pws.wantBlocks.Add(c) + // Update the reverse index + pwm.reverseIndexAdd(c, p) + // Add the CID to the results resWantBlks = append(resWantBlks, c) @@ -132,6 +151,9 @@ func (pwm *peerWantManager) prepareSendWants(p peer.ID, wantBlocks []cid.Cid, wa // Record that the CID was sent as a want-have pws.wantHaves.Add(c) + // Update the reverse index + pwm.reverseIndexAdd(c, p) + // Add the CID to the results resWantHvs = append(resWantHvs, c) } @@ -146,10 +168,17 @@ func (pwm *peerWantManager) prepareSendWants(p peer.ID, wantBlocks []cid.Cid, wa func (pwm *peerWantManager) prepareSendCancels(cancelKs []cid.Cid) map[peer.ID][]cid.Cid { res := make(map[peer.ID][]cid.Cid) - // Iterate over all known peers - for p, pws := range pwm.peerWants { - // Iterate over all requested cancels - for _, c := range cancelKs { + // Iterate over all requested cancels + for _, c := range cancelKs { + // Iterate over peers that have sent a corresponding want + for p := range pwm.wantPeers[c] { + pws, ok := pwm.peerWants[p] + if !ok { + // Should never happen but check just in case + log.Errorf("peerWantManager reverse index missing peer %s for key %s", p, c) + continue + } + isWantBlock := pws.wantBlocks.Has(c) isWantHave := pws.wantHaves.Has(c) @@ -169,6 +198,9 @@ func (pwm *peerWantManager) prepareSendCancels(cancelKs []cid.Cid) map[peer.ID][ res[p] = make([]cid.Cid, 0, 1) } res[p] = append(res[p], c) + + // Update the reverse index + pwm.reverseIndexRemove(c, p) } } } @@ -176,6 +208,26 @@ func (pwm *peerWantManager) prepareSendCancels(cancelKs []cid.Cid) map[peer.ID][ return res } +// Add the peer to the list of peers that have sent a want with the cid +func (pwm *peerWantManager) reverseIndexAdd(c cid.Cid, p peer.ID) { + peers, ok := pwm.wantPeers[c] + if !ok { + peers = make(map[peer.ID]struct{}, 1) + 
pwm.wantPeers[c] = peers + } + peers[p] = struct{}{} +} + +// Remove the peer from the list of peers that have sent a want with the cid +func (pwm *peerWantManager) reverseIndexRemove(c cid.Cid, p peer.ID) { + if peers, ok := pwm.wantPeers[c]; ok { + delete(peers, p) + if len(peers) == 0 { + delete(pwm.wantPeers, c) + } + } +} + // GetWantBlocks returns the set of all want-blocks sent to all peers func (pwm *peerWantManager) getWantBlocks() []cid.Cid { res := cid.NewSet() @@ -183,10 +235,11 @@ func (pwm *peerWantManager) getWantBlocks() []cid.Cid { // Iterate over all known peers for _, pws := range pwm.peerWants { // Iterate over all want-blocks - for _, c := range pws.wantBlocks.Keys() { + pws.wantBlocks.ForEach(func(c cid.Cid) error { // Add the CID to the results res.Add(c) - } + return nil + }) } return res.Keys() @@ -199,10 +252,11 @@ func (pwm *peerWantManager) getWantHaves() []cid.Cid { // Iterate over all known peers for _, pws := range pwm.peerWants { // Iterate over all want-haves - for _, c := range pws.wantHaves.Keys() { + pws.wantHaves.ForEach(func(c cid.Cid) error { // Add the CID to the results res.Add(c) - } + return nil + }) } return res.Keys() @@ -215,16 +269,18 @@ func (pwm *peerWantManager) getWants() []cid.Cid { // Iterate over all known peers for _, pws := range pwm.peerWants { // Iterate over all want-blocks - for _, c := range pws.wantBlocks.Keys() { + pws.wantBlocks.ForEach(func(c cid.Cid) error { // Add the CID to the results res.Add(c) - } + return nil + }) // Iterate over all want-haves - for _, c := range pws.wantHaves.Keys() { + pws.wantHaves.ForEach(func(c cid.Cid) error { // Add the CID to the results res.Add(c) - } + return nil + }) } return res.Keys() From d0a7ee15a2ad3a2a6f3e93a543632527f1fd4fda Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 22 Apr 2020 07:19:51 -0700 Subject: [PATCH 0928/1035] feat: move broadcast wantlist into the peermanager (#365) * feat: small optimizations * feat: move broadcast wantlist into the 
peermanager This deduplicates some state and allows us to do less book-keeping for broadcast wants. We should probably rename the PeerManager to the WantManager and rename the WantManager to something else. * fix: lint warnings This commit was moved from ipfs/go-bitswap@2a033735f078eead076582199fbbe3b99ffbf36a --- bitswap/internal/messagequeue/messagequeue.go | 5 +- bitswap/internal/peermanager/peermanager.go | 8 +- .../internal/peermanager/peermanager_test.go | 40 ++-- .../internal/peermanager/peerwantmanager.go | 187 ++++++++++++------ .../peermanager/peerwantmanager_test.go | 27 ++- bitswap/internal/wantmanager/wantmanager.go | 25 +-- .../internal/wantmanager/wantmanager_test.go | 126 +----------- 7 files changed, 179 insertions(+), 239 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index ad85e5234..755df08a7 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -261,7 +261,6 @@ func (mq *MessageQueue) AddCancels(cancelKs []cid.Cid) { mq.dhTimeoutMgr.CancelPending(cancelKs) mq.wllock.Lock() - defer mq.wllock.Unlock() workReady := false @@ -282,6 +281,10 @@ func (mq *MessageQueue) AddCancels(cancelKs []cid.Cid) { } } + mq.wllock.Unlock() + + // Unlock first to be nice to the scheduler. + // Schedule a message send if workReady { mq.signalWorkReady() diff --git a/bitswap/internal/peermanager/peermanager.go b/bitswap/internal/peermanager/peermanager.go index 0cf8b2e35..522823263 100644 --- a/bitswap/internal/peermanager/peermanager.go +++ b/bitswap/internal/peermanager/peermanager.go @@ -82,18 +82,16 @@ func (pm *PeerManager) ConnectedPeers() []peer.ID { // Connected is called to add a new peer to the pool, and send it an initial set // of wants. 
-func (pm *PeerManager) Connected(p peer.ID, initialWantHaves []cid.Cid) { +func (pm *PeerManager) Connected(p peer.ID) { pm.pqLk.Lock() defer pm.pqLk.Unlock() pq := pm.getOrCreate(p) // Inform the peer want manager that there's a new peer - pm.pwm.addPeer(p) - // Record that the want-haves are being sent to the peer - _, wantHaves := pm.pwm.prepareSendWants(p, nil, initialWantHaves) + wants := pm.pwm.addPeer(p) // Broadcast any live want-haves to the newly connected peers - pq.AddBroadcastWantHaves(wantHaves) + pq.AddBroadcastWantHaves(wants) // Inform the sessions that the peer has connected pm.signalAvailability(p, true) } diff --git a/bitswap/internal/peermanager/peermanager_test.go b/bitswap/internal/peermanager/peermanager_test.go index f979b2c81..469aa4d19 100644 --- a/bitswap/internal/peermanager/peermanager_test.go +++ b/bitswap/internal/peermanager/peermanager_test.go @@ -82,9 +82,9 @@ func TestAddingAndRemovingPeers(t *testing.T) { self, peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4], tp[5] peerManager := New(ctx, peerQueueFactory, self) - peerManager.Connected(peer1, nil) - peerManager.Connected(peer2, nil) - peerManager.Connected(peer3, nil) + peerManager.Connected(peer1) + peerManager.Connected(peer2) + peerManager.Connected(peer3) connectedPeers := peerManager.ConnectedPeers() @@ -108,7 +108,7 @@ func TestAddingAndRemovingPeers(t *testing.T) { } // reconnect peer - peerManager.Connected(peer1, nil) + peerManager.Connected(peer1) connectedPeers = peerManager.ConnectedPeers() if !testutil.ContainsPeer(connectedPeers, peer1) { @@ -126,9 +126,10 @@ func TestBroadcastOnConnect(t *testing.T) { peerManager := New(ctx, peerQueueFactory, self) cids := testutil.GenerateCids(2) + peerManager.BroadcastWantHaves(ctx, cids) // Connect with two broadcast wants for first peer - peerManager.Connected(peer1, cids) + peerManager.Connected(peer1) collected := collectMessages(msgs, 2*time.Millisecond) if len(collected[peer1].wantHaves) != 2 { @@ 
-147,8 +148,11 @@ func TestBroadcastWantHaves(t *testing.T) { cids := testutil.GenerateCids(3) - // Connect to first peer with two broadcast wants - peerManager.Connected(peer1, []cid.Cid{cids[0], cids[1]}) + // Broadcast the first two. + peerManager.BroadcastWantHaves(ctx, cids[:2]) + + // First peer should get them. + peerManager.Connected(peer1) collected := collectMessages(msgs, 2*time.Millisecond) if len(collected[peer1].wantHaves) != 2 { @@ -156,7 +160,7 @@ func TestBroadcastWantHaves(t *testing.T) { } // Connect to second peer - peerManager.Connected(peer2, nil) + peerManager.Connected(peer2) // Send a broadcast to all peers, including cid that was already sent to // first peer @@ -165,10 +169,12 @@ func TestBroadcastWantHaves(t *testing.T) { // One of the want-haves was already sent to peer1 if len(collected[peer1].wantHaves) != 1 { - t.Fatal("Expected 1 want-haves to be sent to first peer", collected[peer1].wantHaves) + t.Fatalf("Expected 1 want-haves to be sent to first peer, got %d", + len(collected[peer1].wantHaves)) } - if len(collected[peer2].wantHaves) != 2 { - t.Fatal("Expected 2 want-haves to be sent to second peer") + if len(collected[peer2].wantHaves) != 3 { + t.Fatalf("Expected 3 want-haves to be sent to second peer, got %d", + len(collected[peer2].wantHaves)) } } @@ -182,7 +188,7 @@ func TestSendWants(t *testing.T) { peerManager := New(ctx, peerQueueFactory, self) cids := testutil.GenerateCids(4) - peerManager.Connected(peer1, nil) + peerManager.Connected(peer1) peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0]}, []cid.Cid{cids[2]}) collected := collectMessages(msgs, 2*time.Millisecond) @@ -217,8 +223,8 @@ func TestSendCancels(t *testing.T) { cids := testutil.GenerateCids(4) // Connect to peer1 and peer2 - peerManager.Connected(peer1, nil) - peerManager.Connected(peer2, nil) + peerManager.Connected(peer1) + peerManager.Connected(peer2) // Send 2 want-blocks and 1 want-have to peer1 peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0], 
cids[1]}, []cid.Cid{cids[2]}) @@ -286,11 +292,11 @@ func TestSessionRegistration(t *testing.T) { t.Fatal("Expected peer not be available till connected") } - peerManager.Connected(p1, nil) + peerManager.Connected(p1) if !s.available[p1] { t.Fatal("Expected signal callback") } - peerManager.Connected(p2, nil) + peerManager.Connected(p2) if !s.available[p2] { t.Fatal("Expected signal callback") } @@ -305,7 +311,7 @@ func TestSessionRegistration(t *testing.T) { peerManager.UnregisterSession(id) - peerManager.Connected(p1, nil) + peerManager.Connected(p1) if s.available[p1] { t.Fatal("Expected no signal callback (session unregistered)") } diff --git a/bitswap/internal/peermanager/peerwantmanager.go b/bitswap/internal/peermanager/peerwantmanager.go index 1928966ca..418a646c4 100644 --- a/bitswap/internal/peermanager/peerwantmanager.go +++ b/bitswap/internal/peermanager/peerwantmanager.go @@ -19,10 +19,17 @@ type Gauge interface { // peerWantManager keeps track of which want-haves and want-blocks have been // sent to each peer, so that the PeerManager doesn't send duplicates. type peerWantManager struct { + // peerWants maps peers to outstanding wants. + // A peer's wants is the _union_ of the broadcast wants and the wants in + // this list. peerWants map[peer.ID]*peerWant - // Reverse index mapping wants to the peers that sent them. This is used - // to speed up cancels + + // Reverse index of all wants in peerWants. wantPeers map[cid.Cid]map[peer.ID]struct{} + + // broadcastWants tracks all the current broadcast wants. 
+ broadcastWants *cid.Set + // Keeps track of the number of active want-blocks wantBlockGauge Gauge } @@ -36,20 +43,24 @@ type peerWant struct { // number of active want-blocks (ie sent but no response received) func newPeerWantManager(wantBlockGauge Gauge) *peerWantManager { return &peerWantManager{ + broadcastWants: cid.NewSet(), peerWants: make(map[peer.ID]*peerWant), wantPeers: make(map[cid.Cid]map[peer.ID]struct{}), wantBlockGauge: wantBlockGauge, } } -// AddPeer adds a peer whose wants we need to keep track of -func (pwm *peerWantManager) addPeer(p peer.ID) { +// addPeer adds a peer whose wants we need to keep track of. It returns the +// current list of broadcast wants that should be sent to the peer. +func (pwm *peerWantManager) addPeer(p peer.ID) []cid.Cid { if _, ok := pwm.peerWants[p]; !ok { pwm.peerWants[p] = &peerWant{ wantBlocks: cid.NewSet(), wantHaves: cid.NewSet(), } + return pwm.broadcastWants.Keys() } + return nil } // RemovePeer removes a peer and its associated wants from tracking @@ -59,7 +70,7 @@ func (pwm *peerWantManager) removePeer(p peer.ID) { return } - pws.wantBlocks.ForEach(func(c cid.Cid) error { + _ = pws.wantBlocks.ForEach(func(c cid.Cid) error { // Decrement the gauge by the number of pending want-blocks to the peer pwm.wantBlockGauge.Dec() // Clean up want-blocks from the reverse index @@ -68,7 +79,7 @@ func (pwm *peerWantManager) removePeer(p peer.ID) { }) // Clean up want-haves from the reverse index - pws.wantHaves.ForEach(func(c cid.Cid) error { + _ = pws.wantHaves.ForEach(func(c cid.Cid) error { pwm.reverseIndexRemove(c, p) return nil }) @@ -79,26 +90,30 @@ func (pwm *peerWantManager) removePeer(p peer.ID) { // PrepareBroadcastWantHaves filters the list of want-haves for each peer, // returning a map of peers to the want-haves they have not yet been sent. 
func (pwm *peerWantManager) prepareBroadcastWantHaves(wantHaves []cid.Cid) map[peer.ID][]cid.Cid { - res := make(map[peer.ID][]cid.Cid) + res := make(map[peer.ID][]cid.Cid, len(pwm.peerWants)) + for _, c := range wantHaves { + if pwm.broadcastWants.Has(c) { + // Already a broadcast want, skip it. + continue + } + pwm.broadcastWants.Add(c) + + // Prepare broadcast. + wantedBy := pwm.wantPeers[c] + for p := range pwm.peerWants { + // If we've already sent a want to this peer, skip them. + // + // This is faster than checking the actual wantlists due + // to better locality. + if _, ok := wantedBy[p]; ok { + continue + } - // Iterate over all known peers - for p, pws := range pwm.peerWants { - // Iterate over all want-haves - for _, c := range wantHaves { - // If the CID has not been sent as a want-block or want-have - if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { - // Record that the CID has been sent as a want-have - pws.wantHaves.Add(c) - - // Update the reverse index - pwm.reverseIndexAdd(c, p) - - // Add the CID to the results - if _, ok := res[p]; !ok { - res[p] = make([]cid.Cid, 0, 1) - } - res[p] = append(res[p], c) + cids, ok := res[p] + if !ok { + cids = make([]cid.Cid, 0, len(wantHaves)) } + res[p] = append(cids, c) } } @@ -146,6 +161,12 @@ func (pwm *peerWantManager) prepareSendWants(p peer.ID, wantBlocks []cid.Cid, wa // Iterate over the requested want-haves for _, c := range wantHaves { + // If we've already broadcasted this want, don't bother with a + // want-have. + if pwm.broadcastWants.Has(c) { + continue + } + // If the CID has not been sent as a want-block or want-have if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { // Record that the CID was sent as a want-have @@ -166,11 +187,36 @@ func (pwm *peerWantManager) prepareSendWants(p peer.ID, wantBlocks []cid.Cid, wa // returning a map of peers which only contains cancels for wants that have // been sent to the peer. 
func (pwm *peerWantManager) prepareSendCancels(cancelKs []cid.Cid) map[peer.ID][]cid.Cid { - res := make(map[peer.ID][]cid.Cid) + if len(cancelKs) == 0 { + return nil + } + + // Pre-allocate enough space for all peers that have the first CID. + // Chances are these peers are related. + expectedResSize := 0 + firstCancel := cancelKs[0] + if pwm.broadcastWants.Has(firstCancel) { + expectedResSize = len(pwm.peerWants) + } else { + expectedResSize = len(pwm.wantPeers[firstCancel]) + } + res := make(map[peer.ID][]cid.Cid, expectedResSize) + + // Keep the broadcast keys separate. This lets us batch-process them at + // the end. + broadcastKs := make([]cid.Cid, 0, len(cancelKs)) // Iterate over all requested cancels for _, c := range cancelKs { - // Iterate over peers that have sent a corresponding want + // Handle broadcast wants up-front. + isBroadcast := pwm.broadcastWants.Has(c) + if isBroadcast { + broadcastKs = append(broadcastKs, c) + pwm.broadcastWants.Remove(c) + } + + // Even if this is a broadcast, we may have sent targeted wants. + // Deal with them. for p := range pwm.wantPeers[c] { pws, ok := pwm.peerWants[p] if !ok { @@ -179,28 +225,45 @@ func (pwm *peerWantManager) prepareSendCancels(cancelKs []cid.Cid) map[peer.ID][ continue } - isWantBlock := pws.wantBlocks.Has(c) - isWantHave := pws.wantHaves.Has(c) - - // If the CID was sent as a want-block, decrement the want-block count - if isWantBlock { + // Update the want gauge. + if pws.wantBlocks.Has(c) { pwm.wantBlockGauge.Dec() } - // If the CID was sent as a want-block or want-have - if isWantBlock || isWantHave { - // Remove the CID from the recorded want-blocks and want-haves - pws.wantBlocks.Remove(c) - pws.wantHaves.Remove(c) + // Unconditionally remove from the want lists. 
+ pws.wantBlocks.Remove(c) + pws.wantHaves.Remove(c) - // Add the CID to the results - if _, ok := res[p]; !ok { - res[p] = make([]cid.Cid, 0, 1) - } - res[p] = append(res[p], c) + // If it's a broadcast want, we've already added it to + // the broadcastKs list. + if isBroadcast { + continue + } - // Update the reverse index - pwm.reverseIndexRemove(c, p) + // Add the CID to the result for the peer. + cids, ok := res[p] + if !ok { + // Pre-allocate enough for all keys. + // Cancels are usually related. + cids = make([]cid.Cid, 0, len(cancelKs)) + } + res[p] = append(cids, c) + } + + // Finally, batch-remove the reverse-index. There's no need to + // clear this index peer-by-peer. + delete(pwm.wantPeers, c) + } + + // If we have any broadcasted CIDs, add them in. + // + // Doing this at the end can save us a bunch of work and allocations. + if len(broadcastKs) > 0 { + for p := range pwm.peerWants { + if cids, ok := res[p]; ok { + res[p] = append(cids, broadcastKs...) + } else { + res[p] = broadcastKs } } } @@ -212,7 +275,7 @@ func (pwm *peerWantManager) prepareSendCancels(cancelKs []cid.Cid) map[peer.ID][ func (pwm *peerWantManager) reverseIndexAdd(c cid.Cid, p peer.ID) { peers, ok := pwm.wantPeers[c] if !ok { - peers = make(map[peer.ID]struct{}, 1) + peers = make(map[peer.ID]struct{}, 10) pwm.wantPeers[c] = peers } peers[p] = struct{}{} @@ -235,7 +298,7 @@ func (pwm *peerWantManager) getWantBlocks() []cid.Cid { // Iterate over all known peers for _, pws := range pwm.peerWants { // Iterate over all want-blocks - pws.wantBlocks.ForEach(func(c cid.Cid) error { + _ = pws.wantBlocks.ForEach(func(c cid.Cid) error { // Add the CID to the results res.Add(c) return nil @@ -249,41 +312,37 @@ func (pwm *peerWantManager) getWantBlocks() []cid.Cid { func (pwm *peerWantManager) getWantHaves() []cid.Cid { res := cid.NewSet() - // Iterate over all known peers + // Iterate over all peers with active wants. 
for _, pws := range pwm.peerWants { // Iterate over all want-haves - pws.wantHaves.ForEach(func(c cid.Cid) error { + _ = pws.wantHaves.ForEach(func(c cid.Cid) error { // Add the CID to the results res.Add(c) return nil }) } + _ = pwm.broadcastWants.ForEach(func(c cid.Cid) error { + res.Add(c) + return nil + }) return res.Keys() } // GetWants returns the set of all wants (both want-blocks and want-haves). func (pwm *peerWantManager) getWants() []cid.Cid { - res := cid.NewSet() - - // Iterate over all known peers - for _, pws := range pwm.peerWants { - // Iterate over all want-blocks - pws.wantBlocks.ForEach(func(c cid.Cid) error { - // Add the CID to the results - res.Add(c) - return nil - }) + res := pwm.broadcastWants.Keys() - // Iterate over all want-haves - pws.wantHaves.ForEach(func(c cid.Cid) error { - // Add the CID to the results - res.Add(c) - return nil - }) + // Iterate over all targeted wants, removing ones that are also in the + // broadcast list. + for c := range pwm.wantPeers { + if pwm.broadcastWants.Has(c) { + continue + } + res = append(res, c) } - return res.Keys() + return res } func (pwm *peerWantManager) String() string { diff --git a/bitswap/internal/peermanager/peerwantmanager_test.go b/bitswap/internal/peermanager/peerwantmanager_test.go index a56df168a..766033e8f 100644 --- a/bitswap/internal/peermanager/peerwantmanager_test.go +++ b/bitswap/internal/peermanager/peerwantmanager_test.go @@ -38,8 +38,12 @@ func TestPrepareBroadcastWantHaves(t *testing.T) { cids2 := testutil.GenerateCids(2) cids3 := testutil.GenerateCids(2) - pwm.addPeer(peers[0]) - pwm.addPeer(peers[1]) + if blist := pwm.addPeer(peers[0]); len(blist) > 0 { + t.Errorf("expected no broadcast wants") + } + if blist := pwm.addPeer(peers[1]); len(blist) > 0 { + t.Errorf("expected no broadcast wants") + } // Broadcast 2 cids to 2 peers bcst := pwm.prepareBroadcastWantHaves(cids) @@ -104,16 +108,19 @@ func TestPrepareBroadcastWantHaves(t *testing.T) { } } + allCids := cids + allCids 
= append(allCids, cids2...) + allCids = append(allCids, cids3...) + allCids = append(allCids, cids4...) + // Add another peer - pwm.addPeer(peers[2]) - bcst6 := pwm.prepareBroadcastWantHaves(cids) - if len(bcst6) != 1 { - t.Fatal("Expected 1 peer") + bcst6 := pwm.addPeer(peers[2]) + if !testutil.MatchKeysIgnoreOrder(bcst6, allCids) { + t.Fatalf("Expected all cids to be broadcast.") } - for p := range bcst6 { - if !testutil.MatchKeysIgnoreOrder(bcst6[p], cids) { - t.Fatal("Expected all cids to be broadcast") - } + + if broadcast := pwm.prepareBroadcastWantHaves(allCids); len(broadcast) != 0 { + t.Errorf("did not expect to have CIDs to broadcast") } } diff --git a/bitswap/internal/wantmanager/wantmanager.go b/bitswap/internal/wantmanager/wantmanager.go index 908f9dca3..539017a9d 100644 --- a/bitswap/internal/wantmanager/wantmanager.go +++ b/bitswap/internal/wantmanager/wantmanager.go @@ -6,7 +6,6 @@ import ( bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" "github.com/ipfs/go-bitswap/internal/sessionmanager" - bsswl "github.com/ipfs/go-bitswap/internal/sessionwantlist" logging "github.com/ipfs/go-log" cid "github.com/ipfs/go-cid" @@ -17,9 +16,8 @@ var log = logging.Logger("bitswap") // PeerHandler sends wants / cancels to other peers type PeerHandler interface { - // Connected is called when a peer connects, with any initial want-haves - // that have been broadcast to all peers (as part of session discovery) - Connected(p peer.ID, initialWants []cid.Cid) + // Connected is called when a peer connects. 
+ Connected(p peer.ID) // Disconnected is called when a peer disconnects Disconnected(p peer.ID) // BroadcastWantHaves sends want-haves to all connected peers @@ -38,11 +36,7 @@ type SessionManager interface { // - informs the SessionManager and BlockPresenceManager of incoming information // and cancelled sessions // - informs the PeerManager of connects and disconnects -// - manages the list of want-haves that are broadcast to the internet -// (as opposed to being sent to specific peers) type WantManager struct { - bcwl *bsswl.SessionWantlist - peerHandler PeerHandler sim *bssim.SessionInterestManager bpm *bsbpm.BlockPresenceManager @@ -52,7 +46,6 @@ type WantManager struct { // New initializes a new WantManager for a given context. func New(ctx context.Context, peerHandler PeerHandler, sim *bssim.SessionInterestManager, bpm *bsbpm.BlockPresenceManager) *WantManager { return &WantManager{ - bcwl: bsswl.NewSessionWantlist(), peerHandler: peerHandler, sim: sim, bpm: bpm, @@ -69,8 +62,6 @@ func (wm *WantManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid.Ci wm.bpm.ReceiveFrom(p, haves, dontHaves) // Inform interested sessions wm.sm.ReceiveFrom(p, blks, haves, dontHaves) - // Remove received blocks from broadcast wantlist - wm.bcwl.RemoveKeys(blks) // Send CANCEL to all peers with want-have / want-block wm.peerHandler.SendCancels(ctx, blks) } @@ -78,11 +69,10 @@ func (wm *WantManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid.Ci // BroadcastWantHaves is called when want-haves should be broadcast to all // connected peers (as part of session discovery) func (wm *WantManager) BroadcastWantHaves(ctx context.Context, ses uint64, wantHaves []cid.Cid) { - log.Debugf("BroadcastWantHaves session%d: %s", ses, wantHaves) - - // Record broadcast wants - wm.bcwl.Add(wantHaves, ses) + // TODO: Avoid calling broadcast through here. It doesn't fit with + // everything else this module does. 
+ log.Debugf("BroadcastWantHaves session%d: %s", ses, wantHaves) // Send want-haves to all peers wm.peerHandler.BroadcastWantHaves(ctx, wantHaves) } @@ -92,9 +82,6 @@ func (wm *WantManager) RemoveSession(ctx context.Context, ses uint64) { // Remove session's interest in the given blocks. cancelKs := wm.sim.RemoveSessionInterest(ses) - // Remove broadcast want-haves for session - wm.bcwl.RemoveSession(ses) - // Free up block presence tracking for keys that no session is interested // in anymore wm.bpm.RemoveKeys(cancelKs) @@ -107,7 +94,7 @@ func (wm *WantManager) RemoveSession(ctx context.Context, ses uint64) { func (wm *WantManager) Connected(p peer.ID) { // Tell the peer handler that there is a new connection and give it the // list of outstanding broadcast wants - wm.peerHandler.Connected(p, wm.bcwl.Keys()) + wm.peerHandler.Connected(p) } // Disconnected is called when a peer disconnects diff --git a/bitswap/internal/wantmanager/wantmanager_test.go b/bitswap/internal/wantmanager/wantmanager_test.go index 38d41d9f1..9855eb30d 100644 --- a/bitswap/internal/wantmanager/wantmanager_test.go +++ b/bitswap/internal/wantmanager/wantmanager_test.go @@ -14,13 +14,11 @@ import ( ) type fakePeerHandler struct { - lastInitialWants []cid.Cid - lastBcstWants []cid.Cid - lastCancels []cid.Cid + lastBcstWants []cid.Cid + lastCancels []cid.Cid } -func (fph *fakePeerHandler) Connected(p peer.ID, initialWants []cid.Cid) { - fph.lastInitialWants = initialWants +func (fph *fakePeerHandler) Connected(p peer.ID) { } func (fph *fakePeerHandler) Disconnected(p peer.ID) { @@ -39,124 +37,6 @@ func (*fakeSessionManager) ReceiveFrom(p peer.ID, blks []cid.Cid, haves []cid.Ci return nil } -func TestInitialBroadcastWantsAddedCorrectly(t *testing.T) { - ctx := context.Background() - ph := &fakePeerHandler{} - sim := bssim.New() - bpm := bsbpm.New() - wm := New(context.Background(), ph, sim, bpm) - sm := &fakeSessionManager{} - wm.SetSessionManager(sm) - - peers := testutil.GeneratePeers(3) - - // 
Connect peer 0. Should not receive anything yet. - wm.Connected(peers[0]) - if len(ph.lastInitialWants) != 0 { - t.Fatal("expected no initial wants") - } - - // Broadcast 2 wants - wantHaves := testutil.GenerateCids(2) - wm.BroadcastWantHaves(ctx, 1, wantHaves) - if len(ph.lastBcstWants) != 2 { - t.Fatal("expected broadcast wants") - } - - // Connect peer 1. Should receive all wants broadcast so far. - wm.Connected(peers[1]) - if len(ph.lastInitialWants) != 2 { - t.Fatal("expected broadcast wants") - } - - // Broadcast 3 more wants - wantHaves2 := testutil.GenerateCids(3) - wm.BroadcastWantHaves(ctx, 2, wantHaves2) - if len(ph.lastBcstWants) != 3 { - t.Fatal("expected broadcast wants") - } - - // Connect peer 2. Should receive all wants broadcast so far. - wm.Connected(peers[2]) - if len(ph.lastInitialWants) != 5 { - t.Fatal("expected all wants to be broadcast") - } -} - -func TestReceiveFromRemovesBroadcastWants(t *testing.T) { - ctx := context.Background() - ph := &fakePeerHandler{} - sim := bssim.New() - bpm := bsbpm.New() - wm := New(context.Background(), ph, sim, bpm) - sm := &fakeSessionManager{} - wm.SetSessionManager(sm) - - peers := testutil.GeneratePeers(3) - - // Broadcast 2 wants - cids := testutil.GenerateCids(2) - wm.BroadcastWantHaves(ctx, 1, cids) - if len(ph.lastBcstWants) != 2 { - t.Fatal("expected broadcast wants") - } - - // Connect peer 0. Should receive all wants. - wm.Connected(peers[0]) - if len(ph.lastInitialWants) != 2 { - t.Fatal("expected broadcast wants") - } - - // Receive block for first want - ks := cids[0:1] - haves := []cid.Cid{} - dontHaves := []cid.Cid{} - wm.ReceiveFrom(ctx, peers[1], ks, haves, dontHaves) - - // Connect peer 2. Should get remaining want (the one that the block has - // not yet been received for). 
- wm.Connected(peers[2]) - if len(ph.lastInitialWants) != 1 { - t.Fatal("expected remaining wants") - } -} - -func TestRemoveSessionRemovesBroadcastWants(t *testing.T) { - ctx := context.Background() - ph := &fakePeerHandler{} - sim := bssim.New() - bpm := bsbpm.New() - wm := New(context.Background(), ph, sim, bpm) - sm := &fakeSessionManager{} - wm.SetSessionManager(sm) - - peers := testutil.GeneratePeers(2) - - // Broadcast 2 wants for session 0 and 2 wants for session 1 - ses0 := uint64(0) - ses1 := uint64(1) - ses0wants := testutil.GenerateCids(2) - ses1wants := testutil.GenerateCids(2) - wm.BroadcastWantHaves(ctx, ses0, ses0wants) - wm.BroadcastWantHaves(ctx, ses1, ses1wants) - - // Connect peer 0. Should receive all wants. - wm.Connected(peers[0]) - if len(ph.lastInitialWants) != 4 { - t.Fatal("expected broadcast wants") - } - - // Remove session 0 - wm.RemoveSession(ctx, ses0) - - // Connect peer 1. Should receive all wants from session that has not been - // removed. - wm.Connected(peers[1]) - if len(ph.lastInitialWants) != 2 { - t.Fatal("expected broadcast wants") - } -} - func TestReceiveFrom(t *testing.T) { ctx := context.Background() ph := &fakePeerHandler{} From 0928737a44cb65daa2c88e86aa03988b9a4f68e8 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 22 Apr 2020 13:49:36 -0400 Subject: [PATCH 0929/1035] fix: avoid calling ctx.SetDeadline() every time we send a message This commit was moved from ipfs/go-bitswap@0b7aab09d43293208ab9a4f34014e5d24048cbe2 --- bitswap/network/ipfs_impl.go | 39 +++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index e7673795a..3636b048a 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -103,11 +103,14 @@ func (s *streamMessageSender) Connect(ctx context.Context) (network.Stream, erro return s.stream, nil } - if err := s.bsnet.ConnectTo(ctx, s.to); err != nil { + tctx, cancel := 
context.WithTimeout(ctx, s.opts.SendTimeout) + defer cancel() + + if err := s.bsnet.ConnectTo(tctx, s.to); err != nil { return nil, err } - stream, err := s.bsnet.newStreamToPeer(ctx, s.to) + stream, err := s.bsnet.newStreamToPeer(tctx, s.to) if err != nil { return nil, err } @@ -139,25 +142,20 @@ func (s *streamMessageSender) SupportsHave() bool { // Send a message to the peer, attempting multiple times func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { - return s.multiAttempt(ctx, func(fnctx context.Context) error { - return s.send(fnctx, msg) + return s.multiAttempt(ctx, func() error { + return s.send(ctx, msg) }) } // Perform a function with multiple attempts, and a timeout -func (s *streamMessageSender) multiAttempt(ctx context.Context, fn func(context.Context) error) error { +func (s *streamMessageSender) multiAttempt(ctx context.Context, fn func() error) error { // Try to call the function repeatedly var err error for i := 0; i < s.opts.MaxRetries; i++ { - deadline := time.Now().Add(s.opts.SendTimeout) - sndctx, cancel := context.WithDeadline(ctx, deadline) - - if err = fn(sndctx); err == nil { - cancel() + if err = fn(); err == nil { // Attempt was successful return nil } - cancel() // Attempt failed @@ -196,13 +194,18 @@ func (s *streamMessageSender) multiAttempt(ctx context.Context, fn func(context. 
// Send a message to the peer func (s *streamMessageSender) send(ctx context.Context, msg bsmsg.BitSwapMessage) error { + start := time.Now() stream, err := s.Connect(ctx) if err != nil { log.Infof("failed to open stream to %s: %s", s.to, err) return err } - if err = s.bsnet.msgToStream(ctx, stream, msg); err != nil { + // The send timeout includes the time required to connect + // (although usually we will already have connected - we only need to + // connect after a failed attempt to send) + timeout := s.opts.SendTimeout - time.Since(start) + if err = s.bsnet.msgToStream(ctx, stream, msg, timeout); err != nil { log.Infof("failed to send message to %s: %s", s.to, err) return err } @@ -234,9 +237,9 @@ func (bsnet *impl) SupportsHave(proto protocol.ID) bool { return true } -func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg.BitSwapMessage) error { - deadline := time.Now().Add(sendMessageTimeout) - if dl, ok := ctx.Deadline(); ok { +func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg.BitSwapMessage, timeout time.Duration) error { + deadline := time.Now().Add(timeout) + if dl, ok := ctx.Deadline(); ok && dl.Before(deadline) { deadline = dl } @@ -277,8 +280,8 @@ func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID, opts *Messag opts: opts, } - err := sender.multiAttempt(ctx, func(fnctx context.Context) error { - _, err := sender.Connect(fnctx) + err := sender.multiAttempt(ctx, func() error { + _, err := sender.Connect(ctx) return err }) @@ -313,7 +316,7 @@ func (bsnet *impl) SendMessage( return err } - if err = bsnet.msgToStream(ctx, s, outgoing); err != nil { + if err = bsnet.msgToStream(ctx, s, outgoing, sendMessageTimeout); err != nil { _ = s.Reset() return err } From 76385acb4a776609d595d995e8fa523a31ef9f83 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 22 Apr 2020 17:43:23 -0400 Subject: [PATCH 0930/1035] fix: change timing for DONT_HAVE timeouts to be more conservative This commit 
was moved from ipfs/go-bitswap@43284e90606a7febb8b6178285dd3d90c2b9a65e --- bitswap/internal/messagequeue/donthavetimeoutmgr.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr.go b/bitswap/internal/messagequeue/donthavetimeoutmgr.go index e5ce0b287..e53b232e6 100644 --- a/bitswap/internal/messagequeue/donthavetimeoutmgr.go +++ b/bitswap/internal/messagequeue/donthavetimeoutmgr.go @@ -19,12 +19,12 @@ const ( // maxExpectedWantProcessTime is the maximum amount of time we expect a // peer takes to process a want and initiate sending a response to us - maxExpectedWantProcessTime = 200 * time.Millisecond + maxExpectedWantProcessTime = 2 * time.Second // latencyMultiplier is multiplied by the average ping time to // get an upper bound on how long we expect to wait for a peer's response // to arrive - latencyMultiplier = 2 + latencyMultiplier = 3 ) // PeerConnection is a connection to a peer that can be pinged, and the From c6960915e2ba47f1a988d6999e7d3eff661c2f38 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 23 Apr 2020 14:59:12 -0400 Subject: [PATCH 0931/1035] refactor: remove unused code This commit was moved from ipfs/go-bitswap@1274d405223d5614f4c0f98e62040c4fe7e437cd --- bitswap/internal/sessiondata/sessiondata.go | 18 -- .../sessionrequestsplitter.go | 163 ------------------ .../sessionrequestsplitter_test.go | 98 ----------- bitswap/internal/testutil/testutil.go | 19 -- 4 files changed, 298 deletions(-) delete mode 100644 bitswap/internal/sessiondata/sessiondata.go delete mode 100644 bitswap/internal/sessionrequestsplitter/sessionrequestsplitter.go delete mode 100644 bitswap/internal/sessionrequestsplitter/sessionrequestsplitter_test.go diff --git a/bitswap/internal/sessiondata/sessiondata.go b/bitswap/internal/sessiondata/sessiondata.go deleted file mode 100644 index a56f93be5..000000000 --- a/bitswap/internal/sessiondata/sessiondata.go +++ /dev/null @@ -1,18 +0,0 @@ -package 
sessiondata - -import ( - cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-core/peer" -) - -// OptimizedPeer describes a peer and its level of optimization from 0 to 1. -type OptimizedPeer struct { - Peer peer.ID - OptimizationRating float64 -} - -// PartialRequest is represents one slice of an over request split among peers -type PartialRequest struct { - Peers []peer.ID - Keys []cid.Cid -} diff --git a/bitswap/internal/sessionrequestsplitter/sessionrequestsplitter.go b/bitswap/internal/sessionrequestsplitter/sessionrequestsplitter.go deleted file mode 100644 index b96985ec9..000000000 --- a/bitswap/internal/sessionrequestsplitter/sessionrequestsplitter.go +++ /dev/null @@ -1,163 +0,0 @@ -package sessionrequestsplitter - -import ( - "context" - - bssd "github.com/ipfs/go-bitswap/internal/sessiondata" - - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/peer" -) - -const ( - minReceivedToAdjustSplit = 2 - maxSplit = 16 - maxAcceptableDupes = 0.4 - minDuplesToTryLessSplits = 0.2 - initialSplit = 2 -) - -type srsMessage interface { - handle(srs *SessionRequestSplitter) -} - -// SessionRequestSplitter track how many duplicate and unique blocks come in and -// uses that to determine how much to split up each set of wants among peers. -type SessionRequestSplitter struct { - ctx context.Context - messages chan srsMessage - - // data, do not touch outside run loop - receivedCount int - split int - duplicateReceivedCount int -} - -// New returns a new SessionRequestSplitter. -func New(ctx context.Context) *SessionRequestSplitter { - srs := &SessionRequestSplitter{ - ctx: ctx, - messages: make(chan srsMessage, 10), - split: initialSplit, - } - go srs.run() - return srs -} - -// SplitRequest splits a request for the given cids one or more times among the -// given peers. 
-func (srs *SessionRequestSplitter) SplitRequest(optimizedPeers []bssd.OptimizedPeer, ks []cid.Cid) []bssd.PartialRequest { - resp := make(chan []bssd.PartialRequest, 1) - - select { - case srs.messages <- &splitRequestMessage{optimizedPeers, ks, resp}: - case <-srs.ctx.Done(): - return nil - } - select { - case splitRequests := <-resp: - return splitRequests - case <-srs.ctx.Done(): - return nil - } - -} - -// RecordDuplicateBlock records the fact that the session received a duplicate -// block and adjusts split factor as neccesary. -func (srs *SessionRequestSplitter) RecordDuplicateBlock() { - select { - case srs.messages <- &recordDuplicateMessage{}: - case <-srs.ctx.Done(): - } -} - -// RecordUniqueBlock records the fact that the session received a unique block -// and adjusts the split factor as neccesary. -func (srs *SessionRequestSplitter) RecordUniqueBlock() { - select { - case srs.messages <- &recordUniqueMessage{}: - case <-srs.ctx.Done(): - } -} - -func (srs *SessionRequestSplitter) run() { - for { - select { - case message := <-srs.messages: - message.handle(srs) - case <-srs.ctx.Done(): - return - } - } -} - -func (srs *SessionRequestSplitter) duplicateRatio() float64 { - return float64(srs.duplicateReceivedCount) / float64(srs.receivedCount) -} - -type splitRequestMessage struct { - optimizedPeers []bssd.OptimizedPeer - ks []cid.Cid - resp chan []bssd.PartialRequest -} - -func (s *splitRequestMessage) handle(srs *SessionRequestSplitter) { - split := srs.split - // first iteration ignore optimization ratings - peers := make([]peer.ID, len(s.optimizedPeers)) - for i, optimizedPeer := range s.optimizedPeers { - peers[i] = optimizedPeer.Peer - } - ks := s.ks - if len(peers) < split { - split = len(peers) - } - peerSplits := splitPeers(peers, split) - if len(ks) < split { - split = len(ks) - } - keySplits := splitKeys(ks, split) - splitRequests := make([]bssd.PartialRequest, 0, len(keySplits)) - for i, keySplit := range keySplits { - splitRequests = 
append(splitRequests, bssd.PartialRequest{Peers: peerSplits[i], Keys: keySplit}) - } - s.resp <- splitRequests -} - -type recordDuplicateMessage struct{} - -func (r *recordDuplicateMessage) handle(srs *SessionRequestSplitter) { - srs.receivedCount++ - srs.duplicateReceivedCount++ - if (srs.receivedCount > minReceivedToAdjustSplit) && (srs.duplicateRatio() > maxAcceptableDupes) && (srs.split < maxSplit) { - srs.split++ - } -} - -type recordUniqueMessage struct{} - -func (r *recordUniqueMessage) handle(srs *SessionRequestSplitter) { - srs.receivedCount++ - if (srs.split > 1) && (srs.duplicateRatio() < minDuplesToTryLessSplits) { - srs.split-- - } - -} -func splitKeys(ks []cid.Cid, split int) [][]cid.Cid { - splits := make([][]cid.Cid, split) - for i, c := range ks { - pos := i % split - splits[pos] = append(splits[pos], c) - } - return splits -} - -func splitPeers(peers []peer.ID, split int) [][]peer.ID { - splits := make([][]peer.ID, split) - for i, p := range peers { - pos := i % split - splits[pos] = append(splits[pos], p) - } - return splits -} diff --git a/bitswap/internal/sessionrequestsplitter/sessionrequestsplitter_test.go b/bitswap/internal/sessionrequestsplitter/sessionrequestsplitter_test.go deleted file mode 100644 index b0e7a0f30..000000000 --- a/bitswap/internal/sessionrequestsplitter/sessionrequestsplitter_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package sessionrequestsplitter - -import ( - "context" - "testing" - - "github.com/ipfs/go-bitswap/internal/testutil" -) - -func quadEaseOut(t float64) float64 { return t * t } - -func TestSplittingRequests(t *testing.T) { - ctx := context.Background() - optimizedPeers := testutil.GenerateOptimizedPeers(10, 5, quadEaseOut) - keys := testutil.GenerateCids(6) - - srs := New(ctx) - - partialRequests := srs.SplitRequest(optimizedPeers, keys) - if len(partialRequests) != 2 { - t.Fatal("Did not generate right number of partial requests") - } - for _, partialRequest := range partialRequests { - if 
len(partialRequest.Peers) != 5 && len(partialRequest.Keys) != 3 { - t.Fatal("Did not split request into even partial requests") - } - } -} - -func TestSplittingRequestsTooFewKeys(t *testing.T) { - ctx := context.Background() - optimizedPeers := testutil.GenerateOptimizedPeers(10, 5, quadEaseOut) - keys := testutil.GenerateCids(1) - - srs := New(ctx) - - partialRequests := srs.SplitRequest(optimizedPeers, keys) - if len(partialRequests) != 1 { - t.Fatal("Should only generate as many requests as keys") - } - for _, partialRequest := range partialRequests { - if len(partialRequest.Peers) != 5 && len(partialRequest.Keys) != 1 { - t.Fatal("Should still split peers up between keys") - } - } -} - -func TestSplittingRequestsTooFewPeers(t *testing.T) { - ctx := context.Background() - optimizedPeers := testutil.GenerateOptimizedPeers(1, 1, quadEaseOut) - keys := testutil.GenerateCids(6) - - srs := New(ctx) - - partialRequests := srs.SplitRequest(optimizedPeers, keys) - if len(partialRequests) != 1 { - t.Fatal("Should only generate as many requests as peers") - } - for _, partialRequest := range partialRequests { - if len(partialRequest.Peers) != 1 && len(partialRequest.Keys) != 6 { - t.Fatal("Should not split keys if there are not enough peers") - } - } -} - -func TestSplittingRequestsIncreasingSplitDueToDupes(t *testing.T) { - ctx := context.Background() - optimizedPeers := testutil.GenerateOptimizedPeers(maxSplit, maxSplit, quadEaseOut) - keys := testutil.GenerateCids(maxSplit) - - srs := New(ctx) - - for i := 0; i < maxSplit+minReceivedToAdjustSplit; i++ { - srs.RecordDuplicateBlock() - } - - partialRequests := srs.SplitRequest(optimizedPeers, keys) - if len(partialRequests) != maxSplit { - t.Fatal("Did not adjust split up as duplicates came in") - } -} - -func TestSplittingRequestsDecreasingSplitDueToNoDupes(t *testing.T) { - ctx := context.Background() - optimizedPeers := testutil.GenerateOptimizedPeers(maxSplit, maxSplit, quadEaseOut) - keys := 
testutil.GenerateCids(maxSplit) - - srs := New(ctx) - - for i := 0; i < 5+minReceivedToAdjustSplit; i++ { - srs.RecordUniqueBlock() - } - - partialRequests := srs.SplitRequest(optimizedPeers, keys) - if len(partialRequests) != 1 { - t.Fatal("Did not adjust split down as unique blocks came in") - } -} diff --git a/bitswap/internal/testutil/testutil.go b/bitswap/internal/testutil/testutil.go index 086035a0d..48af8a7d8 100644 --- a/bitswap/internal/testutil/testutil.go +++ b/bitswap/internal/testutil/testutil.go @@ -3,7 +3,6 @@ package testutil import ( "math/rand" - bssd "github.com/ipfs/go-bitswap/internal/sessiondata" bsmsg "github.com/ipfs/go-bitswap/message" "github.com/ipfs/go-bitswap/wantlist" blocks "github.com/ipfs/go-block-format" @@ -66,24 +65,6 @@ func GeneratePeers(n int) []peer.ID { return peerIds } -// GenerateOptimizedPeers creates n peer ids, -// with optimization fall off up to optCount, curveFunc to scale it -func GenerateOptimizedPeers(n int, optCount int, curveFunc func(float64) float64) []bssd.OptimizedPeer { - peers := GeneratePeers(n) - optimizedPeers := make([]bssd.OptimizedPeer, 0, n) - for i, peer := range peers { - var optimizationRating float64 - if i <= optCount { - optimizationRating = 1.0 - float64(i)/float64(optCount) - } else { - optimizationRating = 0.0 - } - optimizationRating = curveFunc(optimizationRating) - optimizedPeers = append(optimizedPeers, bssd.OptimizedPeer{Peer: peer, OptimizationRating: optimizationRating}) - } - return optimizedPeers -} - var nextSession uint64 // GenerateSessionID make a unit session identifier. 
From 23ccc16adc94274b9aca10b894e4fc20dd755ecb Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 23 Apr 2020 16:34:31 -0400 Subject: [PATCH 0932/1035] refactor: remove WantManager This commit was moved from ipfs/go-bitswap@932e2d60a5a8e84e441505c3e240167e236b0395 --- bitswap/bitswap.go | 23 ++-- bitswap/docs/go-bitswap.png | Bin 84886 -> 81880 bytes bitswap/docs/go-bitswap.puml | 14 +-- bitswap/docs/how-bitswap-works.md | 13 +- bitswap/internal/session/session.go | 59 +++++---- bitswap/internal/session/session_test.go | 114 +++++++++-------- .../session/sessionwantsender_test.go | 5 +- .../sessioninterestmanager.go | 2 +- .../internal/sessionmanager/sessionmanager.go | 10 +- .../sessionmanager/sessionmanager_test.go | 19 ++- bitswap/internal/wantmanager/wantmanager.go | 103 --------------- .../internal/wantmanager/wantmanager_test.go | 117 ------------------ 12 files changed, 139 insertions(+), 340 deletions(-) delete mode 100644 bitswap/internal/wantmanager/wantmanager.go delete mode 100644 bitswap/internal/wantmanager/wantmanager_test.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index aab1429fa..f3320967f 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -22,7 +22,6 @@ import ( bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" bssm "github.com/ipfs/go-bitswap/internal/sessionmanager" bsspm "github.com/ipfs/go-bitswap/internal/sessionpeermanager" - bswm "github.com/ipfs/go-bitswap/internal/wantmanager" bsmsg "github.com/ipfs/go-bitswap/message" bsnet "github.com/ipfs/go-bitswap/network" blocks "github.com/ipfs/go-block-format" @@ -123,13 +122,13 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, return nil }) - var wm *bswm.WantManager // onDontHaveTimeout is called when a want-block is sent to a peer that // has an old version of Bitswap that doesn't support DONT_HAVE messages, // or when no response is received within a timeout. 
+ var sm *bssm.SessionManager onDontHaveTimeout := func(p peer.ID, dontHaves []cid.Cid) { - // Simulate a DONT_HAVE message arriving to the WantManager - wm.ReceiveFrom(ctx, p, nil, nil, dontHaves) + // Simulate a message arriving with DONT_HAVEs + sm.ReceiveFrom(ctx, p, nil, nil, dontHaves) } peerQueueFactory := func(ctx context.Context, p peer.ID) bspm.PeerQueue { return bsmq.New(ctx, p, network, onDontHaveTimeout) @@ -138,7 +137,6 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, sim := bssim.New() bpm := bsbpm.New() pm := bspm.New(ctx, peerQueueFactory, network.Self()) - wm = bswm.New(ctx, pm, sim, bpm) pqm := bspqm.New(ctx, network) sessionFactory := func(ctx context.Context, id uint64, spm bssession.SessionPeerManager, @@ -149,14 +147,13 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, provSearchDelay time.Duration, rebroadcastDelay delay.D, self peer.ID) bssm.Session { - return bssession.New(ctx, id, wm, spm, pqm, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) + return bssession.New(ctx, id, spm, pqm, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) } sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.SessionPeerManager { return bsspm.New(id, network.ConnectionManager()) } notif := notifications.New() - sm := bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) - wm.SetSessionManager(sm) + sm = bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) engine := decision.NewEngine(ctx, bstore, network.ConnectionManager(), network.Self()) bs := &Bitswap{ @@ -166,7 +163,6 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, process: px, newBlocks: make(chan cid.Cid, HasBlockBufferSize), provideKeys: make(chan cid.Cid, provideKeysBufferSize), - wm: wm, pm: pm, pqm: pqm, sm: sm, @@ -207,9 +203,6 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, // Bitswap instances implement 
the bitswap protocol. type Bitswap struct { - // the wantlist tracks global wants for bitswap - wm *bswm.WantManager - pm *bspm.PeerManager // the provider query manager manages requests to find providers @@ -357,7 +350,7 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []b // Send all block keys (including duplicates) to any sessions that want them. // (The duplicates are needed by sessions for accounting purposes) - bs.wm.ReceiveFrom(ctx, from, allKs, haves, dontHaves) + bs.sm.ReceiveFrom(ctx, from, allKs, haves, dontHaves) // Send wanted blocks to decision engine bs.engine.ReceiveFrom(from, wanted, haves) @@ -480,14 +473,14 @@ func (bs *Bitswap) blockstoreHas(blks []blocks.Block) []bool { // PeerConnected is called by the network interface // when a peer initiates a new connection to bitswap. func (bs *Bitswap) PeerConnected(p peer.ID) { - bs.wm.Connected(p) + bs.pm.Connected(p) bs.engine.PeerConnected(p) } // PeerDisconnected is called by the network interface when a peer // closes a connection func (bs *Bitswap) PeerDisconnected(p peer.ID) { - bs.wm.Disconnected(p) + bs.pm.Disconnected(p) bs.engine.PeerDisconnected(p) } diff --git a/bitswap/docs/go-bitswap.png b/bitswap/docs/go-bitswap.png index 31dff2b85a71af71b056e0cdbaa12941d13dabf2..805bf6562a822c8cd68ed310ff49f07e4be45ba2 100644 GIT binary patch literal 81880 zcmbq*byU?`*EROF2x$;d=|;Ly5s;AXmJaD|6_EyMk(3naZV)MHX(go_q?_}tbGY}u z&l~@JIEI`7=j`8(6?4rsH~zBHVz+M)-9SS_yDk3gsXQ9mHCr^aiURoL@b+ePkh2Sy?$kxCE@S~MYgvQg|6Ngs0bQWOyzo9{HXtA zwxDGUfBWckwx_A@W&Sc4L$>>E&~IMlsc)D#x;Jkn?qAO+XT5VLsaR?gLp#VWj_MkB zpzH=ksh@W7>R)o9m1-x}i;jdfk{>cEsnhX4e>zL_c+MstY!OACdnaspY>hG|CKJn* zF63c}S?G-ry)yrh8&%kq79y`XuldMrxN^*fF9=%vvI>&zdo<3lbno#Dvv12sQ68NO zh~zi5s_SN0okp(?LU2AK8ZJftdK=_-+tAPOt&eC#equ$*l691KfD-RrCB+z}J(7{$ z$lklG`Z{Y3mRuq0_l3R`bto)PYUe9yY+T^F>K3^%g8S`eJ)ZJxHy+AN$@5h4OVzxQWfW@wwM17YuzC_Po(A`V;SN&lb z`qx~kx5~EueMM&m2N$?kTq$rb4p3a7YK%7y@7NL_b}8O1wj0^hmdp0p)>g+4GUMoZ 
zN3BX2C`oqpN!8@))rGOrFUhMuo;(qsU$6=``N_)0rP$srfHjwL~w6p z-cXB{qhz+toJ!G=h8mfA(VaJ08SF$?yp0s!V^O_&73BBi?E@OWH$QvtJ$OU-0(17$ zAE8}Gt4#hN{w{0d`EAef;hOc%TDyhrN++Z4LtCEKPdC9_kzcPo?UMgGe~E^6DwcEW z@7DxqdWL_${ECK2f9dbnS6`_T{{4~#t*QO*m#;3|Q@;B5>sx4rtbf1MMe~b7x&`v< z(xd;cp~)Gjsl{&6_-p4ccwdL%KA?sZ?Q4&sCqeV+H7j-JL_P@{`M|G!VXSk%f9o=Z z`rKhqEjXX?A_j6jEUal#YGd*^vfbAY=i{sbMjcj$H<_bDG`n2-R zppw~H0~whf?f>yv7muA@G4xm8oLyXYU90Y(&h+##@?o8lslD9X+)qVBNUq>KiVv86 zdomr!>?)w;k37FuXQyT^B#KMNrpowh()rZ6{T`YPNd}*G453SxwgcDmIph=azF-&L8y; z3c}fjRXaL5YS+2D=mXK*g(CXQ3Qbmqa`h&9dStW@#e|N+oDD|HEHaXQa(q=ij5SWArK2-@ z`Esn_g?Giu($cSC(WC8@9)bP%pXy>_zW(K8czBL`tG}Y7qh*9n6&a4^n7wEP5|fgI zJWmd&6P+KhvGwb+#Pd2Vua!@(_ejPa|9hg-G) zB%)1c)HJT5?UIfdjp>YK6Jy}Dn-8)tFDp|XI~^*oqnzJ$J*}{rS%(#w59j$;Pb+uA zr?vSlkNmYR1x3}LY)LNVHRxhGwT6(4qUP*%)Sq;O*yH2nZAzh4$av}rbQTaOc6Dxf=IShq6Zj;A}AMndt4l|ah(P@;|5q%A{CgrUQgmto(^iVJ+Pak;UhBYU*My@i*LFUfV8w(uKulKTToEiJ8P z@>7@Tf~NLV(ctAWdu!&AH5*g*`uTaIhdP>?FKc!NRdjH-ebGsXO#ZL&Exy;qXuB?L z&2{u8y9@ZPvT4(;8yOkx!T!y(hK3Td8I6>f!i?u~HOjja`1aeVlR7_gNNT_6&v-7s zKs>$YRDY}`l*G+YxTPN|KsDdd-u~_&Wt`N>$qCCx`6wp*8T*3VTihD7(R zww#=tjEs!w>FFys@pg)nJ6-<|k8v6oj~3wPZ<*YLfR>S-?%*aNCDqB+!%rS99mmPY z$vL#MR~;6Ehl6u)d@OYGr%}Hp$i~4ztH?my)3dH-XL&Gtf6Oj)Ahh5E*F=s#HM(R@ z4H%&qnMVo2ZL~+VGABn0XZ~lQex_9H)@Cc2lZNZj&Jx$8`!@09!8_HdU&cusq;);^ zk7IZLr!xdU%i;f^E;m`p_;Tal-@Q6_6fAm3Llme)%93^!M+D5YGvGf$?0t^p^T;=J zhrTiIpLj=uN)ayq`C%Mn{QX+9N}+7o&oyLr)56WH+O!}RI~)1hYvTuhmnhjhzt%64!Qkf7w5UIqfCFUu4OTzrJ}e{)x~KsA1dsh2Nvl*IDRQ`?X8eR!ZurL6q> ze=Ll6<1+ac89a+RZuPP^YRg}}MSlB$FT96EPygqibAN;SVN7mIy9L+n;ZVGG1($&Z zI$yF#F$+%8ihL!nH>zr}{}{yIXZn&udA-G6rQSg#_E3`3%{_)#+rec%d&H4 zDHL%u9JsPlI<9AUWxfM7ZM2|&roF!FIy*nl+|iECq(l09?extSvj!C^8|wz0Pqnbf zzow#2oA1Rp?wNFS#4~<*Ih*g3h$t~q7n8oU>&3=G3Gp=|{LY8A%c-G)3|0%7LZ_eD zw`1yY0yyfA=#)B>gpv*-k~$Ok4;!wL#qZ3J)!UxBMxQ*fv58CcJc@((kRW)vnZVcR z7gfzjiF&paIr7`QNY2>sVEaN3_0=nk@AH+{v$2DWlN*o>rCK@U8PpcXprcFK^X=Re)y3cY(cldIw4dRqAVclm_NLhIIE zlKWOt$;eu8aBFdqU@U7h52FQfVbP{M3OamcKX;6#_S(H%1{)ifovk^={=%)(eVNES 
z!FA4D)y=(YWGB8Ku*xB{Pt={(H@T~-{%DV~+Sfds@4CCGzO{!z5)h70!hi5ObHT2u zy+slQj?nu5@zH(P(yg^oYDvkN(-ZBp1+P6e5_;cvXJc(`n>vVH>W2?kva|c=S~)(3 zdhVn`qK8rH4}82Hea+UJ7(aERrPWBswXb4s@($(SdXMvB(yqtEDgac^dTT{zZP*8aOGwVnd{<(r5lD1 zIXLEcd3c5f45Z?@wZ{avz6(z~etTZ?Nb*eC3Ovr$9CH>Eu&sDk^xPX<<=FqGPmV)8VW0z}rpNr@I9Em=Ty{kR><;&WGb7*<# zpO}PQXzSo`)QBPU(_#rAMgPQErgpjYrrwq}hIIHw-O=(kVvG4ge(gNdd~K}`=C-X& zm)=Ki8+ZK~8`HDJUXqqV+j41^`A!#Qxdh#@FJGdHbv@)NWFWD6EXClihC(#s?qeb{{O zVOvk=I9Og3e7{R5+-dYWVy-H3@}(dt5oK$;PeYvB^SpketL>Zi zZ)P-k%0iN6X@wGb^$Tn2Y?AIbXP55~X~5ds3LP4G5^9nX)=1pbAzwYZ{?F|9c?~8# z_Q&}=j%Gf660$ODU&EtsZ55M>+dB~04~iyj`|h{5=Q;KI8WmX2)yp-mn|ehx4Ji+z zR5No{lRpT#aX#)06q`-=@o`J?7_#X6aGW7!Gf^~$AZl&@uMmRET%zB~+aJp+ymwnV zuC8#{JTD&%;o!jisb+qm!(I)p?X6s!+wW(VwDZfPZ8T`|-8-$6@?%SFsP7N6Zl?=m zbCXj@r|&7Rmnj~4&^whmefPJ9HO8wE@7r0x|4w%v82=z#?{KZ{ z=Iw10^Q;*T$(6|siY#}xX}!NR$JCs@He)&N`yr{Z=*lD^5CqO z>YLAeu^RV4?_q7e_miJO|^mqC=>1^igzB!KyHF;D1qX#fMQOtjn z=IBna?FePY76yW-CfbX@hEbAICie)tsl6RpiZSMo|6s@*+;ySsySAWiR2Q!tBbMcw zF-VQ~w*7*Az~~d&C}qCmy_z!h8KQgnw271gwfAUhjAib%$d!&cI7mJhXa&%oj5t3= zNV5!Di7#D?0s>?FquiJ%mCGJ%l@7Kfyw+4sm%MIkxYLj*@R~qu&~|vxUs`aBnyal@ z#+AjwD7ob05T9z@mm~W{PE+p%^66QrngzMWq)IcHPyotU#wDfrJf~sgUf|Q z;m92S;hv6?M{078G1)!CsY3Z2%^KH09QK7Pc`GjB4*5K#W5>Sy6?$iUajuT1a=APc z#W7|fz-q|iobPlQ3H?6qT}d8>!XiE7aH{e>6GF2MxXkO^1H~($`kvWoJ3JmkcW6VU zA&>~0l@?o-2WHJ(zzr5QKb>e8R*1hYnOcin|rV-@@S?EVWE z@HWUEAVGsYVQ?zVz7!)yiita6ssG!fuiT-Q`qb?t9^Wrf_ZSekKS(RpE@%vkce%!RmOt&V+a$`aV2m{kucs;|7bvEtS zQ#ksHyK{m$*C%J>HNWgU0rR*O@fWZVe;>u5YUNtS6o5b%y3nyy+k?k014c8ikJWdn z{i32=kVvZCzFK-Rmu$J?i#@HFV6}~Ld3I*{7!Sm@$fwx}9@P%(_nm|ob$C5^?c;2E zNNqV8Lqonh^aCsM_v!)e8NR!zl&LIA>UttpX79pU2>*0zXS)S<3>KP#U6ilN!@?rVpjB*i8=kQ+Ybj8*%d1tm`wE91|FRFU4tv5>U5*Az zuHzmDdPehxkBt>6SlhVk#|(N5v{w%IQ82{NtsPnR1C+G6320i z%k^hcD*b!6jw&xDn6d7mn|PRBei9cN6~mzR0PpbNV0EIF@BaP!e0&<2{hL@sP^*ELGM2+?D7q`4jo zDMrlSI>2>WD;rc*>y8Yb)HO9V)zTs%dx18zsH~KGFju7}Ppd`mLCZ8)aV_-3iIY%= z27>alL|&oZwoGFRMkhMP-Ele6bXL;kf~k1vn)Wqbfh*`}j^_>-Q`*{ol(ID@V*XUW 
z+IctH)6pe1E-o%Ivczd);=`HWM4eMRhLGF#@WW`_>UA&02%Xc_HKj`)Db706$`N1tUGCcCgV5?_3!bPi4pYYT0kt0$UpQdb`MxBPkau?`9j;P=F(q{gb9ta42G zoYp)ndr5oJ3(2P&Es_xZD{k64Opd8ZJ(wHMYM3|Io7_tJ&>vJaw~w(HWic(izFhCj$HtXr!@bUj~~f0#RnQ}c65 zF*=$o`gzz>#;5Nf-|`pl^sBx!{sboE>3J4LE(S~8i!13ts2tZFN1P^Q?}bK=yEUPI zPV~~~+%b083~4&Pvmq^^mAn5oQqPdp5KZ=5PoBMpy@$4VZrPszUJd-zEO5Ks;rl+l zzqXaXP$YlkqM?`Ziu0b`ZqNs?2iwkuZ>Cz~E_GD<%bd1Y0I|GAsD6v{qlxLWMhHS$L82#qfMc1pYv*SBc3obt1YKhr zbJ}=WIclz7xUOsjTHO_r)-P$z1Ul}NF}&u!bf(Qd61&F|nD9+>3*9GP<~%NO4yTUR=v zFTFAGkmGsCj&_XViaRAGHle;P39zTQPixxnEk^_LFBBx6lZfphSQGUOMv4JoeC*y0>re2NF~WJZ+WP zQsg;#v`%{TVUn)!TUJz{{HiZQ; zf`$89*C6amE#o4Am#xlSm4RQ=OSc#7^%QVu(Iim?L$uxO_pr3Iv{Q29$qhky0Yq8* zW|W7d@czb#z(u~_rBbV3J$;T1VwU5!m6(~w5tU~3!4l7&VP>Xq-{0QeZi~Et);^q8 zJz_1blFgViba%Mfq+6^h>YYW8mvIC;_Y0^D`b2(QYjOEbViT`fRmb97yovjR>qdK% zBd@wTtqhtlsu#nM`S~*q7uWvxXU_u1!v1t>TqE5dJh2rvTAsrC>gwvr$qA6<3DRP4WCfJvU(C|c)QO`1thOXT-PfV=D6@i5 z%64eWEZoc9C@Iusk!AlkNs1xG>B84|0?_1H9&>8<> z<<}7qREYG7GyaZ}E_KPpnoO9Su?)9>vx=*^rP|6`^ zCf*senm9&7qri8(9N3E`$Fbwrn;Tx+NuU_QghZ7 zG&I-TmOb6gZ8MD?^N} z@sjHT5|3&p!ny)DR<8oMQ5r1NA&)Z>-xC#57aUACCZtq+roZ z;4Bcsz@ipMqA-S}O%~v5X>dSl0lx z6Dw5eICFAzfPF)}z6ZZy6zXZCdpE!8Lf=GT#Jvd}nP#(lD35=l6_mli zzcXrCJ!*VNt3eE?eeoE%-6eq@>7#D($)uA#zW(`pdnp8U{QTD64Qg^crU*1by;*z!tmf7>xZn_ZG=o!ekrEKM+<0+20xEjJHz z0ZYxZvhq7<@szk}&7~6xRjcxNoO(YN6FVt48;?WkibT^#rqM@BIh3;NRd%P6K;)PN z|7`nY+NTANRVAp2^&$ZyE6bdUUk~6tc7GcG*(ak%{lS$ky*hpg2S&D}#E&l)P*sDi znp`pi)^Y3%nVt%vdG#l?6%vLC4&gj-jX0;`v?4memCg|pxkEX=V>8W5(hPqwhRP4l z;V9t0sYT&o2?K}MMY4FLahz;F&^2(N;y>JYDUno~mY$KQfDgrPC{#yaaKWD)$B`FX&L_G_VjADV7C1 z6`Yt^t#@+`?x4}@*C#8Iw>_98d`xd_!yiz z;8!UCl^#Rk?Tg;_O}9o-O;ypv-K*_dPA#|L!`gDjqf@Ut+S+>0Qg=cfNt|a(BbBDq zp~B|N7+y&~lFuz;VYb$KGc(3!o_ak7#T~P}j9rCwm>Ewh(38j6tK?cDpt6HPJ<_Lj zWU5f3(l%Y#DxRc8gqOee=hXFUFqm8Vpjow&E7An{9nMNS-9MtCY>hxZ+1aAgXY8@gP9ZTo4Q!C?73hdh81X8!J&)s3_T6OQ8r$x;{BBu+W>{#G0k z;A~RfM9z@)S(;=t=?FUyGmm6e8&MA5*W(R?f{-V zCiqJG6X0A-F8zEV&Fd$n_8|RF6~S#X*jn9e2MSt+^`lAEL!j|S>%9`SIMjqmjb@X# 
z#o7)((lh)t{v&>dEPSeNRauGA=laSMWXJGb(sQKs@P6yqy1`3Q)g?G8>D?VUSNxz~ z-$na3Z(b{Kc4dlUtlg2-R|)7P+rQ~YXrAO5&MTx9JV4sKE;k{QBJNcy2qjR_>q)*Y zq5$~8b9R3(IsjcVgTa)GJxLYw<5J-mx-T!Zd^y#`mZNE%Zqq5_!6;BL$0kN$_6;cUVF|B`x~{f=!*y^3&s@{;vKWD8 z*Q_q-1p4H`&leeSji>0aRYPsiMjR#@NY}dg{Zs)LSrkaxE~3`R14qr2`9aTjdKnkh zGybHkVd9HiAA)zF;_6slf4@P|qZ|evGA!zDb|@~A_wOvEG;ebzI+)0KMiEAGtu9$*_f!E7I1ZQ3$?BuX3{r&u|8I*ciJO_ zh(CX7vmTgMn=M30IfaxzpOViXY_H$*x=6P|Hr;WrHafN^R`*a<&5Q%~Tx-n6Z_BsK zZQ7r@No=(xnAjuteMH2}+L{OAbYJMt*GtTG2h;dE?)RCiZ0Fj;DJ3;EHCsYR_$)?B z$|pUKp@r+DHNY6j?|x9=>)ZPFI(ADa8KO^;*}vM$ie>_fj7-S5JKmSn)nse7&8lXz zB?)n|P+j+wD-e(I&gic~eKD^+P1jxlw>|g$>PcM5`u*As|MD7}nU+>kH>uUMpM#*< zXh-Qk=tD^bMt=QrIoz5H4i0W1{#9-@x%#V^S~`C5hw#Sj8PcArmHU{E8xyAk3Nrp4 zZhOCs_4W0G%O|%>s{~m6yhm4vA9^s9Sxqtx;rw$%l=_BKP;!4J4`UGKLAvqRNhR}; zQM+V@k|kn=F}!e7BCY_2yYx4q%9*ACf~@8KD+{-SDc@$Xmn}hrz)?YGKipgH@RIXJ z?S6MGTV_$w)=GZ;*{>c?D&>&i;HYR zf-}O>S0!VZwCdc|fPAnmV#qy&Mmv=E{v;7cVt5*LE0XqX9!K_tMlOeweC3rZ>xak3 z!^6WHqvbMH#F&`YGcCbz`kI}A36bU4|66KeS`&s;$E>=DiHY@aO}I5ylpDtiUM3_Y zoL&dzi~S+`!+u6(QVui4o*07ZhP6XcDYC%rg`U848G&6KGOn+89Vl)BO-ABhU-$PO z>1fFrL&(HYP;!qRN0+Cbs2Jj_aNfo1zBVHoHV0ekm>>wj9YV<-zrll-6KAK#WG=H< zhtvIq`mIdrC7(f~qRwmFeRuOWXgJfVr)_NZH71Xu?NzE$_Q0YN5)#Ha&Go`;XlR?0 z;e6k(lL~oij_v;W!=k<2!CW7?PIT+4DaOXpo$(-0wB)!zhCbVvsI*(CTDNb1>0Vx2 z>oM?I4m1f%hj&OwNc2iU%|c)`E8}&fpkr0D|IWs$ht=b+nwpxX@v$+@r97H8%fc;z z{jvUG!COba_uR=dLPZo5B2?)@E}m~dM{BzlB8~xy815ScHpoo!laEIhkIhjV2QVD( ze0jj#VvSUjr{M9a>AOvDen^GRISN`oc&PzjqM|MEfGwm80st+Cj%QrpI#Afd8 zf@lQPm(zId-k#%3b&8+sdX03Z+jbWhY#Z1UqX3hUtA}#W!rlbS`kGn>M#F4dgd3i& zkpA3bvav*~rkz4eN@m9C;;SE0L)ae_7X6S{NSZDy&|A5HepwWi2pqo@*8}7>d|}Znv?F@0Sv5aTTYoKcu(v;1R;j;}R3RQgB_8^$ZDx06*i8$O zonLo!*yF{t+szQh%+6W({V6m-LKzl9Gpet0Zr&M}isQ`Hpe8sS6X78V(N9P7(YIb&;&`+bu&au{%vH!Z)5m;V?U8DUcbBU(VoWjThK#7#|2y_7f5`o!J6X33a? 
zo&DJy&6Pe(ZIH_-L?(_Q&m&b zeOT26?9kgU>N(lPEQOgm-Ub3Tqo7U05o(!4c5U88Z}aKk9ajbrur<8*cy+k^jGTb} zW(rf9<6bUw)vR^Xkgz~k444i2GSc@K=hQ9kU(wT~t(TJ(mD?GiA=*?`dv>^|v58R= zZjGQ9mpSmqR+aK4J??$INh%(S@ROF*tp%yT65=ok!Q-$zFs$P>J}^+?u%f)_uTiMy zQ?hfgIRkDI%V{wZqvMJ6BM`fKY_(Ar8+C<8cP9$CP1bn;fl*q@K}jjX-#a>boEPYd z{);-8dmn^N{;`R|d#A$vy^B~#%LEBYvpg9Y*Xh#YcXZ2`(KN*k*p45ho?# z7X@Urq}zNZ2tw@R!ZVNT!q9VK>gy5Z^`{nMVy%G6tvWwE)bj+1RLl4=Xqo&RNIef; z6b=jyI<$pT9v*rf<3~V{Xb!{|I-HHrZw>K)EE&zH;kY(ZTFtbKmq;q;{1K2Y&PYfcv>y^`7M%GLGbKgrn;pa_&Zln zmOi)l=^cK6?fYnC6--)WNnpE^?cu|REyRc0kSkk=t^D@SetTmCXbD*VepDVqFoxQh z9ENty$eN$mX6kY;+|nm8ZGxx^Bmxv7B0kaH7dwzt``4DzCE|{6`st6lxsZlufi>U7Cd&3=#1m)+0z(yT50(8 z?P+Y_>8}bKa_alK8NA_l^b8*l_^=8@vpxDt^tW&50|0``@Epm;GHPfcX;vkAg{%zv zU-^_`O>?dPITdA7I~2N`*bC4>;K&maPbz&&e&rEXuDMThJ6flfnEbOGhS@85*n{e> zSH07{4v_k+mok#iPoZFxPkvb;EAo0rqN>RIXkQC;rq$~wx9^#@BZk@3LXxw@F8a_pu&iT%y3GRC=-Cx6eQx7ma} z?*hmLaGGJv8~#xn_9O>J%%{3Ff9!17c~oIV-%x{TIUPAl0!cucKE3!=4+#$nuz($b z)_(e<*g&r5*&Vdf_FJFy-+OY#;@qJJ)x@|{=y9E!u~T4Ff09mkwaM*k&{|ck2ub^_ zs`iE9;T;YJe`I}@ZCoMiV7qkmlsc)eVC{1IDJ@fJ@di}60Vzq(GZG7Jp%D)#^>7Io zi6=V^Z00F;lSe(kNVO@PVv{;MOxCV%M%3gP;st$YqO|~N+V7exXkS?!DAJqT-_(89 zRJ32Q4S-OFq62e2eCD2KQYXLC4s_Rz*}SK;=@pDM6*HJ3y$C7=(Bdg;{48d>K}9JA z3M$))i)gyByx|xB@wHH|4(}_dFcf8p6FGO_`#Hg*BUN$jozaanA>L7B2N$#qvN)H_ zSa)&hlMpN^%GrAI-|YoiQQ|Dgp|D6odZ$y1rvW}CSh|+T)TtV)Zt((mm==y?eu=@} zK8e>mI?#^}Im}&txsgnBb--Ym9=eiWc====(i&7nry~iBui*|9AohF$)#=rwSCvS9 zNS6b6PBwvPbInWk4~dF;U-Ob-#qjDsRI#{~()KjxrAVQz`s^Nbhf-1=^uEc>%d<2w z`OOYPNjXq}LIW5NQ?^S$9oiP?3L@u4qXo@MT!*sH_`sZE(qKF(op%looXq{A9-xQ$ zoXTd@BB%~T10Qw&Lo{GeO-@R`fpL6~mR7t(7Z_|2^0K_VkbVL*G`7L8$h*j@B@?M~ z&{6E(d=rb@$Yf1$2oMgnaNO(HKg4z5yu!-e=l2sj$~euxKT1uP3F+_HY!=2Ct}XJ2${8%Zra@`n@wr$RLRiXlH)oiQODU0d?G z2UMb;JRM?%fRPx%MF?R#x&R{5xSX`kT}-dHC4A zx8{*N$*ZCg;*T&eP&aoG?}@J%;(W)v$p*C4uV6mffKTkPv#OQ(zg2Ir`$t7#(r;+s zW5HyJiL>82GN8zcq5xI30bd1UhM{H?ZZiT$)HaKNbwlx;5+MT!(f$o5!&CA3D|xAR z-~BW|$^k*ZCb04hDCe6%j0|?yO-EnyokG@@U%vy{2kk20Mi!u;1W&~IUcg<<$=k2b 
zLw*;%wryu$n&koFMUbFN3id4&6etKWU@R=MwrTwY-F!ZEbsk3oQREzgC>njf02zgS zcOhUDm>rz&)|EIhUuom^0Evk~ zHS*y!~w6{(*eC!b%J@|H$Ei$?#fNOD{Qz?7g@>+ZrPGG2D<#**e(NuRr6h z&QCYn^<^_YOM(JJD(URtXcsXtqz70hR4YOYsrXxkjO3gvNh zhk)|%PLJ!K2Jy**mf5dg$%Y0^=S1B0PKJi}DQ7(5xHg(=-Mpx+4o?o7l1>cPXWK9W zG7$%@`I_~K4#Qo0yEqR;#PSn(cOMo;OrFf+Z1%MrPpZWFIS3xkA}TBRdCdlBkSoQJ z7c1&hJp@X7ZPhI`IcUc3J}SU5fDb!7BmxEwdTyoj+*kB0=j6VC-2zvN(_Fw6dzFM< z2E<>RGu5tIkrBn)OPTewmT@0#>FLl94>~6*He>oy{x6y5QtO;5=l!Wm_%UK^YzP`< z%yPZ6AD?Jk5;ok8tDFl8HxHudeJ4Ffoi?Xkwo*bug#6Q+n~&4P0s<5#PuCTuAG>tu zsr)Y~sMGjM9T{=y308+?ef2fRO&pu!*$4zheSkkJ)=JG*`*sXdx^*egO6^(XYNk01 zngNpO^tW!2AepAdoYeGEehDBW1m??u5_$Go_0fL?+i!QDXFP6gzlP!Ydv^9SHxCbOw#M`8Bpe((o_wrFocT{%_T=vFR^06Xs)B7Acdixtj-c~W zdh8Z2Xx|WtU$r~j52mgKCqX-Q%JxBxN$(2X;IYTsHgF%mmjo^b2VSkjqS@$Op1b}&fYlA*G=x=cR49Ac>n|6}Q z6S)srZS0GxYEljkBOgBOHZ)iX9OsOdn1(LQtwobM+1l1DB=+jv5e+7y)E4QK416N& z#T0$EQh)Z8zp#o|YeG>cw4-oU7()PRd$~iKhs>l8>nJLRS=2vbNhZ0>Yv40e1&Vi` z%rX14tSs8=T)td$^qm}oXYBsQ7k{K37D~2|c>5(pCh4U}HJyN378xvL6-nB@=+9A& z4?d+Pv+3w>Un^|ngROZUS+lFVHv2Q!G86$K`3DNpd` z3^K0u%j6Fq_Ep?OpOI#PG6VrTCv?zo>!ElI9WnENid0YiqmErAK&RSNQ}g}ldAk0} z8xHyz$k~7vz(3JxuKEIod-b>vKWc$VJz{ZI0UCIY2{0jkxigSa&|I@a`D*>2Z$Y4` zS5~GfFL77X&eN`Oo(QM3dKSpangSbTJ)DQg%kCcb2O>Jl5yH|F3Z zoh)0cRV%L3KeCy*kJN2;w;w&txo6p20}529;Hc+sD?y+-3R-G01qI8;4T)A>Zt7lo zb9IT~ktK#BC9TE9LL3Crei{#%nOWtiP-z!(;Tq&Z&|0jmN@`s7Mz(goz8W;U2?qT1 zb`C(X46C$9qD`@-y|JVngkMK4qTH}GbVI%3PiZR|q6Haj54&hLYRhx7XjQbo3|e~4 zMJ-TE-$nco#;Do*=w>ueHfKLLdhBzJ^uyxpECJ@9<>tUPIDimMp|b;_B-i%23AYWb za<7vab!?SmxRk@hFm}9JY^mQe#$_0=9nFX+7XqjvgVm3W&u!F*rMlyF@bl?@nZ_nw zCq`^k096$$dabHPqh0HBlY@mDxwu=%@PUGqzo7~OWQX(p86@ zEW7d2@qRXTal8z1sDKL~f7CAuqHBt_Rz!5`i{?j2kFa)vqXujRr`=3pRPn^o0D5>} zb{5>3sWc4z08vOMXj%UF8h5XPCbz3&^mM0x(j9S%i%P!wByF5P0zsMTvU~!(i5T7` zWE12SEzAku>26=eRRfnRK%XhxBk#8ULPRU$)rvNX*7gOVpwEL}T%{Mt z$^D{GfTWx^oDoV8Cea8%^XucMpHL$mR0G0-b~JQ)8UMP$1&#DaI3jl=p-ujINxcp^ zkjIr)`DF1NDdcpt2E$%5DB~5?yg8_Uv_poBz+Qva(oW~)()6GP1T0$2^sJw*_n;GS 
z^$uYe6$m7eZ(CFoE2>q6S~n3iff-h_n5ja@{n%RwhDThQW-*_5p z8Lddgdk8&OSse1g|3r)tgmF*f)L+`HvxsN?Nu!0MaQDhr7v$d@|MhzDAbwI=0j&k-4ywGMUIOO zbSulfpq?xpd2*wPZWclsenp5a!RCK%E9eq2pHuf*PCsfyUh#Mpj6~wMM-vxkBdSe; z>psU{EX14s485An9&<|e*rR}`W1@zhaE0eB6OWlo3&n}J<>vNgdZOR1qKBd^cg%r; zI3$X1ln*vkGvpHLdIx8=xYI63Bw*t}<$;9vez<=-v9J$KTHAn1PG?$^X+!?L^T{^c z&rk}NWNj@j^{tGmtTQ7ffg||a)L$VX-6SSF+=4*ea2SLxyP^IB5v{^ z$g<{7SMHVNeB5gEK-|^q-DeHz&l~qzxvW1Lg4Fz^X0PTpZqQB{b5*^2H5Gh5l-OLv6NTrGFM@^`stLwRh z#0aP%gRV56&Z;B{9jUKw2(Fs@^hBQ;Z75hZxmVd?5+gvR>Bb+^qRM8&-SimCS2VNV zUw;6kQ-mI}?p)`>3RO%TPmzQ!E($a>VK8j>9L;P-XnXyjCpkg}rUERG)T(JePMJ_B z+(lC&qaU<1ftI8yp~w3aS;dGp`y5S0MT!wgXkfVQuLqaDN~qp*M(AxslC7UDVtC{} zz{^_SAFHLceb2l(#kpy}`J`}2{O(`p^T|MJGB+Vl#`v@v^=M!TwXs25AFf}&wM zwe=S|+Sf-A!Jy`ZZZ7CpxThD9H{rfkAyI#T`!`94p((OTmqx8!9vs{lf5bAN}uQ-fw%ZL!_1!{f8b`P2WH5L|?7 zqRtb{pM@F=Rt=JK5aT4M5o={L&i2?se-7$BjKZR+CrT+17>L=o+Q^Imm)0cBn};r) zSJ!4;HaD6^0E&P{qk-%y&QAkY5_*ti2DZq{$;nw-u4CD?^QZYIZ5mC}Z`>8{VJTE^ z&#>53IAK-5z?drykI7X+{D~3EiQvYZw)XRWfc{)qSXiiD+Pu^UDg3Bl4g_szonnvW zm|Bv`w*`>DV>?i|`9-CxyTZ=hwMSi&FA#)$QS?U@r9gV-p?&>|-X*#+(q{>U9}rC{ zM#ys;`|(2-rlqBo+rLEDIlt=fzH5)jNkj4xV^#?d!$$lVyRkxn z{+9dhbh4kY-SOVqYf}5ohdJUM*txAx^}#k3FXUoQmLH|q3PKy(KAZfPc+Qx-0<*gI zt&>31eSBr*Oxk}-bEKjHb5u4WItC&!2zumbirWl$mCMke>c%Pefe2cir$$i21AY9) ztrW<#Kv%7uTsGEy&jH%z@v7KH09QQs&W;zh$BWg_2G5_vH+!QVfMTCJ>yW&H zms(UJqB{fPuLuu7I0Z0vD}A(A0>fde)XI2+XuX&-Zw&2`_(+!5L+%jBOwcxg+5t2k zw3r$XmNP&OsRQKH1dk z)|O06yR3DTiOLdEWSt{S)AZ)5;-?`(p0-8$fJR4sH`0 z7#aS$QJ*8RU`7SY8VFmZQwIIoo%-FzU8@%bw~@O?2`vxM4`!e`(U6^JFykU2lz*VP zHpdk!N}&w$zkFyz4C3t>&%u?O-fqG@L9eB2`pHo1!1)0i1e1W16It?Swi`(Tbzm;} zFC|pk)E)@oK0@7d#Yro!6~<2$x6$~($p&)hxQm4^U?0KfKp7y9({9D8Ca9bj&?8|T zKZ>CDfvXD%=;NHL)sKY;gb!Ksd+2FzhIS>EKky9CJSYgfqzEVtkpTQ~R)~RBVbNzI zVEwbc3M1%w$Qg!f2XMlNTrTFOtZH!0A)x1u&K0oC<4R90 z=|a^>DHqC9-CMx<`5q`EbJ-j&4g@H;kMRLN^asrqG*vf}wh2AM`>jH0LYHV8Q4$SN zRTOtsJ@QLsKn_*)b!GR^i`#E4d71cO!E#uYqgm(9oQwf0h4JcdvV;r_--f5;heQv7 z=ohJHKsk8q94>`~syp?MUsxznh9d!e;NC}*ip|E`*TDvKn@R=-lGF)-v_?uNP^nPB 
z=GPP7+x6x56H?hvTDW|hYfB;Z(Ps9hl9%HffAkl|lIsw|2c zA%zS50uj&xS``u!kbkrOvPAH1rdyB7>!9)6dxJHzrprE zSO?}1C~hR5)#T~-#Tf3BL2k$K8RUp%z@*1`pnwPPlBbw+Wb9Z}hyd^0IhpF{pGGL1 zsQRU95_(uCF90Km_nG{cF%;o0S7P&TvZ`VsL5RTxeZPe{0#`wGy^m=2^iWU2QE)0an&l>228Q=)zSYbb1$Q#sm9EG4QnRu`otoChW3r@&ql%}cCfF>(t9 z#*h!inLIUhO=_Q0Z8x`1k3F-F=L4OUd$}JoUSQOvQ(c;1ZsYb;McB~k6L2TU>P()7 zi>lXA)v`cq`*aK!u%O0o5|E9bTlD}?dw+vk4`~D0jA@Tn)f#!UAf>6esyjrqNfj1x z(>Ikj-Rr)+d7C^@HAzLU&^DRu7G*zvR8puPwh$LYe31GiLT@yeryC&(ETF7d(Sm9v zxbqB=Zrc$74)SIROZ>=AF!(b_SU_|5R3YhNp&c|^AUbN~WL^Xk#2`dfjs3~h)BUTP zlz=L`zlnfnEBDsnE+~Owd5Di2ZdXAC-~VY8B&B{IE}$8GA^&9uK_t|#*gfi201}}P zkiwA2>H*bVny~&9aPcwZ(6m|q#lITYGp3C7id6KeDw)B+q@ur zPl@OOHEK}eWi{>JRd8>H<{GgH>uswX{zxe`-h9znD_rn~TF}zM$^GlpSx_9^49)GK zfANh#XO6(qgFN6){g-$QN8}%#VR43QI1oC|`#wQYm-VM3zgz3H_`ELbH6UtH ztxV4l1UqwyG_#h0=m$d55XGdc@Tf~oZ3?+Oo@Rz=+#k=gF*g~OSj=;@R}F(sgNJ4W_68}M{=j$_ zZ2E87?PqPUv4m2In}--=VT5RpgjSw4waf43sN2OWd=o^1p{t_o-_GkG2qmvsUBln$ zRjbL1%l(v!FjYm7+U##flFO|j#R{6Y{S|%nRQG2ABoLc7NcKi10g_6WR!Iiy(LF3S zw6FFg{;FyfP^b%7UCDgwfh-MnBq1W3*#50A!9y&*)#g~r1KIju+MFCt*?<9&YYs!* z!}H77LW_*{B_&ic2}08zvapr}_s}Ih_Gx?E%=m*R*lg2z&hNu7An^(oy<}7gpo5l| z_WLs(wuUxuQQm0bvOf8s|Tsgu85+Z*2w_uQeue#P=(`6u9S! 
z2_1@z3Pv0qCy;XAQn`t?rz!S8Ihq}Ig!k~r;}jCqmOUwn04_4Hy#b(#gr9A)yKwzo z+yJ1&D3$ASBtZ8{JUy5&y<$Q`VseZ5sNvN;PeN$wK&)(%*}}lRx<>2>vMn05o!sjjJ*$pqB&^3AvGiSzdA+y z#D3yGE8@d1Ax zGIBkwy?x$${gmJO7CPEn-^-_^14c-3BlMEC%l2)hAV}|wAAB7cA~a^-j>1FIAkzYtj-0r3P9Z2uL^Ak6c90*K1SHh572{T8(q=Ky!)oYn*7 zpcWH#<6-lqnm2q?Ow&3^rGW1L1(q)aAyDIVQl)jAm4*cJn`3~WFl7W!jT>JTDjkqt zF>xGMb3Pl1N61|DxMsOx`lw-ltmy=;d7m9088u`mO2l0G(&&A$UPp(kEb=(AVL4 zy$z`cXK%bjoK>IQKP!3pCxx@deaXPKJjJoXif8M=CLu+3@~t~uR?&)g9=jbzJKBqv zoIR!#K~BUHKYX1OOIdDSN_>e)HVhv>9D!8F#<+En%HeilySyp6kI&lK!=W7RCDyRz zYhvG)_fF;*rF@o_vo~7?172;W9T|J^WPhLxg3Uw7hLR<--aAe9F)gUdbr1mZ5feH*ZBs>(U;+C9!cv3b*ZdLz;KFpV@o&T>mPCpIdGo=TA5 z3wsD&HBg#Aui*E!VY(MDlh;R>cLjeeRvGKy-TMg|XQ^lte}4 z(2X1-od!$L`7$DK#8H{m_%*v+BZZ%Lv&zb%HDD z;({EhI0!^>_ucrE5-a-$>91~xYD+p(SYR1|$YuYru6l7R_oN_>xj=w}(vHvz8<89V zRV3G5^28e>&5Rc_bR8V{_YhY+_(;|s01h5R#zBr*KMh23p9Q1>@Mvif^ETW%iHM-5A7}& zRZz}8{+;HK_xqQn!qu<9S`0J7p-z&TsH?jvPRpRB4X>V4BpX?3`H04m{-lzh-rk5A zslw7M?ycTw_z-<*=p}cm_UYI1(qA(qn?B+nU~4W?2RQT%Ce!&W@``g28p~}kf8~pe zx)=<{x=ekcx;ik^Qu89qg)3;QY%^ivz_N4k9Li^oa}Io&jXRJgu-?}}} zEnmUciR=ALz!tUgI{ypi)WwwgtI zm2>`VoDD|BK_7r)T!D4&MBgHeBh@NaG&qY=E+`Ply^Ap&_1&$ekWvcE{#bmJ_2tcX z0gTT`Aap?h+frnnJHka3dqaio8xduX|7`7XX5zS+U4#GSr9==CX zljX^n=Gasu41CVS?L$}0`wG`cnr5pRFR-y+@BUjJ^88cM?U3tK=%=uR_Hp$I*+Te= zOnP)$O-xQ^GQe^B1~y{R3cYq`@MI3ch*^Bn^GAq*S(CWd6sEfReef}FEaYe#M0N)} z7ZZ_s(G1~2Lfl!4e^r*A2w@{{mQ^jtTEcmQbJijNcE;p(vy$<{*v~g?vm{Y}LY!Ep z@D{ZZ`T`0e74|pCnQFR9-aCZhou4W;ym(37kLMK^LIf5Km2x-N%2P!<$jz0M1U{5{ zswMj;TgAqRZqKQy-aUU-w8Gf%Le4eLy;-)}y~Kvc8qMR7PX4^nQZsnhvyE)88zS-Np~Stw^?T=GTyiilZZ9ditsZN@3KRn3x#k{%|Q{KtO==6~q7@Y=EnQ zrs?YC3ljK5=Mj=EIxl;x%9Np=5Kfu<{vI9Y4nEbWrfrbNAgfJ$62BtZX0>bD+e8sw z?JhFw}b!doI>oDUoc(?LU(d5X;tnAg6 z6RRl?h3#xrv66Z9&g*H>-#a_^o{8%i8O@H2Egg!-RY?5>lkSS7P9Pwbj+t9C44b{QUg#a>8wlpo5?ad+doT z&-v(Xl1SiRIJXE+IADlH5QVW(Z2_+)Y7#3uG=c*kigb;{+tM)ljx)-20)$Ea>R^LS+_H}Y_486M2e#TfZ1D>c;pnf&Ye z;|jEyQ<3r6UnxMJfF`>M7@7BQY!y`xFVXmJe^SUn;r(SgI0zJm!QweO(&MWUn9|kc42MA25&!2@}6B$SgW8RtNig 
z&>)V@?F_b$p8C=S>r<*x`pGG_)7ueZA4JeS_CO6e zivQkl)Y-P!vJ8;r3Bdr&q>O1Wa|81Ei}wFM=HA5wh%E2tKL(izZO@r)&|@SJuHC;q z#^RNnCj2w>> z_YVGJdTgxob9iFs@e*L9W?J4;n(!*zMXG;sdt^`=$6TD&Yr90_-rhGnPZY|v!~XG~ zx^Y?~el`dR<~)##@4O=QKXrLmTf-i3t=g|l5_!w(ON)gn1^jW)2pl*`lpvCpZeu$f z8HRy=SbFg0$7efjCYRXHVzCU;zhCga1325~M{+qeMqYoLkIrhg4yVida^VvmgA9cc zz(gXJYUAyQAgwgnZ56fRTNJS8n274?5p|&_rH22GKn4B7kHz)IcRr_1k3+2F$6ure zpy_H(j)8|8u@CXKL?voEG3DvajwTer>~$nMqLC3Lr#z03nrRZc(kG=qX5P$*ItA{;J-REsZ$31p)|8Kr zi!fJ$bV}@Yhw3{M5fhMWZIAd1)}10%!H&${ARQN*32-rPq9E#zOMC^$se(bI6ywia zk}c9vP+`oNA@jVP*hUo)XwbxUbzRdc^9AWM1F89@OT9+h+n+Nt!8JNjc-!UPWRR*xUzpVyOEs0g>%&NG#|U;*he1m z7+9XwRPqoL!rjWeq2LE~mh3%!Z0c*2pUNLZ&|{w$S^HGy5u^ZYgaSiChLB2pGtBAbl1MRO8?$})kHZm)@zit z?eVgr+4qmtZp^2tH*Y>0zv(hL$~ZiHex@5!nL6h-goaSg1hA_l{Yh!N2!Cu?TifJ% zqAq1)IAwi=acc5=S{XznqgVzuV`E9Y=3FF%zt+w=J`YY{5Ok84KM7el znWzj@Ih&r)DV!g7ue3w0fFRa?>r?lqsrBpc3Qh*5r%NLEcJUxM^#kaykbhHRil z^$3#z9k9%sjLYqK{=}losG?FmW#HXLntI<*|&om6fn zq`-8fz_TazPb#=w_4g8%4xe-73>^=~y}cfkVexyY~GdRFvtraj~&jANveqT&xQE zVtm89d6-9g=*Po~o<4xV4h^GTeu)qIRxV;7DBs^OFz`D3nJK*eqxttn^qK1y+0#VU zzlUa1JRu2LO=@-6j~&T~SgcnD94d?0Pq#wX_6O#jDA}5idd(K}iWMbDtN|(oSj&Da zzVQ$xl>p&aX#X4EER)O5nPyzAU({DTUrsmD=XGmX1MC@JOJMunK6A_n8JM0brqedP zmBNE)fgCO%R6BeFOX4RzIy%i-zGzA-c=-Aq6;5CMIxhZ&zF!-g|Y* zK&_9mJ|=A~)=m4fn=S70)oF85vbT9umOPHU7J^FJ?dWZ}NZ zCf`VoG37Y(RN^$K4Nzqg-gZZyxw&pORF4Dfqw&eTIN<>tIK`9CKHnxFYJMAN2t92%a*Spmo5^FQ9z13AJ-x|dZM6DI^d7@;)$^@v zgO|R_FKM(*8+50XR&^qkQ*1}U?8xo4gf!nEUyVx?c(1G;qpqx7k~v~?*(vE3q88A~ z2f(?O1ZX_HfbK2XC-T5RtYZja>%%=V-=w~`*`bp~*wFr({(U79leZ9SaRd|u*3bHOu$kIAd%8?NdIt!1*DR(m-} z*$~+?XojN?{h{_dqz*7;gOPf-)~jQ)s%ihbKpzM(kVb?~(RA9+rhJjBTxy&lw?ftGR7P^HmBp## znco02F$54NbegL?05!g=Lk?3+(p%+sQ8o$zHpS9Y_EF^ou>~h3@i|Cb)knykd_8iD zrqV}4+hQNIAKBPQzCQ>?p;ss*J|X|%JxMbzL|vIG^}9+gZw4e)9<8V&yj209u<8Mv zNXMiegm4QLuH1x`d7t&$if96ALtyg|ZtvH6;8iN)RBjx2Prq&`|DA@HDtR=kW_H?N z!gr=)y{CO-6se~y|4F2x^ip%h&6RyMG6929mv5&4<{{v?i{%A1@G`e7$xz}@=w^@D z-vFdRelWaXI>2GCc1gIikbfdK!(d2d{{GAD0rENZV#TJy6qSbi+1x=COk7zc#Gl;D 
zI{vf#AfjwN9jEA2$Y|IX><}1oVic_Rl6Bk3&2{;cT8Y#$qEl0lUrKH=;+}miUl~MW zMNgV5v@N<*pIn&AfoFlVH|ei-s2pnKm-u37%gSt$c5)U9ikW2ABe152J}eopr3CUn zxY+^SK#*^{<_LCzXorNJ0diTfm1s6#Ehn9w~ZI78g7oPuDT}$O)&L~n6RnWUN*K)h3&$m8*19E4M z@_>5Smd&njlqgiQ9F4k>%Hb}dW;5 z90R_peqpZnY3W>>rHrl(PO{$WdBW)E72+uE@z^cGr@l-a#B;;H)aygW3u{1slzs8U zW^zO120reK#nTIfiuz5+|+YF=vKi^+xk6y z5s&G{2_c)y)4LzUH)3ntN%nS>1d7PxW3+PX4q0)FV9fNg?SRUC+BU*@S>#Ah<_|?m zb6s-lVllxe!9K~EuPFLzfh&2<0-u8coL$5mX!Ivvv^0@2XHgfBD}9y|O&f%?DCV@I z{d7M?fQFrGtVbnM)c(~JEtI>Vuh%-B|!3+_9`ELy}g90bRc5wMcG+B=cEU^Ei7Mw}296ynf zBwO#Q;bq|Y3@}zw4oJ`yKr2K)!wvd`R(q`NXBf3|cK(e6l0?5PgUHlLRIFC*wb+)d zQS#G{+HJ!)0JYK+$y6Vuz1@Lq0c8UOvhg(x$s($Co~7D@N@nV#Q>z#$2RSk#&-|d< zJWk*%BUNT=l$pYx-}JD|DQK@=$WiH54(uOrVYA~`RDw`-iTzRa1u@U1!4n9oYl7*E z=+N{Ar;qD5Ejo*~xXMjvy+OwE%0h|QGa9_*Y@m6s_kty#xp9}W)oR70w~(Q3u|-A; zAq79%r^~RyeF9ZEKZw3Kt^-19;=<^H=}qWMahvFhXx5{O(FR}bRfu-AVjgK7wWjiK zl#G>ZmBGlwnjsK2a!-xaLDYvj2MC#Iat z-=e|3e0~aaJ6tc_dUX*qh=JpX`CGU49dkM;4;1#}Myhu*UM#0hYo`Zbg7MiC%p-f3 z51>i$Kp);dn%BWX=unwQPPg_NRO*$f<2jJMna%NhIX3TFQq7e5Wf$Y zgLAA9Fw4O0dCebB1T}hv?iNIhVgu%0zgh76>8sU02^XOHSr1T>Y@kU{^pDgVzja@<6dqdDW{kZu=&>>p;?5p~^+Z*=Bigb*prE>WOKjhPZ`u=LOQ3Co5iTrr&g3V47jkW3}CTssxG4^-qI*AiGW{YHUaYF_i5ssj-u;GrE<85T;PNd{+ydCc zbh?Mk5E}|vMN;lLPY1`L>1k&wRA%xhMN}Xf$PkorywVImG8*%`^5=STkov5E!;C<*}qUU*XJpYt&cS0I$I9fxt!>L zm1>aCP`mtYA1~qL(}3CS^)u>z9&5i<%gO<>7R!M%1}Wxi1_#oYcBZ(6df}JZ|&>y@dlhO^__|H-htRu0V^&21mRdDIP+< zkB>fZDvkatKsq4j8VofF?GQ#0nru<~Na7%5(1`H>NAit>D`|dL*tvk9swyjE04VV@ zgiQ4erRW3U33yh!@SDEjKfK~}^z^>1tk6sP3aRoj-acJ_9zI0p^JmDGvin12vD|a4 z!uQ&o;WRggSE3IRLVpct{=oMoik!JYou5IZPeG>x@lfnOMMiRU71#rWwOvY+k1loD zr_hWMFw&#jk^Jx0KLvM=vbqM1SPzyb7*GDJ8|SH9C`J@BK`9bFlALU-TP;Amv|PvE zgkGz&7?IVanpGc3@{xmGj2czC`PA^R^Tv273|a%}Sga=3<>d7=HSp}Ny=-I3zWKLJ zMxI}%OyYsHXLs@BA1mP9eLOQU!Rj4HPhUU7qXk9zq|(6hYY0Nu$hvNMHZ9DRUP{{_ z_&EMRCLV8_S;B09V_fL%(qYm0J`=hMl52irL8A!cV9ey=bn?EpNb6fPD8w3f-Wu8l z`EmHefoKzYMJxG&_9}Fh3L$=29MBK};}9gsujvYu5!f)iuKQ^%dn9)Dte3m{_~ax| zrJQ_xKck=^1xu-S0l9z` 
zkDlXSv$;G1;5e@r%S+=yANdZiJK;N?;>iFqcqo^z6#m2tJkE?Yx|(fU;zVe zBzY=bHcOWfY}miR`^ekhlegXGs4S+N{cyh9G0S~INbX8Ti0PB4>g2hML@O4YG*Ht6 z^#GTGmX(*QsmdN^O}*I;_`Cci?w(6f(h(`WQS~eHy}u04EVT*KF?zYE{0SL@Gjr1G5QOD65XU| z>x0t1h@-HFC0q6)+{KSeP{yIzeN1c4oV<_vxCiW>@O4yXN(HyePB&?oAjGV;9Y7S* z70DIKW@fpDFSj=~G&xh~&lFL+pIx($Gp!lDoFgLQ3cof@O`UGZVM(j~#~k3=5uSQU z!39{>^&*o8i?DkIm)n!MSke(JLPpLWa#AW;LZS} z6Ldhcu{}ZX5F(`r7wZ*Szr-gMA8yqo9C8hb$*}UuGro3J;7`c}!UJm3x(~&FIWsLN zC-a&98k&^98qNmq!l{d@n*Vl55W3C;R^o!zC{JjFLjN_Nk!{|cn1lnQ29SMR7!|mt~;i#L5! z2k4Y)_`W+74pkfsOi%ft1zzC!CfSbfC=e{7LGLu?BtnKt=?D}UH-r&fKrcHTj`~0z z0wial^eh-wW1ax;09uami7`vNe*=sslW72`kY81=D`?fff6kPbK$d@AT`AF83NZLjMRVeNO>GeH%8Qe_x7^KpYH=w?g+6qFH?6eL_L6 zM5^!-j(Z%=#KG0}Qjm^;>p1MeE>%{~ag%}8D0CI%d%3NAXg~JUA$6`T2^M8ER0_!H z+Rzh3abkM@cR8{dq($P;aM$>Q8HCI)ECQ3icR3xi3UkFKIqo%JXAZ8<>VNKLA6xjp zhaQMIHV6cOn*ol3T(@`bJrvz$Jf)p)y_O2b0ID4nAO-pRT$SxP8;9n0Ymm^`Nj|te z8mYTo_i{-lHB!DP=!<&oG=#o+w^;QH1LodcL?Dm_8T2>}d0$*hR$_W`m|8qhJ8)BFdZ|BG?$A&O-B8%=5!C&+Sj#0at|63Zhv@ zmJfOCZrN#dkO%QRSS;dBN`btP^xM4;3AtXo9l%&yxmruwjv;#Gt5$=l&F=x)x%xU8nkWIS!6u z;I3I#;yp_3HLUMIFvWIGWTV4g8bqb`(=O^6_8l;X@8($=TIlNKp8Gano^0iTw#sOm z@DU3&r_b!v^q!#QDP6@t&1}e+tule82VwwJDj7vo2D#|BRnb!rk2J?Sr-0iGjg?=M zugH_%%?Do=lr2#Vw!;q$!uSq~seG68PWe&dv=l(}soswARMZ$TSjlT0BqEqv*AKvA z@{Jz6dd??!J;STK>sLblwyDiR!OxpzoHxqmoUSxKVFCBaveYc(y%pUO?I+ss{?FXixA)^`OOq>$4(^y1a&%g>cMT1?Kx5{ zgr`_qI{HMv;=+PvlWyxv_uWWm#A3Q|p(TyahMcp*Q>)e(N(1twQD|=%aCfPDIR)E2 zQSat}5PXB~J$}is&|ASn0bySmHH*vJGVIFGChiK8tsAa5$bld`ekXZ!aN`;X3)nc6 z3l@Uu_c38TL5dLTw9k2&C0tVkQ3>dn(8>-JMb#^A;5}s&$+8mO!JSyl_M$eQvl4H8 z5#YT0O4JKo0R5v{(88ZP`pp>#YW(NOI`+u0fOP0yhE@-#=JB~bdQyLl6N-sDx3r~sU&jSG6EI>B`LHBF(iKCpna7E64Uc9IiuIK-@%tZha*|sNaeob>6ZXYLqpay zD`c$8R;f^CFZ3ex>b19K4AMlhaigqyrbWjMHicscLKRRKte#w<$vmd zrhkQ}6ogO_Kn_C_E|1$S8wyfDxDdbXplfYK9hQX#E>%$q9#PK4$>~?QvLbDCU4*+#isD}8%*ekZg$tkbvqt+pE@(uI8 zOZi6U49u_iy82OgC@cjymMdrUb--pOqZUm1phV;_9x$uTXBDBq8LJ;E{eDxe$uIWk zB(qTks8NU#CRB|mf>k|*p1xvr#td_~=Ix(egtqI9?{q;jTY)Bu$AyPcitXf_v0jw&FV|RPS$V^&2kt;3 
z(cDP^Ng-L)QgtC+tBK2A*-I^>jC(>`hL50{TMr5Rx-!*bA<9@??cz??QwCZ{dT(^s zx^3XsgRRteFMKhAMj*NaJ14J;7`OuzWX7sHH3wIk?;Z1zt7BlR_ily$ok%n!2Hy}^=}~y;G_cV z175r;@(@qli3jc`GbH} z{bbzb%mmd(vbu#M34%?QoebrIM#2QR{3}9!lT9KCMeDjWez?d7VwDpU>CsDf9)s7A;0Gb-- z)Vowq?oypVg3?-W>w3_akdRO-_DG~VV5zsY<*CmH8`c{)sL>>haFq}v;(LxdI3)BB zpi^hCuF{KiClvOAOHd}E+RgG{v?myEuc98{^jDc*hMi5%8Zd+Qrh9vMiwFzU%C6oV z{UHtdXb5?!4)RI*V@Dn!_&tsiG@T$}H14Jy+}2btc}IOcQ{be=;PcYsx1P_4XE^(5Zp`XzSl&__9dd*Y^^7 z0)MXCdu~OU$_tv*e6d-TDM#AT3$wAY@$pqQHX4>{4|>``)UH~;c)nxe?uG#Uok=CC z5P(TnbGj{_RLotSB)}F>A;`C}P=h8@?`s1^W;Ge? zB}ILNi~*&oO^Hepq)}PpTI$vKl$shoj+FXz4gKx9n1(=)e)3w+6WWp8r(5q9Z7QCZ zBd=_|XoNJ@rqDj>k4O*wmr5VHV-oU<2&rUr)iml77OEVgRv>R^C}gAF9Njhr5dldt zq~FB*qhhjM+Qo{$v~uI2RazdW!trY^%27BIq~MrS@lZ3T%X*_Jsz&UQ;m5NA1A(t? zjC6CVa|GP#h-18=PL7e-GjrO0OHT-mWk?{4w%h`@v`){9I(!c762eR8$Hn1aBG^AC zz=-(NJc7B0OUuj!vf#}`-GV#%%RtdU&0G~qhR&>AB|w5f>RiVNT)xZzMhHo3I9c_v zMk)YNJT3ekA@Gd+ytlx;6<^GdP_R_i&2f>xxu1sCgje{`#%-fqrvfbpIf#Tt4~Z)<5KZeeojwq0_;IR;0J8N7m7+Ak%&Q^c zYZXjG=fQ?h>wCz~5NL`TpX`Gv*fZayRy6>%V5WdkzH;O@U5tO{7O|@nwgP{1{s>V%jMsV7(DoimJCho0<1B`t{M!arioN1C_Csl zlM9pg*i4l{rM-SFL?^uONpUXCHK;A~(ms;0WN>iV31u!Wnjc;};mIN{xi)&4ua(X8 zRCY4pbo6W?fK#GU#W{QWZ`UuXKs{U)7HR3v{rz@Ist2HKLm;pku5l*bH~?*HTrWK$ z9jnjPrCYq}8X$h8@>~7BLNi^;8Od-43;OZ)bPW#hGFCUz{(p(RlNX!xd7!LTghQ|k zx&ev(9dc9YhoUOnrt683)jP9Jtq%8TNoQxyMjdC4JU<^jnSq%Qms@IXpaaU=uC_}TMuyM)|HiC8qA*_HT>ORO5 z6I{wf6$p=3&GN!WB%#)IDHYBQ2bL?3n=l9HzhsJ3-#gRe)lRFi2`b4cMViO}ZX|Nu zYK$X^GUv%0OCXaBw7f&;f1$$V8Gq+$8Fa_-n4DebC*I-Rc@1A$PyhPD7vNA(JiINS z2S3lDs~?^q(l6LFY(bThj;S9FjtAzTT&xnG@-nHifFyzh+#VuzxMM!V^<9LAp8aE( zLD)DmL=-y;3m<2)?#FI}F%u;WBE=ibFOFGV15Jh;=1T(?5H{bl-uMb^90bwNF+dGn z-ZOG**Er8&7P4vakb#i|RBbibh{1wXQIJ7)8^Dy$pRP4v?^2_0U*$5w8E-dVLzc7W z)`izZwpm9s`k}!w(g;l-?P4&;E;bZRjyuk9x$Ly2Yv9|ZQ?t0J>e>K-Z5ySS&%1i0 zHm*;V$r4+J2ljx^&ukAGG>_ey^fg3~0~Gmaqz6KFuvTl39@g$vPiw38^i8TBIW&;q zHuyrnxJ|eah$&gxfwDKZs8t(~Y&gYL)(jdzRP-2e+iqN9vO zzb?<^s$OwwsKM>(zN5<6tx)(`;UmdV)bWR}h(QlE8FdB3($S@v(=Zuu$lld?*Elt$ 
z1~4kr{^RJTTS>)ycg2RyT$jh$RPvU}g{_}6l8?D(ULKML|G`>*`h4RJbr&Wg3`0H; zqC2$e_k~Fjgkg&y+No!!(#KKy!{gJ>X^q;n)H;`5fu@5w?kCjzhD&1_k~67Lrf8}V zs?J2I$7`)oMisHxhg~VlDO==#aTNk$8Zwq&vatJ8jtfIu40mh%zIu6bF;p9UNTsva zHORokA_)8Lu3u%|4&Q}7gc}NDvQ?D}R~&$!v!1uje8-E9lT zO8wyuQ&H2{Rv3uph*t#eTP5X~fkwD(0_t?S@Wv8y!8$jZzQu=sdqtbLH7ty8V%5ca z(VafpF{1!h>t?i_w?oi)h5vW5%MW@lO9qa95uDCC$S#mqTu5jcSbp8pwP6rMr2&2$ zhdOe>!fQ5+rrYwhH{8AK1>eOTEmDRLG9187VvL`*=)_#DJEJHFsd)Y2hUU?+v68Hp zHx#2x zu;ZD+c5BGrhf8tNgWuCVLk$cJb_uq+sFK30B=MAcR$LhwbDOps?S6gjEZ3`)a{2lJ z+fHq|yg!Za@#Du55)wmMqE7IA0`zsz^71%*bUL|a7Tk!SR(EL2?!1aq3F^QJVpS6# z=f#_p-MD1^r6C)lM=aB(xjM|gGL^RphgBPh%aHG46JF>!2kQ}ZU(yt57vJCcy#h-YP~cqjq3No*RPho zjW!=MmCbIE?)LYMIh4G8o1F8?ux8_{9(>$;Olx~V-Rs9Mr<-1t^Ec<~0ClaccZ3sq zw66JmK4J(MnVxQSWS8{WS>CGqJZPW$H;G|bOTj?Oc@YsH#kBZYboS)uW9dS@#H1v4 zx{%Y*BGW9v`-7iq>|h(t;P&^H-BIOTU0>hEoOcE9OQ_u3+~6~tKBvbA?;n$*uhVXJ z)(!{^e~my$0dgx{pjMsoQ_;k2vove@nY-(~X! z-|4|qQcu#cj6j%dJ(|-ZWkbU~C;a*a!p~>~#ev^f{W9P7L0ZFONYQO?QIgj$ zeZJ4N*&_eV8^Dov6T9rz_K&lQEFU|6f8#wyP+Tg-;o;(@mzE-hPp z0i}h?PmH!ipkujB8_?Q80B)4z`p`HiTZz$C%m7Gmsc!+9G!;-Jsd(F#;deb(WysB7 zEr|-VCAO(|&Og2LjX1R4k+nVvMVfF7iM8!4T}h_^(aj2ze&zIJ9>e`&9;=6)?$;CJ ztx>D5L+77K*ZDEcTAZwJ9i`_6vva*X$=n>s`#5lGl7&lck0+w1k6a|%`QSAS_&lWX zWH8!F6&(HP)p|6aoJ_tI_8A#x2Q8QHCAE&YS|Opea&haGj?x=5y%+HMd-@<=HGG*o z{r+Ys@YhqbxkqOwhBTQOG9Lr}`__a)WtNEo2;F>NtG7QNW?=@p7-%|D{GqI_u4!zN z61c@RwV?jz!DjCA{25Gr;d^yt?oBARY>WELF9Hb&y$(%YLn^Yk zrqy6b3#yYL_dgI$t#bb%oAt9x3DL59gtPQBycy4Q*wgs$R}V3-O6l4~8d|)IY!`B1 z@)0P<-;DmjU4J3h1LH`ZI=!z(*pZr&(H&iX+%mrx=(vXvbUT2H{|7id>2-2#B&3dx zm+YdGQZf!YOifL51OCn=EB{Tf8|jplSmft{_SlfLltYipb>+X;%eO}KhOxF11Bl?F zHPouq*k@@7qgsxtepOn$GkOLFVqV*GEB$GY;Oh+Vg^CAuc7E{5nVUCn?tY&r(FXCu#x>@Rq15lddfDk&aV-sN#L-vji$f_VJ@_ zkX;aPxG`QcxaO*UKX@}X9i2$y}?6Q|4UyG+8rOg7cue^AH^Hi0&d@L9&3 zU%(ss7CzIry3IXL2fu#rToCb=d&$$&M|8Q*l1|EhKmRd( z2E+6qi|c{`&H6x=OE-A`spG?K)bN=#kG_OYo7ia#BdxbFr10I{UE`k=kd#wEA+k(w&){i~F(9u^TH zwZ9k&AChW1J3SJ0S{UHI((}(TX)qSK(+i1$Cgd+iIE=+lE#Sad{uE zfkpS8+@Os}5>u(slDsRN<>T 
zZaO+Tp`oGQ2A^4Nke8I4Nz8#t+HKoZ`q#pf&dpsua-xCd^J)z7jau6j2}ASEWNz0m zswhD!22~C;9Uv=uIF<@0zc;cIuev%D3th6^+iyl+nmu-Sf+2Ow>o?# zuTlU04yp?g6y7ZGMv3zOsMW*?BczI?$o+{7y~O)X2~ajxn7;G<(h&}7@xMDeeIGwQ z`-zcKFLcD~q@uGL@_Yk5I!32Ve_fFVVJCOrEJMv2s(clU`ZgfUU>vUEO610nC#3iF zui-fYTrd4dP=UfmVrT8dx~0`0(fyO;0u%d}i?dfv*f~ce+AbPeYN^#@tELu6Nq7=; zeW6mVJPmM_&hg&w2K`916$|9eY~yE({aK{je`-oE^w)B83BGWZDy|A^$l92mo5)3_ zR-ID3Rs9umpI>KY<6rsb91d(IK(QdgarqCx+q_qk2;zF3r&dVulz}>N3(fE*-Qfm` ztxj+&r_Zo{(ok$7EpuloF#a3e&mSna?A#K%O6!lQ&;%aW!adBts8=DeZr^er0&4f_ ztFDPuFCx?Ll=G*%Y4f5^F|~5Pw@F_tP!lgrldzUQ5;M?EeT82QYZc+*iX)l3*|g`p zfW>Fc-y=g>eU#+P%i2Nqx~v6S-T%tQ9s|*#=%#x5qwf>Bg~ruF|sq;gC|f?R}&_ zi8<8E+|gS8aG~baVM*mF%L#S~w}@*%+uS$&?6G)H1I^Qk4S&09W_uC`@0>e_PREZ+ zyoO4a-BT7Um6KuYZTSJxvCf26cm_Ane7Y`<%PPW9NDdrcw9f+Neq#Hr_M@>EE6 zQ$VBIlh`0-#|1~{gng`%9y8A*eio1 zD*q3D=7R@gH874Wx+jMDEsU!D>F}b-+8yA2)|7;nx`@#=KMg8V2vn)it2y65$O`NJ zLftQo5Y(!X-zMQKWBdEyA_72Ugw~zk7L>jtFX!jej1F!ozb&-J`hWg({|dT)k~XJl z2crr6VC}Kc&C6=F5B2yXfnaOXZmV+aHI%D<2K>lY2~M?TR7rQY+H?53SE*6l$eA{= zFY-T`g}kW_M}S#uymX3-X7j_9j5%F)oS?^Z4TV7WIGmwF0=)r#oJDd#{~#XT+zt(% ziJe~Pub}8nP|BxC^X>gx1dv`-NJu>szUnhEHHG)>b|_VTd($xYa9n)K$PEEU2$M4E zuu^DT!icZ)b4kP;8h=f6!b+h@3e!aWga#R}>|hjtvem1jQE1C4fJULNHRbNFx3wUm zjW^F*S9=%R*^-f#cE?uJ!GQ;*^st6@FcOzfHq@)V2_973kKSnAe;bWwljsWc^yIte zG>AN9W&4M~B4wup*#Ubt!uozgwB}!HNxkF!*FD;5UShWIc^}h#1Ag{hfscEA{P?j* zrS#uK@BjA+!JwUnwhP*Y?a*p7vs#N6O;p7Q<0Syl0D%kxM2V79K$s2kWn2SuI z;8tK)cE0CL){IZQ5N>l@x|)^iIoF5^ohT5uZC22rfs0juWDK4+3dKMa3&NoPH@aaS ztv2$YP+*RqY`v@hX;lc_+Gpy}ckJnV5O2Sj{gc_7jJBEZ1mA#$82ltCoXZ!wVo)hU zrm!GJ)c;LVgF690_W&_h5!7BR{dZ|wWW-E%^s|r7Dz8ZIopd(}eTujuJyOB@zgCzD z*I~G`(t=6b$NmQeW@Et^I=G&(6%1$2IT@XY@f*r87XV}7fxYMsWs0q)Ckn-_720Lc zi#vFMFXW%uF`?fQUeVm>0qrLcufeSdkh$Qm=n8?znRA-mU!#WAT`-Y?OI+w0#yUYB z1{WGY57g+hgzNw6xCx%mCS7u;DyNy$zS=JHrulz;@Bt+lcFF^EuYw13Q-j-L$v|K# zYGd0W+Em3!tSK793xtRl1)%lB_d9aAXONngYp(_g_m=RFIj-^XJ?K@Kl+m?XN+ixs zI*Q@Zn$DwYPvBQ*uc>F@ax3Hd_~D0zbugDY9J4GN^$WtE{XyZ`I=HkGLFoxVUZ|DJegl?pz7f^&G$~ 
zDN{$GTB2Qv0JsIfHy$i4Yv=0o&As-*uB%u?fBjC~X=m_%*n00kD!}(|_@Gi|iey9) zLiXMwWUr8fvPa0?B%$oRx9pXJ>{%Jd-r1DB$)4|}@AvmU&+|U-fBtdKx$pZLpX+n2 zQ8&8OWh7EkbsM(Kt$AT4Zd^9HPL@!w9ottKlJ~UeU(Qb1dJkMGNYXL3Dxy&n3NS#N zFYEoV8Vx}fjk*rzx>+Ji!=r#=?zC$NevmsT%W}4rEwU%%%6+oXrFg!Lzl=$_miEyr zB;rvaBV$c9Gb5wykb4QV#JLb4*$-&QRL875Zik0%xvmdPpF2>`pN55r4At-Kk6&b* z3=;B`Hs&%s*NoH6fXTgddwWuKbwVCzt2Z4gSOVW*=@S~aKC16P83~~UaRbl4ryt_} zm$<}f>92o)uOI0|VPSzfSird&Ku+0gzx$K-fm)s?_=kmk1$5OA{B_UztY z7pd0m#zuRH;wtLSn$_|tx_YLIq45uAZqqYFr=fwxFFRYI65j6 zcG>M2OX726K4=xuX|P+qHTIYC%w%#gC8pbHaNrEPJTfxYu%rG7_vKQLrF{D?^F!so zV#v7e4Bp>~WnsoC+PP9n*22~fy6Oy#hTS@vw>1J#dtET zazWyhbUqgLg!<@S91C%5e3=!ZiT&?g3uVoO6$l<(NnDGLaGhNl3U}SmOlREd*Jn3U z?4h4zB4e9tX^}kL^6QcNrRCxy%#p+`G;X_2HeKmxvUGdBVfR1-igkDQqaS>`?2m9% z21t9)9rEZU`^1P>MsH0jp&pH24dlxmd?YC84^_=0s4MLh>U@dZ*_l63Q>RGYVX8Xq z1rk`!6B9F^UVC>>yWF_PDz+loshNw-y0zS#@?Z}(3Qu1&;j1ruYA28x7&l1wc}lm} z?4ljZhR|>KdMrSfdpa?FW|iB~TmZ|lpYO4Jbd>RFXP|&$%BzZv_5fn+K;h2z(x}!J z;mmvHZ#xZiOigaOFA6X&NvzW?Gb$vx#O%lgWl^#3WNUppHEE%a-_U)ekzJc#@Hpz+ z=NzIYPt%5}aXuuY*ASKXs%ZK%wf@=2ox`PAsCHxyQ*ZaTwVATBvpZ~8 zOqQ1!Q&WrL=0DEeXQE8WHaF+6D}8KnwWVg`&7dqA?E^^hh`f+2Aa+-J)M<*ccm?Y4JO_wL`N-eN>TnaFU<-f^SpoS%&=PPRk3c2 zDi!}90{H`$9>Kp(I#UhCUdm$mXLr7b#IuBV*=aZ4Tl^x#2d%eqQi2z!FI~1KY?I}d z`xQlmWJZho@K~Fi0|nIf-g>T&(Urv$mb!l<|7@=FqgZ)kxG>)LTHis#g~8LOTY&;&9Az`BK3k0WC)qR61AEOS`kq5ccNL6`0}CdR*5VtdN4Bo69kgpqb;#FE zwIqgy7(L}k{%CGXlL!oB58wXs8&fZ-Q`fzmp=~B|>xunJVjV z=sM6o#cY4$_aJkZj5;l}`SvNlU}RL!@96LUO+^j#!tJ7tnCWaK@;5qAd|&w|cs!S& zV$y-^9)Xb2l)wM+)w+{>-Lu&9lKlKr>R`mq8B-Qb~?wPN|c96SD>qT5qUb3&W){`Q1Twa#7ADltjAt?&PGK^--WBapyR&JoRW$l5MG({rBhqGDRTvf*hC~GCvnzhU*WiP%R6_uCat%N zuKOle?Cy$zfKJtg?Y{0wp6;b@L4^uv6W~~Qt=*`})=F#MvRz;56LEF%;9ER~CAwBd z!tDPMy-yId1C|4vYdbN8jd*>pyq0aG@4&^DP7du5-TCnmQ?cPZG`8?#4R7MJirMxh zos}5{od|5!^8aagRT)sNP;FKl%64HQ&DE+VIVt5U0X3d& zv!f}`VZ(*i#h|L?U>Q$@+4RdYlP8{@G=C5Ogy56fJRLae_(&$PT(Zvu8XNK<)spyK zD_^b<*hDU_do>4BHF52lb7XjUj@nf!1vSjk1fa5A|5R4D=iVqS|E*R-!~gH?{JgxK z8=~sN1K4V6rqjp6>@K378VwFm58)k4*%|_tD4{8;-Zkm zyIo 
ze*DXBRVr@j*{i=)9zp-70ItCmpJU5iUh!c4Q5R-=VCU{Tb>CMj1S|0Xd+hAUDJhCF z8yZdx?{i2KLEwzJFjZwVOV@#M*L!@CE%aLv{iAkQ+n9f^F7|DYF}!OP4C zpZ#AXK9g>P6ibo2VRceUZP+Nq!pBYahkJR&BA2Guc5zBNZtnc-q9^pyndh9HZT7(m zFI~qezSf`f=LgdvnLV9Oj|HQmvRBbJc(Wb)N9)}M%8O6;ipQ5f_G=HfU&oA{F=|Q5 zJ3bKP+EeU^cK5daf-mAGe!!&qUk7A~;+|9>ol^ZklEo@%?GV_nkM*D(<>KNswpx7Q z>y~_uTb6nJ|m!#uw7FY3^;ndA**V?B4~e5oF% zPDNfGC-|?o7D0dJv3s5RW+Y4&$g*a%KTy&XP%-|^bl=xZix?1!it^K(w$Yh7kI^}P z@XyG+NP*O`;JeEKiIYF>B3@fUJv2D8ppf7u!uL`*48;(IzF?7lOQadcdpl|Zl66SE zG7_y9zij2@TS20U{HY;vr@d zA)&)Kg^23k0i%|q=`RQ1(fG1oMKmlyZQwgxl6#dm;D|5+LFME6!b~u9X(rUan}#Z& zemVa)V9{@LXgFI0U#MsKcf&5k_6cm?x~@>h{O)2H6w%4lL}$av|lP&*O$ z(mN6Vi)!Z!NaL*g(ytyJoe+#)R+Yso;X#{sD70zgPC!uVcKXBw=BQ*LYBM0r)00el zZ7|Owr&Adc7oO4ZW`ihR?IC<*?e$BFK&_E{nPqcnf0Yd4Eo2iXJ1q(qgVZ8n2@BF_$6d&GN zASO0!b;%qx$bfiXes^V~3kZUQ2qCmsg6dspvazw9h9XG$SCxvMXisSKBpG~+B>w+` z??!ve92Xc4mNPr$j5urqDhW8Q%jRVCEkY3_sx@Xf_X#O2WP7-J?N4!HTuPuSfn$j9$ z|AJGW8`uYL;z&6k`}qL7MBG?&DNX;0^=GI!C1I2d)KMe2un^}q($AmL?I=02fH#y? zl|#X}f;1Eg+Bl$koO%P3v`TpEsC92_IC+=Ar|+^MJYWdtAD5-6gY>d(4gek5V&?o0 zL--Hr$ym~R_55d|CcI}1c;BLo&AkN$xUzUihb1Fj;PZ7Yo9JNtV(X8|UR|A=xLx7E zA>7uva!G`xX^N^#{^z{5P>u;@9gxL?sbT=g&)NSsa%Tj`{s2mID_>1a+X3xX3pIzd zIu5|*{pT5){(Iyt9tXquuU}bRc%UN^s2+}ixNvIg9Uk{?Lf9X4nJ>1(Ugr(Hs*0%3 zkc)uECu?VXm@@JidbfcAi%%v}tT4kR9kVkh(Ue2Z2H5I^xnG}t|3A#2QFzqV>8Qz{ zQnMOs!DKE5mw7dpj**drwlM0^(b4(&d6~>x@U5yat}6y}1B$-)%IPm(t_w!8z^uaR zPfoGu*WW|b0VE4?ZGoJ|8yIt0oId)a;4ZGkw=TVKT+;u_x5!Jl7jb=Abt6tg5s?Nc zjxaVbn46y;8Xk7Aw+Hcf?Z3^_!Ol=7i9LbecG8Ty ztQegmu-uKyuQ3aHtGfcT2QS9}QY*XCf0Yy;7{Q}tt1!?HwLNEh@HeLTliN3g>bEz$ zw3%#nhzd_N7h3McUHtX+S=(y8(l;hgBoQd(r^Nb-ejf+YGQg*T3xo-*2&lY* z+%oyJ^fv>;beN^fXrBZ0A`C|irVxt1_Wn(1MQBgx;$3U=?CtKjiC`6?|!DLSFc`$ykA*veZ4Tq_9^r~M}Y`K zaoMFZzX6hQjt{dp7grVjCXBG5Wf*lZn1}2!xD*2UhR@@sCMK!7k9l}vKFru5j6R%x zto3p86>R;aC15P!E}{13^ul=4Z6=+e^djU!nN7JQ=c`g~PI+1X)1UXBpl5R4k&jU4 zn9TXbsd3Ozv}WdS=|CI9e74#1`z24s>e5&-8^XbE{1W%60&V#2sDolN= za9BMz&uVph%yk@%VqG^A7Zd*qE`Cp1^q{C(a__rMK{cQ$)mlU8VZFy=&h2@26mxuh 
ztZRiY=yDO@mo$wdCs4|&nwZ`>DoVD%v7%?eFW>$5*c4Dk-( z`gDrh{&%?rHbxD%b?QqJ4xYNXzu24>nsBT!`6&LQCSm5FZv)?m71 zrw<#+BQ&Jq`0k!F%*F#R9T;3;)1a<29Fz21Lt|i~A}8k)ZPPSb@4Y#?Ii@*| zIlj6>Q{x!RR}qP>Ef+J^2lgd*m^lUeI!mdPo(8)g_4oMxNxacqDr3k_qJ z>I*Mo^EVn)6}2z*Wr#q574wO5K9aqsdS_f*idwQhl$73A)s))mD-y}gY1oE(Z#B3W z-8)y`YiulK%Lv8EBB}2bN_E}GSgvOw+4a{+|gsO_#F-BM?mOrL~Vd4RW(wC=R>PNiMwgWGl?tZ-CJjeq!j>9Uk-*V}`_;@teuqxxFMExt2f(8-b{fqc(l2e&_Lb`D<6=1kXNwj=Ryz zK9D6ZV7GMBwnhZk3{?TPkwTQ?`~`N*&U@BabVTwKdym?vnr6j7v>7y93NF-N=uw=9SU@J#)i$v=#Gq6X)8)w5Pf z9jxH^GU5+426mPoJ`gvL3SJmqrHPum!D;%C=UJ()eBEus1<|DNQUmHeT3r4SEYz3* zPv9MwUkeV($FwYuH}_Vwdp`sp;pE5(b1XKGdKSA;uAWSQ=VSh z)!{-{s7Lhv@S(Z6xs41~!*k`(PlGfCk}^o;GKBten%QteY~eEm<_dBbb^JgmMfi&< zdmT7XGsJN+Gu}4v`m?dhxhLR0rubdcw0!Q$=r>T7tkw9Z<3B+kXEBT)zF>J)t`%4Q z4x@_%E=ZU6rlV$}G>LDOpr&!O0iI)FY5{jfXm>>2eq1-V)z4@g;}p)5p#q$)iR;Xh zeF<_;HDm8X(l z&*(}5k)y$+E(PH;;WXuCoX@9^{TXR06ZkU{4Iz49`w$m}l8}(#1Et9G8Un!^dS@8oY&&#m`DL?O?@AnrakVA8lHg=VYjPouz z?bUHb?yT2zjM0uUZ>Vc#B`T$8*;witcpdCV=61BZY^!fz`KKYSav8bfYbr}vNIq~~ zw>{6Z2&|UzrD6OmmmI)ZuJ~0)R60thbAR{p;5Z7N(v($qqo1Z2GJA3-?(xLh7Zt}m z@2?tqxY+N^o*%<18?4kY;P|FkJex&g`e1Hsl9R)r9?$(-MwJ?`W#i+lK-bski1+mi z&qyPqpvo;%Wts0yXx(z@NBZI}%XCn+uMiT6J%)VO3)lxytq$iD`fr!>;$ z;EnV(DG~2=loFI<%_{mUWoVh&QJr5E^j*?=6r8myGpfpkE51-EMV1<5{AlskJd5sc zu5u6C<@d-Oq>jyZA=#^9V9+L|q93PtS>emQum^EcUGdRJ1cVd~Fl%iYMw zJ&$j7J^sr1;LN4u%M7~RnlLkKy<7gi~f^4Mz2eZ#kjE?c*e>6N0TTv0_ zj>fzv_gm0H_|}OBYoaK3U$C1hs@J<8=*ZZ8Wb7XlHe9h8i}ro? 
zwXu4`v?w>*rFh#-4IZuLCZ?ZC!Q(r&CcRkbcA&kb2AKEyoHnDfUBAX&&S-8Uzk#%2 zX|vN-rMP)bU<-!rHuA!Pq(?dpJX5GT=KQ;mQ!IXd?dQWW=9bJ)@29_v6&7?o;w%c6 zmPOip*QA$l2M2Tme_9h?!!5;pR4Q$+isr-1AMgrS@PeYSMh-FHxE+^Yq8R4;EIixk z&GkRHp;2B0y$|cc+3lT&n{f}js1bq-W-q}QyMo&Q2NDae)WnvKI$WNMnQhUVkY6@@0yao*hlZF_JwBYbgBo)v576g%dQ~6m%mS_3_cu1IJ$d z=(eV%1WaPM@An(^AcOq+t1WDAFzZ*LyRB7CGQxQ`2ikyMe@YxDCNZD2k&Z-%RIIB_ z9ryg>(dz<;2feRUDJ?b(0%=q$kTwZxEkD10Dexgn?(n zzg-Ss4WM0gv8aCBJ!qB$RxsS8hSz-7mv))RiQwjS3nUr5(tmX25Z)chPfbeX{$ez+ z`0_EHfH_@Wcnx?*-i8?_-rUUTVM+_tSUlH8Y+ci@y-`?OFfrv6P{qwRHbFmPu3A3# zvD>m*?C(eL7y3-E6i%+bk;iFtHF+2L%jutGU_HH*V9T`Gf<{*Xa^K4@IR=bnQJujP zm0L-UftY>j4r&65EQj{dn~k}klTgB2?wIb3h1Goa8o#GYEgWNnskYtsDF2)Ce&xIS zcs67+jB3iu|K1@`58k0l%Zvldbivmg-3gX;q#A<;fyje|DpR80zd1%`MFVmK)<6~U zfO1SNnOF){Iq=9Qt=h`I#zlNo<)C!bjcG6=gEL5iBu71AleEs0 z?`O@WqGA zYTw_z{PB(tO(LAkLig*o zZulQh(kkNeki1WQ--8|0N9%1m!|`jtweiO;ZiHV0yEC^#%MLi|BlN~lL&HxU?4%5;&q;4I{5!nd-|Igdfm_SwzFniSL?U_@X7h*pW(U6+UqJv>f=8R$ZIf;msiu ziL4Cdit#FWf2l)2*|JDgJge;OxnvPhRaWQM zed!Xrm8Ju-k&gb`>KXj zxO#BYfJ^;hYk-9AtKLx?!Sv3ixXFa$`92g zGQYzszpfQk->~VI{aXZ4U8UcIWqpI5qtR>Py z3b_*ylEXO{g<^ij9iP#>hFy&xQVZ&9NXrX4d#9XMC+ge4VeIaihuhAEiZ(R8sP!oR z(SqT~hK@k!Vobar$5z|EiFfDh6FYcF$!< z2dKj96ur*z@>bEo>N1a0F&NBb|odO&qB1Ju)y|$cN*)Pz?!~EC=?x~jKJV(+B|UzL{g-MdEH+2F;+Tk z)vCfa&@P<1deu$m{zJv>ydI{$GiwM^IkZOreyCSIt&rM4(P_rbx=0%WPJMw*?DB&s zL)3Ui54`dWNodr(X}C^Wdz9CN%$23^-`d!)txi!Bpu`ObQ)9P3vse_VXEBQ zW!lis5}TJAa%u4-;`o*hn}Yxah@~XKhQ8~B^d>Ka>^08YQ8-NYNaT7NIDdN87>#Q` zXI7L`)!vmr*s%pmL>_le+MzI-XNb_BYHMoJMmW2;s66l0zlO+@Ry^e%J`akem#x1I zRxdd$G<0TWrZpD)>7k%+mxU&85)!w)S_wJF$^J-rArl78!W zD89MBkyNvz2KU?bs8g$iX1JMV#Qe5J1~-DVxUA$7z=1KUC~#}JcAbHfFHp5Fzc!Ln6}ng|eVYgZ$n-E`duK!cmPti^QCee@sA_cX|Ftg7Orv-s)wR zDj(|7hNvKf_4Kmn9p~%*$&xH7m9*n-^e?AK3;a4wwE!x#-K2$hm&aMKIW9Enh>3Az zG5QnJB&*|xf(hJPpFfIGv9fNtBA$mHOT3u-Rnwu}j6{2cPcGmfS9&Q}#fQv7 zVDu`Fgs)=-Us^@~SmPM?8lh*HnEc-szJCg1c{X**;*)l5QtdvHT^4i)JnZKnDc{l% z&ZktN0>S7O?+Q~NL);cX58%?kkfHz@os@bBk)Vl5ApI;v`!~tkkD4Gn)#Z$^|Vcs46{LcdW 
zAT|x>fA6`1#Lwt%qQm;RCC2B5@R=<#{C9aDFk&YHw1UrS`Kxx_-_jdikGy+5;PKW} zQD#!bV0&;)v20v*;t7>uWxiypb2o5BU=_`PM1kM(&MH;TR^Arp*c0*|7Pn4Z-5J( z&32c^eX$uQ7wP+~%-R<)C;U9HnL`a?S2pqp;CtcQU$|?UT*zXOvj))rsc5{s%^B`# z!+D@PI0=4wjy6Ha?UG$yZVy}qdIc~ezwUbS`oQwYxkKMTTV zEWg)xIZb-S@rBCte){Wr97&X_!N}fZ+{6#^EIZrmc1yk4$u27aEcHukYl9Al=7M}> zgYCe`z3lIHA|b5ab)GLf?H1UasI>mx_(u%WRF?;1IGZ+Ns5bJ2du+-tx)&@<%OW=S z8O`sg>nbGwwr{z(zia6A{W3tk18x$_OIW?tR}r|bCUwu$fbeW7H1Bk=Kc~|dXOtw| zj~*=z7ivK_MWnS+cVceo%eoQHkwH}gdz*DjpMWoO_jf7uR(m&R9%bTunRl*SZIg zPICwPrh3)mbGZO|Yoahn$;g(Mw`RZBxE;V?g7S)r>`AKe;`EUJT_UU-pv{Md)3v|cf+>-v^MOInYV^e`h}33 zH{Jl}&>6?EySv>=8RN+#DCpJ_NJ98@O*aoKF+0U0{)T~rA54VV#qufKDdCJIKj+fh zX}I=_VH5L-^?0L!q3`jr3*;noyB}rL!-^oR)?~Fz^9YfF1GIqeboQ*T<#|ZX6eFZu zIA2>AnJSeS4kH{AhyE|pWZZ8*1q7%bzF{YEn`!iVE~ufdetx`K3^&Su z#x~TxBe4nHYytlMOWs-)mYLajczB&Pxw*N!k$Kayj%m0X4BA5;&~LrG97w{!JoR%g zcTt!l?zfTNs72J&;Na7r9736YH9TU>cFxaEIBGHnGH_i`cUFKCdtMzv#>33S6eM#N zL8piu87>h>tH?R0WPL6AZfn#0S0CSiR!K=oh2GdR75`{%YJr_mlm;>6)6FQUS!NRj0RW| z>9haF3A=;Ba(RnE$E>#YJs}vcTgIOuS5eeotSh3{`g@8)s(L{^-xaR;$%7w3_uD=Wz?_<+L4u-v$$zQ}1-6G#2^V zvlu)?POuLxBVd;P#*tU5Ayb&1&((U}cd;{LOY~cj8b-Jz>;~=OG?0wyta(1&ACRq> zVwBv!x_halQ7kT=hqHdu=CL&zFZ(EBvx9?!cRA_k{G8*HlDZ1XbH{qHS!zWU6%%=z zI+MYlCt0$`*T}@Q4h}{Nc^-MQ?_5bD`Wv~gsJOnq9+%x|hFH6*i}Hi#CbJZ7C=JF7sBPMzoV`vSCnq_}{cffan(F#hUB0ZtBlNFg zwe~-^akB0I`-E1JGDJ}?S!D@`0**(!_19ZKrwY3t4HV|%++`JwlC|B8qt?!+X3Xb! 
zZNC~ZcK0iD0LMIj$>`j+H0?nf{)>WdzcR0amDXnN98;l_b=>W?(-tQ?b6eS;z7el#BwH4PRbXJ@%DU`O|KMAuWz{?ilhYkkx- zCPx)HZN^iXA&PPAhF$So&|mV7^*bfF%G=OBt@1)E&N*hs`&6nD!>|9k&}6weu?6Op zE4|-hVq(Amgp*t9oYUl7a17Iaf4+Wc1GX$jTUk9$olre1f$WpjSYVy&jDzh8g9|2V zyc97ph7GM3fRJcN(Q<9&p;eWazxEYF9cU05Wh}p|a8Z43+OvYddeXjNfhcD1phNos z<7yZnrkx;2!NkiL@}2;*k~=Zd$V#u%ppjPk=&oYXBvZ+D>efehuoFWiM)~^RzhqQ# zIs0oA$LL|WMRTQ|+*L|-u!A8B7xx7c;^Nwg^Vfow`DA`uMo}+>Gvm#Fauq>#Z~;Os zC-GU)SgzRA5vISe=gqTQ*!`7q!J(U2g+g|SuZ>p}>o#~q(8`As2gB5cMV=`qZ_T23 zuex}gyE~2a?=<7QsD8K8rQl#;>9J_9vG>BIX`;Q~7a!k!G_4sggQqX=TWeyYyRyrp z*IIwrn^UqP>vSEfPPcHPzhC9W3to#cPM2fw4fz}>yH$p>lLO<6%JOnGO?CD1|J-50 zLSyy`T7IU&9RVZDgtcxLb!57K60IT6dC7hR8Tk6hD%eQa{@Er~}c*`Xx zpEo`YM(Z%1$$%)PqX8hzYe;?N4MjUI7m59Gk*3+Ri9a(d$s{X%D#;Rw+HcW%QK{)l z?&x_jZmeURg7RnJ+*<|g1I9GS2bCH-N3Zju4cksz?a4tOi=xopVDLQwg=9hLs}`!N zw`~Usxy*-IHBv>h&QsO)JXx<0s%9iEom8O@A^UP1{i(ED(~Ui2 z)p){Cke&Mu9p|~dmZoOi-muo%t#+(zDXyxzy0hMz&O|;AK0ZwkjckP^DA*-Xn-TZL zaRzAkYy5!TQ;NSA{K6yE#f1fC?dnJ~*W)%op6W%f+9zso?j8b=tCHHZ!R(G=)OaKq zcRzEzp3d(E?RRQFfk$&6bKdlT=U4Q+dFKwd_3x+l;TRv9-x(OnWbdNjEY2bK^*L<+ zp*5n6lS|L$lRd4(EGSgf%@Se6zh2{haQ+%@zf{OwAYRL7p(wSq?vL3^;ZjR0;9?;u z28rrPoo2Uxyn=c}A~uFu^jvmbGv+Vl*D4K!#nS=*g1e>O=h>uh`-=s#%F0 zpzXMA?j$1%$ocHJU5mrR!*A_;Y^89;`PST+n+q(#&(NxeGM`D$YsC~kQOtKv#RN*k zwm&i9PqNkyEh0{dBvSi`A20pK=tpXt@&T_(Eo5Ss_IbS7^b;k{)`8YFDU&bLzRZ?P z5OF3K^H;F!K#pwx*23|`Ej7>6@Ba90qY6hGt-o>N(-|0qT;>vEmAhk_RXGyfbKYF$ zsSx}*!MK>DhbB7yEOfx)Q_?i>wHAFK%ok4VYi)ZVAYe!(j=Y0xIi;zcO@D9z~-s0!{Ogaov9Z@ zNd#;rMu&)+j_p=RSM;n0e5^sgnw{76ND!DnND_fjD;l9S{q*#W7a_xGy04I8X=Rya z{@44yl5Z(QHfXSBKRwvr#%F;5pV&nRe)r__)@x!?w&q~dn6y_BhcJ|Q3llO^eFvS& z(V^){8a;`8wtz>V?>)rZTm0?Y$8UYSRK0;E2zU@tzNM>u`k~2=JHzCXw3ul}Ln71O zTo0%#&xRLbRb5hsMfWs0q%G~ROpEqp{sn|A}jz(p`9> zUsk1BopAs(!SBN%ikxjO72S*n?_82VslhU1gms@^7GDu0{Br@9^m#H^)-(m zodwFk7KT`X0iUN&P-R3rDCL5m6TeDWB;&`-Pm7))E-$j=rqJ)J4ncdMt)83J3wkfY zTZWj^fZR)-1~zaiY@Z@_dOaNVG;4fpT59Ut;iXE%Y*DS>f+;xpfRU>$fev_^jt4Dy 
z-%}Jl%t(knzPOn|47xoFkJ#!f`sa{Ah#Fj83%sKE2FB}X4jLic9K2QJ8>CcrWW!*Qc}{5#9g_KEDNRWw z7h*UMNCh-48uMeCq8GD)p^w1g^R+4t)!bjVMOE*7D~G{%j>AMH&KhoRfNkH zu$g+g5GSL_h^gqBO6TktdK%Mi|U@?+4pSZ9*nK zL)F^JZou@)%n>hFcNzD`t&;xnL{1riKXFPT**JW?&%ZD?lG5_EGYZG z?%_HvXFZ+@eCB;d$Tgz&ch}!;iu$1^p-_1niQgv(n{$Zzi`^@HK7Q=|!6+;&Oh+fh z5M`E!^B$gEc!&;_R79^yk`&yM@PFLGpVj-oLyOKJcj%|jKqtq^tkQVAiVO|aY}Ji^ z>kVR=9%dop?R|WG%~%iLaNZ5_-1}^kZ7!oOnVqpsiz}8be!lnw&##7xv-gny8Nj=n z$bwe{k7UZUCsgRfHPG8oO3&VKa>D&XCv#d-+7oq2llz>#kW;_BoDHsd$oWCqZb>SY zQnSkKAjJ~DnVX_uF;xh}%^Mo5vAOF~pQ!sP_?_ZwlKj8_Xeizr33X@i{u#Hurhvbl zc2{u4q7S^5@kIU3TbCzLybdo-iDVZwf&T`h(}wDe^h!0H|F#;`;HKO5&A%~&Q#JSc z1ifcZsJbS%k@ilb2`iNLdu0>DZ@rW~DD?upfM#)!-2y2f=u_!2ggQ`I#kR#eS-Fya z3j+nRJdy)JxJMS8`i`a<5mC5>1ANue#zJirZ8zOorY|Os{u*O5kWWk)K})m9ea2Pd zq`_eKBNL->8lD3p7OBbJgn_cE-KH6yiX3vCJ!+zCyY z&P(92<;ro(6)T;55OEJCP8s408St6(in_e2=w3bEdkpO7vSSwtRbhDsjg?j++drN*(acTiN(xVBWor`U41!5|W+M5c)b7 zON%nnW0ySx?waWyNfhXy5KRhdiboqEoaInmGHRZ|#dD$b!s$n!^@_meWA0A#Ahq?h zTzcc3IjL!35ON*TS+i{@NJ~LsBg7Hn#b`S1DEJzB zRSGipx$ex{THk61hSeME_l7hQmVW|cAPkF%{qQ{>HTb387cpPOtag(f!dtegKiz3i zS3iJAL$OO!FRIPUd{<*~x|ccmj_r zBMZA|aj#}Be_8y4-T+0Cq=JHpO_UJCNMv*hW$B2tR+jp$n!ol%SwSQmDmsN_Sa?vT;L^$M{ahm{W$_axMKb0`z?(B;#taVYZ?qQ;uM6JnU{eG1+_ z9gE}M5fP$Ev9?X1@g&q3iGwHoLGf}(w2^Hs@Ay-T%2@hm?xSz(DdYI-6&? 
zy)3ccn|BR8_>IPplpWi+SHa`j-b(vbznz^IR#uAh`B8lRuPdkn#8%Wjl@{I^sc9yS zU+`tMUIxE)+ro1MPrycA(IkmypD8Sm;}|6C@cDj4mEKB@f1Zb5j5;^lOn}C`Mi5Vg_$%VMi;gVav5W}AX1|7*#vQo3@@|I&P>X1H*X_jishI4mwrgFbxgku6Qvr0s?`B z?(&Epg|IMz`F{$>u(sEn!IUgh^kSrrdVFz!>(0H-I*UOJWuVM z?aV{+S6t*{ajNO2jeow~AWlJW1%ZJ49>iNa>TkWWaGV$!wO+*H-|bBy?E{wmm~6~w ziWFkL;szt6r}O&Q!5W2RP_@*M7P-I|i}*1Ifz@OBxl#eDtevZ+1qFide1G*bO`)`h zD0g%A=FR4VgDGoH_3=*Hta6UwF>3mLpA4r2TJ;E%yet!$!)rq zN5}O|d9VLRe^i#)@I&~(f9WZ_pTtPQSidN&I48Y@%&g{@@W4;tu9HxEB`zNQ;qX^x z!CNEl0xhrDlYrgds*;W!!k4e}1nZ`=Ma;KlBBL`YB!+5JKeYu%ieBLluymaFD+&r$ zepE+B@dfij<+tI5vIAn{doC(^muSw=@blblqozBUe#X;owhxX(EXGHL;$ka{v!#@Y z@K##0@C7g1&uvf@&4ygTC_}d69td@u!(Qw^tD(u5TROQ^1R;b|5Do{ zDC~w2{@8y-m;<}DUXspl2%lJa+G-?==E-=IQ_IHWYMnia`yQtT*zrPx$Rw{`tl~DE zho2zTbp*ocF{9%*M+4VmG1Y5s&@v~Fc~Ep1;~DU6w1U?mXQ7VEBgH<*n%`{p3v7}R z-Zn!GHe`+!zU5^cvL@+W*IN1I_oqd7<(E>v>d^5-IFx*tma4Z3u__*6ZG$7q9hg3( z-+iGQ*s?w3GRuc57T{ONyp@y3P*qqsGh9eHcad7jd-^A2#L0zUb(2-Yk=^x~;>FIC ztCZ}+l(tQ&A*#E(E-`-57i?L_x=*_lNtUVZ3Wcjmg`A(EQ(0N{Sd6=wd{Q=coa=I+?DsrhWihBy`>cX#L0$wKy5Yw@IQz_I^jo^1XRC4)36-VKR%VDUI ztA6n)uQ^SsWYjR#I`Hw%a@e2eaT2Vwa!xmqNCH#iv9^n~fvv5OX=h_L!yEg*rS_TB zFwk1R=C4p!$F7U2-!U_JRTXY#DV(mQ87Ke0F54Bc&YpkJb}@g!Gcm8BWNamf=lt|0 zlxsj41?Rk-l$8nUeGhV{KvPxtWKBf-yzV&QbuahHomM#>TIsqyT;!DC5m*P-9qkQsxWn*N=8*vLoHQ<7v=9_4GHGaEUz_Y;p%{+zv#*f&# z5XnD>W_4TS^CN!vlrDc-ZWx`MY~tG;HG%j5dWe`7_we|2cA!Phw?lBkL+Q5V5L2;s zvgxV8F|OYdSnI+SIY39o`2pABE|vpt4VH$O2l-gly|Cm}-oi8fkgCL3wulx+>{#@%HN z7n9zaB0ol*`7KIU?e~{1pTJR5Tez3^PE)922zfSY0%_GAPW<4%E}=*GY`vm+F*)IB zP?e?f`Tz9<&o#2YnzQFbTvH2roTpN8?2U}a$Z=EO47=zEr~)rwzu)pFRO$-E7A%H_ zlB!u*1lw4M?@>{zs@}lOIWP5M+S`+qmrvgl*k8tGk<*__w@`CzO*b7r+{QOD@)-IR zD<4wU1#aQHPG@!f(RA?EiEd!+=FeZ-*qruZNZ4zPcap;~I{!7zGAnP*u;cg1A{#M% zsnKReA8-9j7D1__z3h~4ljW-^^74FFuliAXy4AYTP=~qgbg(SXb)FntxULM+5$-F! 
zB;~T4a8L295356ubSSr(|6sfL`=!?3@{x%g+AqxK8#OG+ zPI@2yAHLo@9P9V}AHSujgj7amCnEOI z@^HZplueq*Ru}v|$S+2mv94sB`hi)Zv>70U80+Jq92GT2^JAo>{$V)O`H0 zQ3LlS9he^&2_8mk*y5y(2n!dNR-7&%$svKWFkT~Qji)B`ooDD|Z;E|URXzM=+Vy{o z7c0LNZ^O|(vOJkuR}bzq>>;Q9&8+|S-W(|J{k7{Q;g6u2GK~@=+5V;^I@?S^T5GO? zjI1&0Qv+?eZG0y}smd$o3L~Qo0H^JUD;b;KcM-L+R`pIj-HW|-hI@Y#lSM~a?Q=9= z3=uAiy?cLZV0!#L;W4_YNduX{QQ}ddPii)lq6=>RQqeYr1jhBlhzznFt6#QzhDVb~ z(QDD!>3(5{hHSQH`m{Mk8k-cIPG>!61*Hk^1`lcJEs#ZVP5~QB9v)(Gjtvo6QsKYH zs!l_gFyIaVEf*}Xu&lv_5HjASUHg+3f45JK!gY(>87p#o=~VRaHgkgD@ognCcJ}mT z3I)0KgQd;J05W!~FHy0hp&OjGfA*-(6id$-dcnp+_=AB%Dio;g^5_2PuV2|~@)KRJ zBPr&dJvS!)JKH`Y6+M<;R7B3A`whZz$Ia#Ru9cm-@{e$EqE9bLMr{z0z2o!h2z3oM zR%}S_q$x@JK7#L0)rat_zrf%% zt;O$feu%%4QSn!yP6>uDr1r8T#!!f;aR>{91$kZ(1$p8=l0Lx4B&0eW?F%g~#_LCWIdU!RB6BMWji zCT~=QpV)LP1Uq?1sb@6~+$$HNqn{th{zECgRFKEvEpo_#YwCgG<0=Om%c@=XK>tN`G*l zb9V{2I8RPE2O3zGdNR5T4JgP-+JmB>Y&47Ds{QKMLlxe#wrxSPxS`vE`n)JS>vMwe zS@cpF98Q1XDL|<}{pV9?0HN>&{SzpO z1Kp<}gMk0SM|hwz=i`K#sMF|&9eqt^;*W5to>;gf<&Fn$iR{Nyoa0dE2+a4ddNZ@1 z4}dxg%t|^Ys;lmatt~Uo7Yv6oMq2DKg(#JjFwP;0AF2N*M1hxs9|lyi_;o?=0EFlR zprRJA+$5y^q8Wj9{^f5Am!O%R4bL=S5QxOLs9uZ!CGEySZs@!kaO(oRlmW1d$O=T; z`7D5axc^Vkg%L{GfNw5t!j}`Nn1tny2Q#cVwQ;^){E8nlfCcgY0BmNodVy66rFZAV z>t)=J>dFnHb53&q&qa;!LNUO3ec`gnn)~`&_@e0m339(W$Q1vrm{F=cUa7)K9A$RC zNn82!TL8M6S}w>ip^zqkCZSXdFf$ayf>0|X1U?=xj5WBfV-}z%M~35T&1ubG7vwO$ z?Y?M+`kCba`Lnbz%nz`u0E^bn0=fC0Q#{`t6n+Ygsf5u{Jqi(jm6f4aY?3{Y5PnD2 z4O=LGHZBPn00d}++iqT;GH%P3wn+_I957^07kkOZ)^1lG0WAr6KCcImbuBnV zowBpL6HAH<>rrA|efmaeQ-n?%42<&z?r*`YlIXp*vT27sH|W%rnZRqm-J! 
zQba@q^ovg~=!D8e;4Ic+b|i=mc?P4M>ghRJSy@d^O$Cy2-_SYgDoI!q9{-vqs5Jk; ziFIhzRm`g$XY}y)M~ps$)01PMKP5CW@@h^25UVKxHFodu3UA%IiUMRiH<0rN+TNmE zA23|#zLMhDKQ%;*B@~qL0DVnCIbt9TmyG9$8t0=&j|2td^UWXyRl*C^Nl;7ynXm-x zPlT7{lo{#hl6dZNa74Vj-}fnTGtK<*xJR{Ipj$>4)|O#m?W#G2ohn)~+V&vO@s9R4 z(Jx#mU$%kr=6M@zDi%i^(JoP0T9obMYdw9{dg54T7S(kI5~J6!?%8s-)?Z>ML3u(;=3*%753(|f8Y?~UY!B_H$Y|o z^sBedeVs;u#2JfHdSUdx=$ak7yPW891?BLl`PswU;?#fS8OrYp3eaP_|~ z25nvYy=Uj~EH<8Q>(Vi?)MOR6{;#V<*k6JA;lkpyH7^6*hamHj0hy11JHZi5siXn# z$k@nlwV;k9egip)+boLxCB+4g?ZH{kh8wgeoAmdV)+um5M^ zACjuss4?xj9L`U`rRCHzkcn=OMfiVxIv{I#adC)ON{yJxr^urDMQNhA)1`)s03YTF zmoc=vu`eU~I?=E0mMb}^NaVA0-2q+`suTs2ALEkC*!pXYuDO9apCVY z{;b?NCGE3S=vE6%O3Rpa^}49@3CzA;^To@r z08kSC^irjw?dH2EXFI-*8n-|Z5tO$4Q}6>XDyrT7`vF*LH%J1Zy5!RZA!R7X1yKDG zaigC2ufQZxOk2Fu3WUG2;Slyd z0RgFlce4CVx7e$rK_Z91#!2BJAXTO!o(y@cn9gvg*~ z11Jf=yKegPL{3glTDrsjY{Uu1sHiBQ76Oe;K3(y#!m6QJ_8CoDL>8n$4joI_XtJ?i z6*W2y6wF~^PM(hk^)R14X1ZlY6>cUjl^-afyOfeDU|$_wljWC6?;Ox94v zQ14i-;{=8Opnyc8`a~6(9b&{0nnC5EtEsI42cMGw>N51oa~|VBk_!GtY>MrliQEMa z^=E!zS&X01I(+Xv6bV4(0mA`$=_ZiRU#C&Y+5olp_v65`0l)}Dz3V@pMJRoHXH}kT z4~3Z5E?k5Twtw$KE&@cwoeUvH4r#UJt}pk^>k0n(@zt+%&k8lK*+W1Af0{jmV{-W6 z(yXGFTZF#B0V0!t76#RAH>f6>fUfON26+QqpkT`d6@@@Ojt#w&7eZkIK(=z zA{Pn@yBYGJpauhNf&-M~9{l8AGEX~)23bJhuZTlI{6B+C6gHbZiRN9;eF2Jw8mCKv z=d(JL-9u%x6a;1vUv|2+V+c_|1FCYGUfXsk-h~x)pf)3xW+5CvkAIB>o`2oy*zzoX zKp+;a3o>@X*4#jrq25$g=(al}Y6wwfMLhGFF`5eK!{)(-KC4-Gwp?r~-sJC>E+oo5dUcYNP8%}sPJOuE43 zu3_l-P$7)Uf+5^34AVoyO>zB8tU4`r64URPv0ZZi41%E%voRVE<#fx4X_QEkUl*{z zSY=OaM6HAYf@hH5QhHP}4BY02p^rbIsu^?_3Ix}!ZnGf@uj=Z3e+tY_Slf2U^G_E> zJhR%Up(p@!>pRZDRgkbBAu9{5Q^Wn|^FX{zF#c*|%^dy|xMaOsxhc{Z(TFarJM{0( zpdy0IUK^7a#ZPbq(#0{#z`o4%1BD^iZh|BrY7Q5?^qmN6Lx4E(X?Xq5hA!gZG6)AY zY&i(W&_`jEL7c^gu~C&3WLx;R{yNrvcvyL`D-~I&oFC`}HkdAebs%>P z1@KVT)Uwu0jwpwM3t2y?e}}4wmzZzS)uK3|1)w?V_d(DU0V&xD^iFqFmP{Q^y%M%;wW17+FQaH`7Gv5>UmFzsotJ11D$~n_Vuru9??Iu;D zfFb!8SwNLdY6zyECL%!z)YX=IwK|7N_9E})+prGcpSYov%uw-ZO4?wq=fC73xCb=- z_N`E0$W(?O8r9qxi6=#jg06mrgKU94fBuWvL{J{{w)c>^< 
zW=2plBZ5fq+or2f8vh!?29pbC8sa1b(hY48YH{9Og0;|&l1gI#Gl(y6h~8#xz-0q< zq(|;@q1II#&4g5tGYh-&v${@IIMpf z3Od^^xRZlgI*CHR5~y=~iTUaR8~|uCe(pFH?>s&5TRc&vbif7zkMu8zCZdrIMPpV4 z>5J>8sQ`UugaG)#MG&d6JJ`V!ry@?*P-Q9>V59#W`N1+!^Zn=znSJoUip5W&xS{A% zL<-|O0`=`SFey;)`w|lk_3eKh{)aQ09|<(x(j}->L5;wCjfkBa+0XGgEoCq_=kl#r zWYUOhu*L*VHK_cSiGQz! z$7J!tbLT!-^xeJ(dMl~ zajwUM4^Z9oH zsv*_)P7N`E1`@LdCHg6HiBfjxsn1NG-|+Ai@?Pv=HxT z*Div*1V51XAYli_4)hX;KrUdL=Wnt->-vk}y?pUMqL-@;TzB9~qA^bXBf3LStAFzI zZ7HgXy*(DH*a33@>f}oT#2y&Nu7OAIMX?yX?p}xe^^D+A>uciv_P!eG=5nk3N!o+g&WB*;19atmO zwjD@YOC3YiK9utUx4&muCDIhvl#AX&4RM_Y?3e!4Z74cj?uDfS+o$a(P#-qm_nz)E zRGw}D+vx_m7Kka%`*{J-#_}|Ar+cQq=!m1KyabqjFKq$s0=|%7&rC=I1kHHpOaW!@ z!@VlRR6ooq(4b)OEe7oUek_%Y6+MZXa2!Ka%o9k7&89`2mg3LIwL=fBu%*fy;r2?G>XWBUqay}^t_Joae#L9epx!r0xa3-s&PAnL%8LJQlql5vkiu+30WK!? zF|`~dSfCO#Ux2B4w;qs{|9r*=&Piz(WVKGcOu~c@%&9hkK`{s|IMo(7uqq0ZmSV3S zhgz-h7#3S@{*(b~NrT?Yby}6z96*3y&$a!^rSfVEi40%UC^-7=!r+lOg*x<)-aiU{ z9?C@8?==CZbqDKRg)#G)mVEyW0c*m+F$67ii^*-I=hq8}(=N3l5T&rbgC~PKvg=hj zKn3QA3Gd;$o45Y;h=oJHZKSS|5J*+u1^Cb*u)X=nF!f!kiAhuE8d#+^T)MtT7*B5%9 zNy2T2{)f|#SnXee)urI3rv9R~ReIKM?SX^|C^cL32TPmVs7=K)SV&#oWE1|O$^Pj~ zZC^ftAP=1MbncL7IByYj+<@K$S0yx{@e3-HA%&Q`i*N}fa-=WuRaG0blLH_3?MkP6RvUUw>Z}Lux7XW6w5(zy znhLvTd$pe_codpCvc|oBY&NhV@h6A4?aU%F3jv}AaR?l|etC`$i#0SsavWm%%8`8i zjRpe21|BFuJcFvnkI2PfU3uL#2cl)>%c$?*T4^22IMncRKJp{}<2}Q)bpy3kmJr@% zJ`lSW<7m~+NEs=A9_qLy-?;~kgm}vAcJ(Z2ew88+hF2hDtc9Aw#z5x>F7BF1D>|z0 zp*6@XCd8h3uV;M^q-3IsNq==ofU~#7P*TQwukVQ)6U^-PXS?V5KW@|AKsG>Av+TDYr~Iqzk~u6D#Muy^gHy_EnOd}r1$UAQztmY@1L8M z2HJZn7kxnyygBUOgfb>4WGxa7+$(vo2`0RRGPBlQA=rqlS^dBuPmuX+=yPd@n?th@E3Q`TuePtCwGgPM& zsMu#Z|J0Afj`jcxe4-aJ^&bBr9i~?@(a~k(x;EXImpH* zZ!2o-4cV(ddj@?r#Q?;!wzB#n|3Fvo$?eaH^$U@2PM7}tfy%i6#;LyVdU_Ybx>y9y z?iNCgv{`1s&^!hDCc(8Hz~5I8eZZYp%rz(Dg2C0NeRGEV_=G<>FMWVNPfucLk6in+ z-Ep@pe^u-0?(WVwI`%(@!~`*iQnPrC3SMYMu;2d>Uu(J~D&S<3({;$bs zLN}fYh7J__{m=4E(^W9;P1{3?c(l+G;cQC4+oQM0RZI65)qMR0b7L22e{wX~@Fqx$ z4H$Khu7S=3u98gUfh^mul54A8T>X|Y#K}OwNAoz#93Yr4LeqnOJ$#fx@w?v4A_TF3 
ziH1ocnS}Yqk>ZENqwai z=(q^oKF)g0~j$$a|KViUZn@!ZX zXDMDwrC7S6_n4eanu}MX^v#+O2z~trN7{okMEgC02n&7K#2)c>;Y; z@(KY&1D_b3b=0R2=(0OX$Z0+cIeb?aEzP1u{Ml`TxIFC3s`kKp!p2sMiqfvyo4n@1 zUst!BpI;s*(5eyhw4aN=X9w6(@|I&gA!51A^^RhVfw+9k5ssWZ2OViWJ=F=LKs{=# zzm)In_Q%7Bkcq~M)cb|Dk%a=?Zw~ z#JqBsef6P^*27t!`!vkRgw{<(J>yG{yM_+le*8E=!YSQ#Mr0(9oKj3zQuGF%=ew*d zdo{J{MkU**io@k^-%OXut5HNjo5)dX`5w1l*gd^9F7uQUI}dS?p*7w&ERB~f=}6V0 zNJDtaOSxhkQ@%diimZ|RLfF6Vs1%#BXsa}JN6gEL)V1es^i(4*oeMP&>9H%&6H7#P zCKb*_+O&6YjFLVB!0l$ZX+a~wX9|bViUC-+P^n-ZqV*P4wwlip4+$r`W2cDle%H1g zZ@k*UCA9F}zYA&Az+I$fLU8lS2-+{(`F)cHeGn)=t;++Oey8@HdqJO^?~>cJHq&M^i}f_F(Yutc?qD z?IGLl>AxAMPCZ0z-ad!(PVf3UQiDufRKFqw_(b)_3l$>Dzf!VBdO}nb?|x=A_*_zA z5XqmD(Mm;S*Q6~a$yoQ~dc_>@F+{cezQlerx7>?%5s4voj=3tPoZ6TyR#HG{$)vWC z^lU57h09_q79=z6=1k+`0Gt`F^7Ms?gM(`xqdoK{gw~hqMsNoGWX>q-h{7+_&ei> z&XA<8(g0HE^a?k}fj0_I6TU)_0y-`UTt+B(qN7Jq z*q&GX-xM~Z_C={JOke_!IjEN-}Xzss}cw8Ffm>e-(uQYu2P8cON?TWRk? z!qYvxkkE6)?*Rdb9c^X(%4P;nX6t~G|9euh7E2o?zn9;ZKGBy(9JP=y@_zXo4>=4_ ztF3pCT77d#4;pgK<_<;h)%$e^8%3A(lUHHg1V^F^4UFxF{{9g$AL0680)+){8S&%) z|0J{Eqj>(KzsG4ZV&I;ATQr@9#!iyU_Pyy<UA~b{N8x>P zQqXzQt59a#omA$9<9g?`C5F5Ar&`D6nQNjACG_H6Csgb*=7^VL9 zq1pzOuHO5QaPD#jUf$`v#w?@vI{XKQj8@}*oxA{KPH8^ zKfiXj@P&kbszZ8r7#kb0mZ^B62eG&}RZ{n}cX+aFSPZ`3E?5oUiS(C%Zrno4#@B-x z?4BwrI<%jDYOWZ)Kt=Zr%SEu$aVIJ^c9cwiwrzQzJ1$vy&17RondgV$jKdrGuW6d) zaXW>o`^ZyH^PVs3NzY2?NNV~TUZ2?UwmO`w!W1GaatHMzK8`ZH?TFUMMgaCYi0= z@Xhw}TB11JQ=cM#D(0X6T;{ZmdG^!#b@hii#-U8l#jgPI$ii@5(3VHZ)^_MEI)zO9 z$r0J>gm2&2J33DLJ9c-qyDRL?7tD&>5x9?zyT#}w^W#c}`x+z~>P?AR^&LbT=4@>T zC*>qba4*?aD23-=o06OG4sWMaHgk>S-=CJLfAwHgVPtw`lD2#_$TgePTo6HLc`C9@PPdnc*Y_Gotm~Y&4@7z{U=eUt zpOh=;>E-YJE-aLZcQQ6n7uXpadx{ceZWhFK`s8GtnaWE3PJuV0HOLydD^ zWlnbXrl=2gR;bAZkNXbomVUJmrYlk=wewj_xO-8!&8I|~$SVj|y}m(tY<>_D;_Au0 zzi*Z2V!5vM$o0tK<0k^^^No^<7%6UFN)tS`tZ{R#S8+Kh%l)lquhD<={tgbw&bP!y zgpMAoPq_M^UQ*@h$*>NSUn9Xd`HhXh?qnnt8AgRY^5s#_NT6ie1VjxJOpcJm-pp>TYP-8FS_riNj-2A+K zkM(En*)GdfReq{~sEdn?DrxFJHVQiW8^SJML(qJaaIP#@u*g+#Y8efY$C9^!_k5|o 
zv-3Dy#iXKfJS3J(-<1C#z|W^<_dV{3$%aLuS6$1kMWg$Dz2rCO?H9;~zPM~kC587> ztEnaGZ=VMiq=^+l2eYPM{iOa_O)rNi8H6)~uP!A)khL#tYpE*$KZ z_bo>1BBHLPu4VVPK#eArhB0lAK-zWsab%X{!{qDq)`OF6GN3N=q?}uk9cjkT-%oo* zW{H2EAU1s!&E@vE!zDMw-82gSS-X z{cOkGq6}zL9&39?yfW%?n!*bLp)g`q@D5+w-xB`soiR*qI9MXI@+*4P@=ht5g;b^P z1$Vt_hh(1G=eg=?6A7Clx;4*}8|qhJS|)Zc)(S#|;n+`6$1a@Jrsu~h2$K3;Xpx|- zi4z`5U%a^F*iBF@ep91AN8lbBUU*9uJ$^r&I7-J@JS=`lzSaYrXgwn`k`Vr1wQT$O z<1EQXwW|AZkQQ<8uun;Pu}Gk{viN$QfE$LR8V|bq^q~Hy6$c)ytqiNNrG(_Fk8s)R z;D*-47kL|#^?vmd1^VqnW!46gl9HmLp30dk^QuuV^f#;+jX1Zr?DDMW!+-L<*1M_v zHO}RZ?QB%d`wsY0fSpvD`Pf4%&naJOdURMlR$0A`-!;?;D#OIA;!|MP<;nld&l3?7 z6XW6G5fY|#8`9B3#H5@m+m;wtOhjLz^`MPaIo0Nn6^R)OVC+g}bw($1{kA=)wLn`hI-EViL@py z#h<%tf=XuW5@wsuWv#ry^e2Nu6~gv*KB|L3Vnn&7a&p1aX-lQB{cRd8N&Op-nM6BY zj9Hfm&3DY^ZIID>lxfOKDJPy(Yn@EqD4W@85_D3C=K0e${&>y;G_y2||Guq3qS~xM z=}onn32E=M1uvk;Ws{RT%Y!#Do(T(#@9zfMRjuY74h~cknJ`hXyzbn@s_V6gw|y1A zW>{Uy@6fK(Kv`d%s;%@G&b1cfu#q9IW1LSO2 zF?4eXj1GtpN@3$lRUb8Vbj3b({5wW$`v;6(@ePoN+a=R;b7?6lE5ju+Z2dpq>f4$9 zzVT-c9>1$PsoWfyt2f)>x*910^R`WpdkE7U>|rl*u9PO2yfysV9O4 zK~FbJH%G4_TaqR1XJa{`CA-ytqr-!y@$8Q=e?PlS_yyO--eQfUaM-QQ@~)Dn9?#O5 zv1_AV-9%0BX0Z8@)JcM7*g!e?wn{FvF`I4*ra&I$ab4Eb&~Vyb?2mZwCghR*Y^5A}pN}GWpSN03xzNc=515JST^+9{cG*-58XQG@A zS@5$n0ijW@<_!-IPx6R}k7xR9Z1jDUtJKwgL;J!FWEG3ua?HF}Y8rRtL$@z+GX0PMn2%Rh7O|}WR1!%x5cUM=Sf#>-|19D{7 z{Z46cLcYlSOlK;gF0TB2qQm6v+Fi{rXM@^Orv#D%3HC){(@z^;=YGKqZ#FV{7k|SK z9Au2s>A3BfFHlpMcfvz*6%3VRmdo0c_p*guPg-R&`ytk+IKdE20CIIR5%dxJc$tPrysuI zDxjpGtyJrI0Wp#DS`RU+@11peYcS|(z8po9TeSqoy6d2Vj#Ivg_owm4<9kP&onD@v z&!?UjNs#|oNKwxZ#l{Zir^l&}``b8Xnlf+Hc*M%m+Nsy+Bl6Cv8y{B%G2f7*sHk!; zy|wrW1YRoKr4l8S_oXb_qt=wBNra)rJEBk~D{EjWMP1Xy#bpe6>S}Jjwu|@_Rg-;^ zIsQ}7H0I9jx>NdMDM)x$3XO^>y895*{yi0iOL)`roH9KB zmuQ_yj-4Usr9z$zH*q5aG>#wl&HdTkOa~(E3+)unB1glvE0!v4*IWC5%cI-5W^rgGndjL)0tpCM!tQ^wHe%JHV)A7=hC_GoQ_ zbz>Sl6%8pS3Bo;&gx-Dxv5w$*mcjC<8>xOYS+n9g#%>i9s}p9u+-gn+rsIA5YWZ+r zOH)&qAsb|AUSt=aPej)0c}%83VSlT5{0I91!F4yfr}q4_xSnp7OMIIx!;^@F8#d7} zm9{SS*lJPDuhZ8w+)fU8PBt}J#9HiVT-DRn06 
zigZ*39oU7hm|D+VEFBx-k84geEGL(ac&k+uk(M{Q7Hpt7;7XjBh0{U~8^xM3AKzwX zpuk!Laul}2%&~3GjR60+Ym2m|lWU`5@Xu95sveQd+TIWu=*+Z_-)jZ~-w>SwMLvAi zl(C>PY>4kS{ln30?n97A7p^A8Uy?dp@c?7+UVwJc(rhla!mqfle9NwYI=t|g{4%7P zoFv49#G|Ug=FhUi75vMCq~I=U;J-bNQP;+Ug5|B3?Xd} z`+$v8F%SY%g|n_r2%2%QEpAGngt=|kmx7f!g}n_|I+S5tp8 z=sk;BEl1*VJ9|eUU6rua(GgvCMfVFKEb6)A7rOH zd9Mkan$Nhu&lKSk_20x8y*CA^I~(*h7#QZpsnHPgNpRC2@oOEjHkfXPHPFatWD)5` z!r@7((Dp2hAWQ{6)cr`d+fGD==j1>LY4%~$V?>AJ5@JgdjtRR;m1z7uLk+X3F<7Nk_t`W{G!oDBaMM=?aWx4EM)P5C|J* z2-JtvMw_piev&U_3!y0$RsWqj#3;B}ro<*lltJ>44tJ~q_M!Wto`TIo8|d9UI*fgs z0*Z<6yQOn#(as{c?#KA6Mf|8RRJ4aTpC!mG2=?L+?1fDNSl~GY_QrwLCJPO`*W3!+ zW;sLsVr;@&`za=*M15berbM~GA%A;New9Z7!+mx7voupqeH!(R`XbdMMTtB$I_Zrb zi*|5(An0*J)!Ii%$+@i1_57piZK1g`RaM5%_de^n!GU-e`?mZz{$y{F!m}ySW4T66p zwxz5hK|{FnQo+-G#OG?3w4G#Sp>i!>;#*vr+N{0t;fbHej7bdn@GpERjhWK2kv6lA zevVqpOR*2o-S;a}d$pS$3Q)^Llz+P}STZTr8PQ7Pvy{d5G47Lg;xoe5zqsYlb9@ZM zks0gYsAprM%2d&nm*ChAXVQWNL2ONJ+2t@GzzJfA#e>B(TJS7wX^5q^u}1}%t} z%5~gjJ0FT6_Ab6x{<1$6!h=wnQaX%5A8iG=>Pu*!^$9a4Y&nFt>Ftg*?pX8gZcO!*Jdg? zjx>$<`|SLsG{EIiXAb3YXyaAw6X3az=HAh*imZYqz|FAu-T_?k*IUJi!+v(5=fApE z1?pqM2q#Pja#HxEZqr@ z&FtL6POukHvW5eCJ@x}EsN(QU4|da)Uz>FcDo0I!Fn}`fvKS?rN?-%u^m(K_-Y_AQ z>Ba!>*Ai3kqOjT~>Jk_NnO)iNFJqiG%99l*bT;K-WZF+~?R}A^;dJcT+xNDegbfge z?RqO^#!7J_8KQfbEBy=wGDXmRO zt*<#5FwBfp5Vk9+PVn=?%42blORvrlCFallEyy>}u01o6L(0jSzZRxSpLQ!c1@;+s zTYk3@l#A#sgf})4sMQK~C-IU6f10ZnFnX5>abU*E23)&48uXb-W1>P?^ww$l9}eHB zGM~=^1FfDq1yZVnvue5^sW!x0b2z$ImmC;~8;v7WqU#+fae>Ne^aw2aUDZxMh=d1{ z;k!Najx#@;>xrh#yuSRkn-3HCAv4G z!fB%NpG2x%=lluR_zx?+cuyW)XHNn};{A2WXsz;awSq4Exg_9qhek$5P>cY#0TWS# zW^m%#Ymqs8@GEd~ud`!9?lX4St>2n!fWQS@9N2@&l8fqqR>D_pag(mPhf(@dcv9y z?xoi9DX1fW6mYA2fIF7i$)q=Bld8<3*WB*ez_+^%OMxh$plmh%cN}O~k)<=SLCg5D zcGeLjK_GsDDhybA9=y$4dr4EH0xU?FsSm+ii3wpRDlo~8NdgfrlLlb`lL$7$vjZ~! 
zr4(@VUHPyb02T>W>Pms5uD`?EeU*QfOe6dYfxi-mbYt1;RFkfqiZ|U?(^+RAEHLH% z7OmSdjtBPe!BjvXz&#XJJI_dK?lf4X(&`99>;s*#(ejAU=^(FB$OOP!X#w1jVB10N zz+Iv~pxV!{$qN{#eQR-Uu|Z2n@$RLM${Yz?--h$iqG4Sy_`CzQt%|>M4i`)dETPrN zNK>`KXB!t64i1r+UwZ@EaCjY$0YbBvDtz&DQ)y+9`g75v2v!g~QtgTFH5H7Lc%CX8gixGJM2jnE_#sV|wk*=XOpRC$?(I47@>USA&a9!r zk>jVmT}yeTHnVNJH&i-H@j9#q@2=CP){{OOw^({aJr5N}pHN>nsb)7G7p&&l6c@`#LL-vNpm$uU`)jTFWL!uszPP4! zY^=K8RV?dTLK%cuU>QdugB*dp=02Ani0j&5%7Ph|)3EH*|zWFUK;~9OZ|r_=LY*&DdT>?JC^G zi!uWfhkYS_+OmP>?p-1f1-M`FRzA4xxB(Drl4Z_+Vn%0uqM?Ai{4aO!Q2lCI0QsYq zvMNY?)ahCUvI{D~BGkz1wrA%&^2S)lKI2g%xsoES*r_hYyPBFjZZZKrhq*CItE3twfETle03I zSBiiT0ldah391`?;XA;I+ay4CyJrFWlQ{#J>4)}{f{{S$`7AxIc{&Z2$OEuY2uI-F zZTEhRFU4|m^^ZT?#?J%3k-0HSWsLHRmgSlJiP_3CMzvuOBwmlaaW-)qMUikR1Y(x- zdW=jzO7lR2KF&x&oCTf+%q@gWt6K(kvROx?yY_QN5&LlYk#!ax-a6Rj=j1GzWgWKJ zuN;WkCC~k}yJahH&;o|yaGbZU#eM&4ZU&wTnp{!q`(1*4Pg1RX(V2-jxF~7R+eYq0h0uD(JN=D45)s#cbGy97Er9w8_Z%~||asrfIga8t*L{|{thzU_V z{`=({S*TUV4ppC$H|VmK1q6iG9M)9dshef7QF(X?m^HX3`<+O)sRnE#eF`v1mN3no)rJ9dba-01A2JI&lCB4(c0!ZoETv6vLVSw*gL-u%REhl9z8Gl?Jd1iD*vfyCKuzVjnRe!*MJ^##>AtUZ>0$FE@w1Mn^~MW4?bEdK4QOdHaq?I!aKTGUBu~c%-v=v-rEGe`U;*^qGAW?=TlmRS*~M?d zeQF>HavlrcP=%hF7f@^E{9jgI>hr|srCEq_^Hy~|9CIw@>RQHKPfuH5Jd}RNYtvmS z;xr$N;V}K|;qlaQKf9tbt<_Vl&J4V5@IqCU zZ>vkh-{0TP&JKjXxUeurz_r|gE*0|{(H(-Ih+DmrRkpUaJ-rfdp3B$Y$L4kX;Sy$5_A_?!AIKy8gqJ8kbb z!f^`jy$V`h5NvXz+m=5FL>`G`eb3RXAP?I{0gIPb<+F}fO2&&__DzhgRYhvMskKSU z$gGXmoUT>hr}BYXT(QL#+b-=;5Cqrc#uURs?6^WmcljVW^ z?ymBAuIMbRp54;x`2cQZ^*)ba>ml}Gd!hxgVcj~`lZT;BnY6fL&Kw-cqME;_wNaJ0 z?&Kb*T;Nx5evrn5tRdX#Al{o4^FjB0Y=6`*K<9ga%LGF%dXCt9V&VQ zz3f4qZf53*>}WXt)yi|Ps;Y9xkfnX$;tWvoXRjLY;X0{(jZX~?4OLY?eRBBYL0C5h zP3f`y*cUE}NV2V6OkU_eIMv6%#5`5n(S3PpfQ^{=;dbX^5)YRbzlcb+TiO`#hjKLP*GDd%iX~(eChC_1Z=$^7I5+EA204 z=0c-ND|n`+Q{d19{?jvz+z5^LMhU6EU_s%Q7h`n2Sgatjy1e|wf#e@}cEz*)3*{lI zn0vM@%Dj4CRFaJ%KexUJ|E7-8Ouo2za|66rleKNV~d$C}w4X zn@Zw~@KcnFXy+WiWqm?f02l=LkUEL7vNCvnz;QZQ>|l3I&}^i%-jDDN8}Y(QVq#(- z|BmYB`r!`})5j#jer&5Iq`_z$1aPOYnkb$$OEKyaInGy(qfDUyJ%gDC0aB1T$g=DJ- 
zm1d;cow5GrQ*>0}&-@h#R}uCWdEbuUjsu_$T-w1WE8?tzSA^BKiW;Hn7m)_cLA*M*%^BSY`_u;qNsHR(agijW{|Jl#^ixBlc)yqwpX``x@2B**a;|UIt2LDdzJ7-aoDxDojS;Iq0p@zH_X~h%u;TT1DnMJ{ z4hrd0NY^>7qu30ZM3A3{6af$wT0f0@G)X00k9xjR% zTMEW%elu2>n>MU#F%*M95&Kw?h=(gev;lRQO}q7oG7c4>Gx=Pq)6sdg9&wOCTucr# zd8v%rl->C=YCs`u8YJ=ex(fic(*IX%{^qrI3b+tR9&hbk>}_a?ESdvcxN-cH4_FyZ zZVDi18)KoY$f}mCVmmOsJC7RGmuR93k$ahgn^nGo*C6P%w;2)5`dIwtYXzNgs7*y4 z7wfYU5(BQBXAs+Q{!}9^2NX(Ys7uDf60>o#%0vjYM_3TRAqoTGCJSjD2wWd{+f$ii*a-qOsstlqZ#5%=Z6Lynzdu0 zDL!yD02pk2&ZbV~CVg!JV48X>+;RqR?~EC$u4&?YQqaa^-C=B`$~QIaU!chc*F?Wd zUxxj`8kM-Ab_>xpgHrb)?bpY)K!AZ*3*b_iI-t(hv(DjXc8hbZ{PBKjXLdE2XIrJi zmhN2cB75wKiAjT8XErP$nIUzBY%+n!@P2)f*SP8%n&ARK2C8s*M|?vLhwAwFSTjxw zOM`1#;Oeiw=aP{s;;c%eyejwkqYug&^^Jh2fCuq?YzOYJ4g}uV_v-)%v;0~EzOT)K z-4w?O1)$w8HyRZx~EL}r-e-d z9FFF>tkWHn{eS`+L~N#)r3ve7Dqn;La0y;>Q#*afQo_Ww-{4_mN}~=*I*?N0GmPJA z9iOY-f86Xj8onMCyR(AH>2?yfoH(68rgP9xUrys3p<4DPSpK!hKm%aa8Jez7TqVE7 zl>pLvj$(N}T8DTu(iE!K(yuF(-fZv!9wnm71GU3EN+P5E8Fy_VtKd0x6;uO2hC{N` z>ATJhEmq|^L9V(K(Z3r>ZKE#fG{cPjMipcwm8y?fa+}BQDsxcC;Eceqq5GqIKjRTZCuIZP`j8VT?|TxI}|Yv5NTwc3G8zrSh%+cP(S z&?-{^G3o=z1R-G(ls$Vxy{ zI%^~mQVpmq^4Wz2phLJd0=B|Ey&SAl(C5Hb34Z0_&KpVGvt9~3x8a1phXHU)@3vT} zzH@|YWEKD55V>It7c9tznol>^wdmv1-@oKJ4S(W`$dC!XZrzt70B7c_2iI#56}Urp|3%5vCrY?1xHlWthIQjsUf=H8 z+SQY(XWEmneUY>N(lQ10QYFx)I?`IPXUYYlg9a4ytiskj0cR>KWb+qQP*zUuUGNS! z=U{t2H4!V*?>$v^_l|IRH&?BthfT62Ai)is#1f+lfC6#Y$jCtkH68U;p-6}a5zPKz zn88egM$5B3ARlHCrQl?WW>y~C9>a77wnd4#_KO!U+}zyYag&n3aLDN_Y)VA$N(CEN zjFgJKuH|w7k4&arA;Q2AumTV~C-Iq6$U)gZnqPO7t4E1kd%jxMwa|PFw|6@5@bFj} zEV$3e$Oz5OGy6~OD~}yNjr0qTe$oK$0uH4a&|Kn+OaLJ5h1Nd-{N<5YrS5$6q=^Xz z|M}3U48{4-nidTAqR&RAcD(>^&i$;d#ondytKxX_3)x#ZkYu;Y5wBWyW{=8TCO<71Cr6H0ZRm(CPTJAr`Da}1r+-VbxJTB|y z`B9qMOX67ru~=a4s2oI{khB2@2iIiOUC{Iuz3-=Id1^ynUSFj``5}ZM6G{hez8ShA zI)U`>HlVe6_ewd>s^|Wn!)(pEHtklo5k^;fi4r1`hU!qK6iDT@+O12L*aOT&_-5vA zwb^)=+Iq5qA2%S4I818R;CSIg8JNbNYB!+9?=m{ugN7hiO$Ox?8|Ue)nP7tA4Xw~! 
zDXqQ;%0mFMG#MBO4|f5z2B-Z0Rd(iKO;}HjNd9ST}*XQeJ2Rl?dy7XR!T(HA@MV zTEC&iXdj3A$@lL4TN|j_nfBi*c#sbU^UqNYwM#aU1i9o@FWgI)k%;8T>i!n&*9M{a zXXE&D{N$5WlP@2q?UxDl1dYJKfuR?^0kdIHRZP4Cgh_c@$!nv0gCFK&I|zEf!~R9UIs;HdeM~TqpWU(g+#i7ZR2!doPVWbz1MUmb)W3f0z8! z03n9#CmrNpgH01hgI+JeM&#s{xY;W(`8`DxD`8qw+}uQ3rstR8@bX7R#}916=j1-peKYOcmgVm;nR z87Y5wMGyK4MlI2|sJHhK0!J>N7>rd4+aSN(#ahknlJ@+5$A%&-wU)=jLfJJxX?2DF z+v$2u*7t$}N#q#{< zE7XQWHbU@BaKl1hJ!NPPRxj4R*g9Al5-^d+vPzSka9;fxG%00IG)LSH3mUYpt?N zI@*S{aZU%lnRlFAUc_=3I^ZU&yxBYfL%p2K3v1%~lFLifDTP1(3o6GO}EDSGva^J@)jNm$N-_Tl;@ z;^>F;mUf8H8K3p6JbE_OXtFKW)ZC6cAl;>XnpP3#5`DN$-B5U1r*XEYuyPcWV#8gGOWw5?4oK|A&oRfV!9&4(EIPISNE?=NYXH~8 zZD+qpb1IaTy)Z1Fp5(N$Jp`)nmu6hmTv$LXTCAZG=A`m(5=x zkVmAhJo=(1;rH8?s^15{EU0c z^~?BJBWpk9Dr+@)q&C{NR^{{ck%`l3IFHkji_o+hZ8P=WOia$o20NNm*ewsNR~sT< zo1ubD6NNT2nN52oAb{1HzJ*$j;k#& z1~&b}WUT>Z`N2OoNT-taTGfh29P2$TnmaOSjmb1|^7J^H)KHN{iael4?kMl@5YO?+ zz27{s&yf+;G1eB#hgL*NWBQE|f&AI98qddSYOENUgs#@%T=c_3d~6IDOcl+fowj;| z`43};(9WOV_Hlu2Et=t4AB_1}B%a~aEl;y#1<#Pk8tjJZQZHs-t-6(LcQqrfcl|1qPs7@ydt!q^jLT*Ds%}-ELHjI;H*n1vdlfBWIaT3~9 z-|UQ00k>fg3LO$^a)y0_nf1HpSxr;=+fC|_P7Wz+6Z$092Qkt@`kTlhrL$bU5Ap>D zQ(Iv9fMD3ad_vg#sJ~wR5OL)A4r|UB_KA7trTf?LfdWQwnf4 zyHXCxH(9cM+%0N$xN7C&Vpw>%yn;e%Vj{#%1ppOxG}Sv0;X9Sq6!7`dvW1N?f9I50 zKhoun`YB7yIIG`jR68Z76Tl#PZwxrP7Lu3J8OX%x=|l&%g)3cDCwa41AK z4&X4hg}n&mS|K6Xq0E)g&l&m`#?{3oBRyRJClO3c3M-J^ zT9Ha2!%E*)<*Jho|NG~D4(DxnbxX_9r!Jle4i(Yj*_f4&_H>-rh2dwX&5s@Hgf~uv zMp;>H$X-)^cBsrZi2{TNZR==CRrBQ|^|ocp76pX@GjtdLs2)l>-%WK=2*Do|6%-T{ z75U|UifdV{we1VF-qlzRoBd`*%oi^$#VGz%UtdoeeRa96t`4wvIBIayrcFNRkKL3$p7=E+&+H0?Mu5+Dh{N!aNFPNKMFN5|Lj(jT z)6SlRSKe>ASHpiycH+wL@D&h8~Y2gghE7nXMR z7Ut}FRu-4**{x{lx-$}= zo?jR(l_*QT9s70k%!yAZ&TO5T$@E8uJdJ@gw@`Pfzp1c(56xF5@VyYbMKsALs7o1l zFWHHZqdrCU+hulIj7z-!&!;}JmvESjZn?LHsBaaX&hLfJm5CCy_0Fdc1RIyJFRhe3 zN})=+&P&78tJgde6=QQtg#BsU6Xiy+tUsweRLBP($sJdttZnV+MNX^LDN7 ztm@7)A5Bw}RL9Rzg%TaEK4nz@qT?yLq#O8An0JfXg74uovx+F*`(s5Hd_D6 zV5;B;$&Y=kgLkHjU1AoUD+bNZ_|(QciMKsB95_0+7q_>3m+ImBk|%3(`u`wrlF_n$ 
z>O>1sSe(|-e{%KQmXBGn1dY4-N0Qu{rIRmfuhO4J-Raf8Pf>HVdR@ru`bkXBxz_r! zcGeP{Cdz1`J$J+e>+*<6wcceh`f3!5iNnP+nkm)`wePIge6_~Wk+Y+0s>D0smG6$od)KI&ub~$A0AnA;HpHE z0@&{HIC}gl7dM#n#=Lp*Uc0HWa5`)1hrzQaxW0H>#oBR4hr^ zwHnWy_@BS)`16D=;rpr87{`&O)>e$b(=gWlVhc?L1qA~G1O*`h!Q3tUt{yhf278?& zX>=TMmRw{>n0RygJQ|I*v9-15ryBgj@&*d`FFOXR%_*a;VgLu-L-Dz`&fA zfFSGb@rtOtA75b>ymiq}|NQYfegD4t=J?fst4%lXCg&$PHY{t<@vE8$G#lQgcGSnm z@eHk#OLF$?bZ3hHkm1P~wg(TK;e&c*=rr-hm=-)X|Ci(-;oZ;YJ+|{xMMdf|1Eq&TgcF+!gEFlM zu*28gzs8pGLd*X-AqDUA9Jqvcm9OZt2rwfWzmuZdc#C-=E+aKkKe>k@g{5_M$HYj4 zer0R&ZMY^EQ-8!S;&V=6EgUL==V^5=Oyy7c7OHg#Rkqa8ZK}4TyJ5S1rwEBL8}p+z zqYflb@O%E{b8MxsUram*0e)_~WqC!FZr(x1hgab9<9Ywxmx?z!iqC3ud3m}kO>SwV zs^J6K9TLqd{o!(ZIl&b#*5wW{61T&hDGTiyS^Vs}I6cbkD zaC>|;qp$B-Et->)bM-@I(hO5K60f@C z%$%BhE>cJYkaw3@p>Qg_+Hy`#r9(?S*-`%fEv*q;bF;G-So%w>M$K}o=&@V#{cBAO zaneto+?~o6*!83Xdp)%H*Rr!j2tKQkp=`|(g(L|op;zmDd3rXI)Z{i&I7IW%QuUr? z>nG0DyLax??r%_3FIkif^E%A;t*)*v)f^qR@r^3y%SIIG)}1R`nwm0tww)5LgEmsl z(K6ZfX0y0}pmCYcOH52GFE2+t4Qs5m9@pt)wpae_M$w7qe~X2|MS$-88FH=^)UjD z%p|rpHWLZHzP>9@FWBM&1zk7$joMH(y7PT`_srmInrmlgO`W&9;trJ;=QehAs$LB{ z3|cVA#bSJIShnB;*sxLVFP>6}HRaYEYB)bbp*TofHs_s_LO-pgj=HV|?4WAO%G3=n zQr?$$p2p!2BmriUkvlz9ZvW{M8(!!tm#r{{xw*MeUweD|Rry$Mcd5)W$5FSEojJAj znXajUV25(YrJPo`@c_PwKZG={x3+4(f1hxF{yhExhq)4?^GqiK_XpP!o*QF!n5II_ z%gY;ixXT~22rgr9ZLLs0t~y{%k$beaRAV*gGyC5r;l(JMUf!JPYS>0(PZkSc1np;k z*-shj>o*R?9qmuqpezHrIJ@v4WO6vPS!I$r73Vxj=DfXCb969P=N}LdP;VQ$!J_hL zurPuDzx*Y`qB@(qqu6Q9LQW2=n$&-@!gXgAZX4Zs*VQHl8v(m%dFT3QQVlxH{6LXe zX4!&EmnxP;@3&Uz%SVf(q@)yBx5cul33pvxT}?h^Nt(Zx^Wxv&BV}=F_yZZ&B=}&n zJZ~?tot~f?b!u8#RE@j1tgLJhwa^(7>ivygRGf&%;qPy6GtdLs_$6$uWqKaR+Pc#OvF8gy--v>*qVMVM2)l>&F)pEh|;MwCt zWwv#7btg`oFtq$kEOZMWx#TJS4ScUeo5C{7l6`$8mog=SsAIyydS($2u7iX&KebW` zI(_jAj<3)PdSUtK(d(L;qhZI<;UvkR1TkOP7=9iRk^j|Z?I(>KQ zpy;B=5c}`Ql%Vg@{Ebic;JF7O9o~Q$EFbdlDf9WG|BaWk=k(0+x%Jtl1UhA>zn)tD z{bm$CU*6DXNuZ;g{m+2#zkm8ap_Y;kWB#jG#puSs)-R{j5liC#2#^!E67=m26H;5w z?#0Sla1=)U=ly)3%;-E_`(;)-;6TRrdo4cdJoG)z+@1*doq9}qnX7ZjfeZr|H$JAu 
ziycD&e0*P^$4H+?g+@hbDl03is;a81J8#ab5B}HrRQaH@C5o92mXYRayD$2hE(E+N zPaZXqOPHt^r=de2EC!3&L_}(`va(WB>;EgZ>*ppUKe#j4CsQYd!0hi2*JPn;goTCO zA%lvGi-(4W7Q6iC)!rjUSaIhE+`IQ7#v2{y&FcUDy+)0@o1&tkrDcw6TEXyt?!DiU z(K)G4L26~56*KhZE-N#$DI^A7sy_T4<7NNNA)->EQrG`YQvcqAruHZ+-=o1sUY{oR zD66aRZqGomzgbc@9z65E->xIpRL`}MR!mBs+WC%yM_U3TQE4SO)P^{a6_v98uV=%X zRizy{P1@-+{p34M|GJLpzmKQIa0gOde6=ylso!`2;Q%u*(5jk{e69-?NkGu?^Z(@I z-t6@#ign(%i32XPUGUZ_M=0q1lFBl$8?*cj)ZMBofY!CzjG=U zXSY^cu(AB1FPxI5rwssTgp5?V5R=&)Me_KcMQFDD7h2%UU~-<8ca@j-Oh}ywh_n(} z@zQ+O7u}Us5+@WD6S^^Tp5udJtfMVOp%GT1Oi3q}mE(cCYr)WH(cGFOHCZC-@^q3& zfAK=x6n1K6L*UI@R21j9LG3P$>vBUxMCRp3}F)B<6q{Fut|z-nY$gKAfndalRvH=2pSA&0phi|&jghAmRIbCE)O8)l@! zw)C1KEng0Q{5V^iB@-ollW!3$K3<0Rk!Li2qobevYB>*rLuaVS!y_whlEp+e=$IJyjZC$~#2ad=mYjmQ-R|q1PqB?L*5k4JxksyU_g5a2iP%hFl@wJO&rjQ- znnJdQx==(9D;$>$pB-(D8j7m&6j($w6y|HP6G{7jqhyQ&!zy;wRkJy=&q zG2&vS`q0zy=+Ke5=#BxuBYSUue|ms;aoS zhMwLx@AD{%+#0up4vZK^r7Fi-7TuR2F{ z$hb)E^ZNWT5!TVG`K!N2g+<6?9Ohft8bTZz4KXvW;zh{+j9kgz<@3w_pl_QBlE=K# zw;wwc&wM|{KFVXULR|BJEcVer!qpJ118!}JAM2}6U~g?g^0BR(aSZ3@W2xBBhf5gS zDGZDgspp?j56a5E_n?9*LEiuuRHO(*9?z9;%t7@~*OtE2q@9B+?2m@zzTblH2%_OC?dvLK-b6g#_mnAQ-5M7Mv1_+q_E7uR+`5=k@`+s?&u)d zCG@xm$9O-@cbR@kza00Kt;$P-Sgus;vwkyVWLd)AlQNDHAK$!#`X;)k%}+5x6>Z}S z7J43`djoO$Q|?!8aulGCoRaX%r=T%M_58kWx<|A?f;Ge`u+0x z2^pKM)i#J5g4b?TG3cjUxt~fLmH5lzgY7C3oT-+}*msmNqR8=4GAsRgwG>?Uhurrpz@#)+R(r~9%d$nA`ue_nd%Fb4H)^%DCp#E?Zq(TbWn`4*cQsDH zhOYn1_e9zNRwc!JsYROmml(EiWVJ)zzi-G1{^?0-SU5#k&Wbu5txwRmThEuIIgnZ! 
z5yAzi=IR*p2P>Hu*}%HhRvUict7Pv=d$0t^80!v*lqnM7ut6lWum?` zM&aUXWG;feI2>Dz)^ng*aSs0zxp;?f^QPYC?`3~B74lo@)24KztL~HwmM>b$wRu!l z&ZP4x3637pGulCfsq)y}9+V5goRV@L)y(<=_(tR3g0F}PV{Tch>`^LeqGA#KAhnE; zIYRG^zt!v)Cb+$;t}8=0{qT*oMZ<7GE#6VD)00Gg`BSWboM&A6 z{7<9-i*BGvF(VOXhPx~JwWabs?ZNj$#iC7`Lbtj~hPzUxcx5U(-BAzvnnePKkEY}t zv4?k8Qht|dyO*0b`bQo)J=lC5e#J+tiJR|%bKUUHd=0Z@XSao}NY)=Ggx<+|*Pm!By0nqQkXa)-KV(LW&H@q_yR(cf%e&WjOD z6HtgM$))8@G8bqv`0H18sjdY;Rs?`f_u zgMc`M(m;|`a%oit)qG7;(^LAkZe%2d6-~BXKADT}RQDlw!(qqSs=+R`lWi4MMIqCK`-91Sba=Y&>R%!t2>Z+8oR`<=r4EqouC7|HDZlWZ zhKHw3=S2nFX1L1;_`~TN;cTGz+5?c{!k&@be%baP!@bi18Xqb8kelrx`iDR=bIaAd>WJT5QK4Gt_| zvb#KDrCm7Rbs#CX#mwMhKDByOLQ2^c1Ln8FKU*mYo}WI3qQ(ol0f%;vq{Pi7q^8T? z#LFYdl+Uz@!hx`}*w-t3IEZ+q*n*UQi-z}1YovgPFBl2SBj$*kC;|1bD zvgMq8YYVCs)#f|7h>QEU)Dt>+2ZBf0x|122+LecH`B;U z2cw6Li~WCGL{)F{-DPO_hoJD7#PS?3ANu9G8K1+zUAJ{^Ek7d$D`Sc7SmF6^O{`@>A?MR!R?Bs=Jo{&$_uV7Lz zv=|W{XDryRInudF83vYzoibUphAMdSG8f&;B|c$?_)$4vm5SD@UoX3ND7X5yrV_0e z7|qVn>N4(;V@FzHo}IPkBpQ~5B1?JifFmF{E-!sMls*2~J!;}?3p@3+cRsTCnTbOc zL?u8T1@&0wv**}|wAC(0-EQHYoom%HvaLmki3IJDW;e&>v33dEcZf$Rhcoc4Q>6WJ z=9Z<#&d6$6nil`O_=Hi4(N9%`X_Yklf<^8JORJkl3{#;Z&7+553~4$uENj)_ybs7U z!h+k+_)hs%T=oT~4*6}F){!uT?gVM#|(pt$8dZRPC)yN}cx1gy>Y@hGqa$r@h zt=N`A^Vvy2Bt>}*q5mj#ZJ6v(`P;r?2c&}WWw2Ota%tc=yd@&M8bx1YAKKL4jMWfR zVT&RsFh7aE#hw-+EXxqTaKTXySmMz?d<=CKXsxQ2?4fX@X4M~!TDB5z%Z7ew!Aj&8 zes**xb^g{M8`Lmq%l>fJHmN5Zu12Q>X9v`A{J5>QyT7rH6rCjA;-p7glB;utdf$gE zsI5>$oP;u^S9CY`ZvPnSa7*s|sAZ|Q7Z~8UDfQ|rhkf6~x&oKY{?tO`?v&7N7M$dK zU$|!bJV)9|dICH4W51!Oc$|xDVF~D%fZJxU{j1M?9F7fw&5eJ&n{m436PqryE37 z&$}lp_zS+6i%WxxX-dil*{;5K+-zDW9uX3#6tq!-OLz4^g#3F$=h*<(xCIe8vLE!D zrdB~dmkWbTiW^_GrXx-0T`nRtIfv`$5|UqCLVowMEx&5CNh!RE3(F7U#bdvXsPniI z1$Xn@3-X6wImQT`P-Oo5#R`#`M3bSWpGtxTCf2p~g*aS!)I=+O67abC=MNN991_kk+zhJqXEfuF?v>6J4 z{Q?h7$%VK9W>)}z*%x&6yf0f3A&3wvb#{zoH*Yf9PGRHZ82zGl-M13J*-~gsH`(@+ zp?Jw69hIUHy0a173y9u-Zs~egi&&RMvf`eNST9Qv2j2ssaL85$Cw9oNn{g#(17qkE zvu5Eyo(qpGD^7Ww@Dx90$L%5|++wuGsL%Pg#|Vnp_gyP{H+;|4^zeg0%H_z6!b;Uz 
z9>&_<#cri>?{6EbEAQQVwo(~1QNrJ~S7NgGx{2h_M#&?$PgpNkwVbjDa9w1M+E`1U zdn}O%c`ns%b5B~TVL)7{h;4tYquOj@VZ+t@2}cR{&6&L`o>j+u(3e<$o>vHOOpEeD zhabT36qnW^A%T5kyV(dgT)jV2k~Yn=a(a*Y{0rE4IyI3LtYn!!+u3uWX2Y^1G*&+n zM5Xp(=LImzSDIGF8*;TPc=`GHnV6V>{K?Np$1y#d(u)5en)6{~t<7qrh3IUP(3q9csvyb(>@qwmc0(eR|TB1woq(x#sR;Z80a7ZX8ZpLEvQ$8lp=h zQ4fkHZl94YEIR+3U|Q=qcT&0F)YID@e6f=$KB}ZstGUmRE4}o4tgHnbHM(Doo%Ev- z$t)(U*T<4ZZ!aK(1!4HZIucu+R)G!xSn zR1@Je>c8rL*|FEwZ>uVqYSjjh%-j#lHxt5(-X2`la)pi6uT$PJ`kZ|yt%b6{ChovRaX?m{Ll9xcWAqFS7BL8JS-Rf&$Yt0zfQU0N~z z51obEgkBmwe5bY7)D76w)rBg#`-E5%G&O)(tZQhXM(=Ey1KjjVX?&^v>16EKC#3!tH-$B+0j{Dz_hjlWs1Pi1^;saARE(l zHk_6!c>?F-Fr-I=wX6=4Wk_e7vw+L@%1S;`H=zyej@41o?}eI)sud9%hmbln;lU-X zW~v7`gI9zUy`@>mmdv(6NxpCb={xwI-QBI-1t(X-Z@Gt&yK79GRdM}&dWF)QRSl_v z`x=X89s?D(R*mg4wBDw_oFHSd{VETJ%m*g-^JhjrK1WU8VE-cvRff?g)#0uJJAu3d zh5eV1=qD#9&3^SV3287Fu{uHuC%a^mas}5ND2XPNk+qn|}hC&dinz zN$Xpyfk{x+Wf$MH)VVI8Tx4Ip6y5YHvl$jYbh<-)6B#SyQk_}EAKjart| zZP3Gzhq*;kxHadefMHoipD(1@)9!<0NkY-ahxk zd@=?y3I)q((f%oTKq&nlT$UvTiXlhr?(<&Fr=616cNKFF%5I?NXv^{es<%p*T}5EL z>Y73yP$Ue;zM4%hGV5g)+xqzAWKZwsTnIqaa*c|+xkF@^XpM9poNm~p2 z3?Fn0aw}DSF)~;>uj;?5hD-H)hFo1ywjWs&wrUemD8|6t&1KM*kJgK1Lk_-d|Lxg) z3_e`wd+BVvZ5dh1Z!y}z{l|Ecr zW`nfPZXvaIE|lE#JT&(6^_!qGKjMurF8}cSPGN2I1KSbTwYJ8_#_8$l01Cd;7CZ3a7aY1u6Z zvJ|u}7fS-rA?}eb&mrvEB&w1708upKkEAjs;C~39_ymFH!N>DaKtduC<{SV>8*D4% zG9=i=C-~C@Z9=R$KB`G%OEseFCTzsnU66OU?Xf6bbWHlI2T3V`l$1mi` zNz)#AmCW@l8!+7i^_B?!rf#`()iSPI`f`oUa0l9EhSYC#kcO(lrzaNr`22y4uPA8T zeaf)^{U=}%5)>W~upAczysmYLzuvUQ2%=`rD(0|H=q5QW+LUeH*Ou@6oNKmm1>uir z3HqBHR~u!fSTg$2RB1_s$qtM$_jZei8=IS(vDncXj~dyuJHh{$W1@dx=UKwnN;?T? zhxH8K_~&*K`bYPZuxcw^j!NKQ<|!zv#ibgiaI_@-Oh=M@>OWk=NZ=F`*&K|{&wr_;HAw3CZG^}B_m{qfjpYgMCcq7jzeM2nk! 
z^XgL1&!49h1u(K?ZiAM6-|mhPNM9oXhKTZ6xrFSxrjJ~xty`wU6^n~kQywR96REw* zV+ABqltLUuYZk@?<>sJA?8-YKuh55k$A~PH-$fn@xjaZ@OuJ^jvL9w}x7EliQrW`H zJOKbsCch7liexEzuxeB0^>GmPq96Q4`}n33JSToItY&m0Ec+ej1ye_V&Xm{iU?jsQ zID+PQXP+;t^3MvdJ|)X&)D(jkTQg2?u~JI(lf-00(Obl7Pr{nVbBNtZ_9bJ z7hJ_RAkU+<4myPlfsiryP2P6`{>->!t{(CU#{#3FR>@6ULsZ#4-Jlmv)+s8!Lrq|o zX0CEZ3mFGay?KH&c)$~H`(#D?{UqQ?@L-_f8L!{QDLzX7-m!Qy56bQw&I$5|s)%qx zbECeC@c1wB{ye*|Q6%e1U|V(oYbn=Ss>AQ;%BAp1o)cen5ZIq&9R78){s#00kaJe|PvR}s@fx=` z+bSw{jtRF1m_d`-oBK1eCFF{0bO@=+ofo5U&%Q<`%P07EB4=o!2|&hZ1=j*D9n~+9 zBDAvV4?~PsPRN^(7e8VFngVaaB%@+#+peZufO;->d`S^8T`~8`V=^$mL&M}NjC@e7I0=T*zfr{Wq6_~qU)fA7OX&>8fTTupYQJw0I3)^KzQ}oxKj&hR z)=hnR|+W%ChHM5$yN%;gTx}vniAm*oO23q z3Yd@ZfaK=%{o{jgOYYMKPX^u*Si8`i+qh~>d$v4fFKHtEmCSO0LZc?eGRhrpix9yVmm z3K+_6A58F@B1JMKVV{)Uq<_rMLWTZ$2>cj~hE(f~82Kit%HSYRh@meeSCemI^q*qS zeb@H1lO01rsB;SXi5)&q$5&b!$3GN_cZ$H25t5{PS-_!4Ap#_WJg^Q(;E4Bx*Ow ztDo#-6daCTU_3s)>${1aKm)-!)ooHRlt5m{XlI=tQtb@bEeF~YoEj7Q8ZXxRVy$B0 z454l;#tj-)YGOc=XvwX{6M@SB;#peHr=dJH0C6^bg_B$(lF6@`PF$afU9-T<;cD`; z=!sbd8o%$H@%V@>@UlAb6(n+K5CI>ZJd}c#vo}QT0H<6W7)`x2A3|EX{K?QCdCu#L z75@D1Q>Gvkpj?NeN_J_5@>U!8cNH4^jew$T=i@xbU%KT6r$$tg{lFQt^d2X7N9~ zZpHKS^U_~OOz}(DZo|hopIbDEd)o3>Z+gZPNL`vxg#f_=m-lHxa8A{6upb!L ze{~4XHy&Miuj*}3maOuN8|j~$*u!j`A!+eenQqSaYZDO0&)jK_-e4YvS0b0sP0Xtpbkpay>_#7Nk zV+`m30A+~h2cq<%pA_%2U|hF`9r-}#2;~=40~A$XnAtc+`llLaABTE;gP;u8~@r&_dq&hZJ>1>MK6MJlE6!xuXRULNn)Z?}2?E zW}mF(*XV65gpQrh&`DyZd`M}ES2cLH-H(Wk6L5Bq4(V<8{0P8DW{X3IJVDAj>Q~Pp z`U%~iMmXKCK3Y*viVHb0=4AtQ`alOjacsJ%s|X2F8>0#v^fCI_Oc5(4g!csbirw1l z(3zkw+qEDQE`k05_dAC@znr5j4?DrydY0l zP}N={euwwVP}7W>cYWn zs3EzU6eK)*^1dL{H0J*hd?CV1KG`}81u)lu=D^2#thOafO7*1AQ#eWpQ|I1}mb~>5 z#s{3QvHd8n1yy2J?E?FWb#OKdw*=M9&?~NwAOCRp`ucwsC}k~vjt%9u5n3k(0<1P; zfGdl!RF-0OHNb7T{&q5=VIt(^(@OkGff?YQ4yK*qUabZcP13w5O zPCPuoh_)O{N-Z22`*owr^jo?;s+KOCm9p6=2)!}=g&6+?ei<2umA~O4|6qzrt}#E& zaG=+{%gHFtVJh1T{SdF6?RyhQjLyl^H3>dGs_{wdgK$Nk6HCM|9Xa%;zrxmokV&D~k@HYnp&rags z@fDf;>sfkR(`Fp$)I=x*N4PbT+dYcLver?`)MYb0En8mbmoJ$wAVOCtv-VR#MI6jW 
zCE_mV>lIqH{QZ1BE}TuXK9#^y!+2&|KEJNsR%)jeumX_8K(`N7SGzr@!F%8IOE|7g zuPn@)a152b!hpkvb={=o9}zQDu5)dmt9gBcOSI`J$hXz=C(=X?On{zo*_g$RjjjGt zlCz&L^6D0Af;wo(ZEIM&!eO|^!^24DvAqQO;0#J1Bn|mEgS{4=-%FS8Y8&Ulo^5!- zp@!2$fJHD?K!MCqqEsSqD9Mmb2#q zU{!zk8mc9~aQ=~hW*6xaFP`BI+9ozbFEokZvgdwEw6NQ*#Cd%qBNrE!QR@aOkZDg% z|4gdcon@)b8gB@ai{bC??k+OI#=a#aaX;ATb)4_B?o!XD&2Ee0b8&J44QQI<(uiLD zhhX0$$8KvCb8}Fs>g{bUi9G-6i2}9nsQcc6%xGe0QwT%LD`>h~v@@uUtb>AayC3UL zZ|iZm7T2PtQ*Ji{re~m06rO#3itxf55eZ4ScZL1j4vm`!=io7yBW7U({5&U z#m$p{jn^&ObfcmX;a&Zi8`v`N@DyI(q(*CeZS$NH;0e$GPy`tJ%CZt^Pc>>!Py*_( z*$_Y}SbSR*0*zt+R-M5rpTR4wERe@&9x8%*nb&Hh^2d+IVp)dU0_6+EgCKU+)X_OS z*cz?bnT*O`F1Me@3lKz4 zggG1HdsHlj7*Ib@jHv0YwoCLT?%1}6o$un%C zxIZ!F{f>@~<(ag&=B&}f&60W+XOOkxLiqUjPB~=5YY}r^;jQaF%O)->mbUH()%m^ zAY8F&GtuQN`%yI&>vG}x=qnJ8Ib;AQMB+s)zWB`$3fBRm+ESr9WOpNf7Mr@gnk+;;xV!xIc@z#YJJWo-CjnqT25T7Gg^A z`*(h(re+ka!}L_Ae7zQj1O0UX%502HW2QS(EmM9ql_obmy=gnaJkd;~QZ~(Hqgzc| zOAGuoRgl9mXD`DH<#LdjHCNTV&VKXm^RGn|9j)^D!vb3a9J;SijbUnrD{4fa*bASD z05Twl_Fi!xu?@r|Dj8@5#0eU|G4RLyl^qWx@fXOyM*<_IxSiR=Zupil`BL;G^)sO9 z?lj{gzw{_+xW%+Evdh3rIx5bI>swuK^pOWQAK%fUV@-%iwEMv>6PB^88`?NI75gAO z-|Eq{VyaSPz7R?Yx92jF+5Yy$na~;b%QBVXED)nCyIlucgN%0naA5`SY#kkL)u1Y! 
z^78V2+8G718Tz8LW4Wu&Yj~rZ&u|~!^ocIS%|4my^+7N6mG!#IcI}CpR14`hUOK!B z?j9yfgt!iPK}IdJBzx5XQeoj_u^v9U7rss~O~!nLHY&8U`{?Td6oe z_zWUx)1@PHYIxR6*Wg;N`i{@fX9+565H{4;N4j_S7Z`;e-he3P(xpxxX~mKqB;$qZ z4Ooz_tf0Mh>*1vP?o8UGtIe92R7%^b_j$G18`numN4bw>=mMfrT@?>!T3006N?g{b zUucQj_2y{T+YVOQ%D4v4E&9U&b9G#NE}M{YRZQxEHB>f3prNsBfBP3GYS<1U@F1a8 zSBoc~*NTQ>LL+n_dg9Sl{=_;!yp&;`F9+{qxPQLe`w2>^!O_EF7$?9FNPx1OMK$ZA zNVIx^A(Jf(D7;#q=5$*A4HE{t`#Wodpk|*4k@F#;$!VqHG5rN$(T;x@CS^cTJmEbu zGQt;QzwnT+`92#QVa!mOLwuA+1u^HV`X8Ews}RAPv}kB(YPk&E1-nQj11Zx#bh*v` zN}r1JI805Lz-I=zFc2P34b4@9Pkyv80P%22Z)43qj$>nE13%laztRveRqQgQvnMXS zblzYb1kkZHGH^Y?4)}EWSk5}i4v3mz^KRf>fa(D-)4;8HzkZ$w;1B^HQ#aK){~qF) zC*6%vH*P@lMSw8}sNC9tjs=tP$E&^)L(s4hH2z+6es6D2WPiC9RQx5Q?k*5VCJynV z1{FN4jEo-*7GcgL0%iv~z+)$;rdSGKz$8^VV)gf15-8KwUK3J(f|Lu%(274(Wxd#W z&5(p7Xhbc5T8N*GZQ&?~U)l#vKav<~JJo*i=G}PE*F!F~nrq-%`I4OcSollU3kX*< z9{VdYVog9UkGL#V(UV(`8i)C5$7eY-e!QK(K@FoS&{!A`b=_MisoAdMn5Y=F@6}#a z(9_k`s1o0ESS&*Q`0+iz&7h^l=}*Y=w5O@Br$&KLGWDeOZ$>wox-kvH~};g!sF) zA9akJ766`yo+wHuHwlE43@!C=U0vU>;|s>#6W#B$XFUG+-p)6a;fKBJn)SEYyOnPs zPu{KRNc53cr0g=6qUKIW$mnV3&38?LA_!=voWTep^BL&|7@c}(E$f&F{1Y$*z(zRq z45Wy~^s50ya7QyY=@^ho`>NHGUug;uYA~N&>Q3;ml>eDRoXxtq?0xdud4`(|9PHJb*1uxt8(ziOSp-gIIirAsls6#89?d1MaF9x(SDC~maF zy7>n{B?L8XmJ%L=pE`P`ZStcky@vE+$%%(J<(o+4WxiT2pw?_$h9`Fu6*Hs<7~!^i z-cdZ02Q@;-D%r)`gW14p7AIUUAEv`R-fR4(9;0qd)3w-7Q@>8;0wUwh6?<~*FI(4> zU!LMo`BmNbgUP@IZI<_Q@bN$j3v}E}5PEHaS1#VaRC|jTjz%Q4pO&Pbe!(G6B5q<< z4IWs8$4UdpK&+W>j-g_$ovF+11_pwPV`?tzfF+MR9>q%Okwg_#hd%pZY6RvtU!)I zfvIhPscOx$d1eI-KE&0vQY(b?`gIjmRh#vbUr1(25Ottw00|V(T$K80jXZB$Q3BMi zLk{@Pl=(;<`2p~6_+o9lEpO@eX@OzraeyeIXSC4^eK%w#*aCA~6(muYknT6~@lz!S zyBq%!Eir*fq@HeH{GjNYJ$UO_4(!6J8)z#PtRJc17tTB1TD=PV8?bQSe{an+44gxX z_Da3TDZt~3Ql_q$C(PD26OzeAn=YQOg(i1Twr1~4IF6v>viFS*Uo@MVz(l{0z>u{= z8!Tz38dyMZCENInfAEe&uabhpQ=uvEfr8$z?cxU#2k&NGG1FPcC-Gh$4HuL@{GvBn z(JL(9E`_(@Cp#@{6dF3U(|o260rY)Av|S7j%#xL+;zdBqN8J33H<4L$jNoYV_+TOP zx2wppRCnWQsza+ggEs*Oz;KMG{W&(?%gFZtq9Hb|UGF9?3YyphHo9fP7^QW5Ud0gB 
z9F7uIAbkNekt4-?d6YE|-NKEA5)+j-445A>$C@qBR}DhnKF6UwRL^c*fIlh4PQ+D-zK)j)ZeqJRot z5V3;3EjZQvSK0V!lz4uhz&l)mZz;2vEUqiom&^>kfp|R70Jt>a&zwQa8yaW*Cop`W zrM-0V>~ed%PExVFqK+_3isZT-&ZPPFs}8}??bqslUC2E}%nyOA1bRwuu?SCc4+)XS zG@mdxz^9+hLP0w(F{T&EWgX<(Y} z=Dmr-I)9I@zGv}*DAlm(rK5SFt@WkeT z*wgh*kovtUXLBlc52htgojirgN^jzB-5v9<+1?H0JT!+&U#I3U*N2CO|JqSAi_WPt zLkJF!9h{L|Ompdssnze?+IXJ_KVEFP|%(8BhATfusbg zlbH#SL}GU91q2~UwFraJg1wP}QLgj)&$z|~7y&Ra@DmnJ3S&RWzBlK`0Mh=+XyK}Z z`TqH=EIE|Z3(U;a#RMI#Awrl`N=-Xby75u7Ebi0I7tbbxC4Cx0?Cl>k1c9X6ew0b6 zp2Coa&*P4*zds)%_S$t5_ss&|38~shT$1JBCU$LVZGD&O+qc}dPb;f3YA#9PbPOCc z+|$)?u&Y^s(NGd)RC=`$12*BAYkj7+km80fC>+q~97)_{uF)K^x} zMZV#iIbM(KDJy`RODx3q7LU*=30~)#h?|!$D9ZhhUzM435w^F>=2q{zKK@Y|Bajps zSeoV4M-VBEDBmP#MKy3Wx5paB_ZM zW~VRqW7TVvhEdV`^ijYG=J1XIrNn9Q8v-k+9|EV2$Kbvc11dW%A@I$W46gJ2w`T2N zMg_XENC!w20}cB>OUz8~B_-Yg=R>W#$L4nfN;;{Y|;MGFMNbqExeWu>h>{{yNsb5q)d7X!J1_yaUPtP&(~qTA zT-=)NuD$=voaZ{JmD5s{`iT=G2YaJC-v~YK9#s0t+kEw`u;dnX%#>G5y+|>#*Jee3 zu-lt!Ika7(S|BLo>+4miq7dBE_v|GM7xq?f4s?%9BeF%M%B(&a=th0wnjnS?vu8M$ zko@`AWOAv@>^gg$!0^udS?;Z`z@T&`W{%@8Ocp-gLrV(s0i?M(616Xnlx0=A@!wbQ zQ)l5kA|R7JkZk;LrNN_o=4l|MRP|hrdukk?Wi=HIjpdNjQ0dMlR0>B2qu(#z@o7p{ z4&jc4QTwIxa!TCc=0av05yEO6t=d+Ct9}vYXMz6$1BXk<_W+BbX{T$&+l>M#fyl64 zs#ePv9)7&AI9?B0z)h6t=--nx4#U0PZ`om&E_}_obmWmxaO9a5@AVjo|hnM1#3G z$L8!IzSetrG7w_F3X3*D76o{6VKY%)t#K%9rW{;OS)i~Z3h zTV0ML{V)k=GWKH%PJkCe@bfJ=5TMO^VS0|e1SZu_Ugfzscnx{uMvliAQ3CC&W~ zgPqkQUIzFIho}0x4I+~w+rNXXY7UAM#AvqHyTtQbWlHd+X`dt`6HHfthE(5|nOT<$ zFa|D6=7l;wS+#UQuX+#LMI7U#Z%=#hvyT$`G7fR)j&p40CeNs0xx+viS96=iM{)d@ zkU;*EVPpa%t3zGp=Vn^^^~E%WKc1$4cK>AKIc)9N@~e>BppUVOy7y5AI;AFC^DlY5Q}C8% z9d{B{h30$21hHnVL#TxDFIAa*XsH{5vJK|2Vak^zcLJ3{;}g@`aWnPlZp=S{lhS6r zht7Rp65MvzZEPvl;k=D45AnUeR@OqiHGpmhSx(%r>B~F`j$Q%i4JjRbr^RQ~lE>{0 zP}GBf44{H?_xalc31xp2L2GlaVuc>#6@keJPGw-0by$3SYzuWpCT}r7qoD(on^}(l zDFcNE#Z@E1iVE-O2L{)`obQ={w7(=dKxnTTVtyOr)sK;1?eKj8MT1$I*q;|(XgG-H z-lG1)Ug$CXSME>{;I&m{XYzR*&UiKcFf0?trvP3vnD2uA+RGJur}Mjpz44Z%iqWy# 
zeQL9V(GyAu7g{3=k4>HDYlppL89)_@+8(h$9M!Y^XH+bVgJj6EUD1GzsW zn1BB+z-U&^f`eN+vE zj^^)q@nxu110eXZCk+ZA+R-5YIKZ>ag+K|?M}SYnzdgt1K!$8SEgI?a8~UJ|tM)2C z3>&+zmgz^~9aJNn7lvN}nEeP2)PvzfsK_?(JNc=4e6k%(-Zv^PAjF|w)uM0QI4Q7O z9w?CpQtQr|S5U(j-xFF4AWyw&f_kPeq@vlIxhbII zSb7^yZ?m~tOe**f88A__0=p6*YWd)zCk(&%dL25@MPdwBB>CfER06~gC4R>>baS+| z)hB)f=o>%JofpRZA`OKPhjyVZYP1n%=s>`wvQx9Kg74^4fvIlfi6~8|>yHCy} zUoWutS;UVX3jLcwE+IFnO~(^Sggv%6VB&x9%iXNCG!fR}ms6z|Lku+yEf#YvmOo{@ zUox01+~k5aIW#T8Vo6J1Lb)+-cJ==w>O1_o{J-~K5ke^0n~;%_y|ZQSQT7UDWRn?@ zk-bAkc6LT7DD(a{lbl@+QQ`k5rh_=3{1+-``rI-MBDRE_0gRiD_Q{+tX0l ztT0YAPF<~SHM;C3g0G3%Z@n(v3C-PZhL<*Yx>fFlDxv{uxehHDt2124KyTO<*J)0N z75u;d5CI+a4i{W*0KMXx$Rz2nf-=u6t2y^3s?)E_f}`X3KQs@u(Ew>8jDpJMem<&>1YaH3K;T)u zH0SBg-@l7E+^np2mW317cJIEzGMPFYCnYi2A~k)K@p}5b{gFH+y{NRui*=%Le)yxx zg)SewHtJX;eHpFDm4Ddu-a%^d`OKu~bx12hI84zhz(tDDN0vQHejB9(Eb`cY|11{- zw&iE=eqh9U_t!Ps!<1W3J-TfuMJ*Ot1AOmp#sxYENP9eA#~){{e%uQRb9$T>388x| ztN?9bdEGK5eFPxSckx(GA*bi?ZtJr!d1So<)2tymyOoj45&sp#x`8u+9LC$*%3k~S zs;a8i*7<6M>}!lCp1zHp*1mHMhtWQfdFB;<)OB?|BpuPQ)AXWp9^b!4k_yXS8pHaA z%>=`BCW2QQ3C2BYf*|M;fH4{XdR1o*@S4iM+GbK3wvco)F!HBbFrT%8Dnwu?8J@>O zvY6c&WU4Z=a-8@eAv$cu3hd>m5t3$Mg%|{ht;exyW<^mldo?Ir?IVB~fC}D8yaH+g zn7l94X_|h+lzQ9NR@ZQeA@MmiAd>*l#E*F2d=-94@;Au?xZdN_)KDq4$REf$CuarF z;;KLSV{KTAz?yw|94ru7!OR8ZTLL=6)lyH&faHPow{aunsO#2XArZ)|dNqu8n03R* z0?~A?qx$cMkYHgm$-xW0X8NuJJ8Yx2M%m;@)%>Vc{2de%HN14stG3OgVeSn<>`)FV z0DJPxx^71fiBRa$$9Mky%Kr@a{%|!?X?$;Ktl3sK)aF{_DJA5gz)TR9GS~>E*%F}! 
zK~@G@AN#l6!qSRk<&fwAzyO3T^$ky5ShZOc#6Z;unIL^^HRA;~?<~lNVYFkScpi`Lvd?EoPgq=1EFHg4Hs;Lp~F+7 zT9N>Qqi`Vy^|OPD8nuCk_z-DkxADNKhTnHo>B-5(+7hblihL907|V z@BpDQ9avv~6dM$Mf8)Z^4QOVy8xTGF7$}1e{qwdtd0O}5>5Y2ylag;)yU6Dwrk{lE z4rsEbpi{Z!U!w~P(jYyVutg41U;&i&ry8D5=~C}nj)U{9+Qa0}o^bBJKYT^O?`MbsP7Xiwi4s zx&k)Sw}!V~Jt~tRBH<_`*vCZ~yz8G2%*kW9f-?0gC7m*4Knuc3a+tIp76TWCOvF5eZ9J!PLA_EpSpXob&H_RT^6=1p zG0b;CbH3xmfvS9v^YxC5;-VM~?xNeW%L-SZ^nd#_g`7lENhhZoFkGOHJ4OqZ1NIsy zOZx^S2$I+huO}?g01qG)lH~vxp`7qi%cBsI7kEsdg@({{-)=RW$bm&lpjxQ6BM}B{ z6(kMGu7qV9W^)YHo=;#7s*053oQKFI0zB&(o6kk=n!r5)M_zckwd<^?O6i=h=1`_K z?)JrrD%xj3n+lCKOaPo~xYQu*S4AkPLF!yi3A>Afz4#=s_a)NX+3j)hSQQ>no8fMf z=at%9R@QK*g9i!9Ag6!6Bge{NrU?)M6}Ce@7aJ@yYHkDwivE*qegy;}96)+GWWu&0 zK;s>dgP^J(e~P%>>JX$}U^9SFTD{1CBX%k4;6x9M=ByENVPOSGYPU1oMCb#85Sd|B zfGvT&p8MIHU^QWM(7}&WEMEbHre5%S^1ok8gcs72zW861rd*O#Abmh3mMHd5dPs#I zS_kU}w^N=2u%iCW4u+Goc{DV-fqfrlR6*z-bZBourx>*%*jsS1qQrtd4B`}BfZ>$K zC-Lg+qY>*usO5g?$lw?8Kv7avGF8hVW>@_!q>FH?*@IM3YN{@KAPd#Xl&SsfNOeEc zc8$98x{`Ee^ewB-zV{AMG_DY)60UEWqNlM?)cUs0aIG9spD|~OFcrbZ6Ob>#wrTK7 zGvo*|T~*(MKni#u1he6})qw67K-7d3Vzv?&SRsO9ar%04>J|`^D&@rE!Ki-c5R!kW z05TtbeCd57@OWr{sprxWVoeu`hvATbJ+&ZV~USn=ajO1>WU1%_m9?XFSC* z?IA4KvdcZ43CcQ~g$aeS*PA0?lVKx>BrJZk`P|qjB|B8nKr$;Q^7BhQ_we-!qFflH zUHM{m(Na8J%%))4Gm0slt3F8d^hM{*rWY*OUw-wi)QL3lt2LzbU(3{~@fCkX2s%Zg z8p%Y)2g6Kb#@{U_aNkyEoR)i#E%>Ef0{|*{MI_>}%Ty2x`np7!f&+^F2d2~G2vNqE zLz-O1YuLKxqMcdFPye!r9JQ6RPrB6Ffw%Uh-ljKY zVKQ9F;TKvSQ5$z(&7Y%$VF>eDYU}y4t5{jx#~$8a4z4jhBeR`J_a1Z4zl!;(e(@2j zeqv1x+!ULO_kd1`haxv2uW5Q-h^OBt>gglzi=Zo-$n$rk9&Rop zndkAxiUse|WYY)kw|xonBW+Fp?|00L9D7`L+8)u93{H-)7T%|j2?Qrc`3@A9eRswk zz;tS3#Gnhkrm;atSBRVenL!eWZvL^EEhA?cs7Bu`1xi3@RjPv1)_h(lJnECc>V3shw|m2PlDRz z#rP=HOMN)Yfz)@M-g4ceZ34F+RtC@on1^vnaWpFjBM&B)tv+ZadXhCp!}Uup?Abzx zJHaMPd7QQ{fhOTgT!#-6w}ItYw{T(@ruUR0zd1pHLLK73VN%GjYQYJRALOAu$*DG3 zT%NR9jvsfIf}D?fyZj?lbiQ8m7ctwYwM3w7P9;}ctHQ%S;uh@DKtrzX6KM3kT3i_YBzU9t(w9(9 zDdW0Fkv;Q1Veyh?a12x)#;o1rT5YU&S1TgNUdY3(p(aR#$t<&!cRJ$+c^k+;gt@4v zooV18CcOPjB-rS{Olj`vq=+V^sjrZMV+FG$P 
z;eEFWk;jXkU_zag+z*#fP54E^SXkDBJYLJ#@>Y|Jn~X}`ySsI<=oa!|hc}|P-u;WN zc*{8-oW`~VqJwnT&c(+b*(YNT{S;H=Fs`&inA9q_4C4pqmcROr%c~>!8Jwl1>)#JPb^Faj zp8}6>cj#6c-ga&XCpTP~pXmLx8Y5C9QxCzeFhzhCCWKnthWTkb`8 zbR4U12p8+$(Zge*3Ox#!-ec@}no8-Cl~BIRJo^(2*H-`U_v(x|nC@5JShLe4UTnZp zZhRMtM3>ADrlNNlr`(c+_lTnOi>~_fQ{M>!quGBu3c^WxQ(?bfUGw@ab?CF1$3@_{ zmA4@k2~#ARYnFt>(fWp*B{nD>6aI6J`pQf33sZ3^XyK4OIUJSOId?9!G zm=l)-z3JeoWsuuB;j2^|=mlUILfZ=(~daSUUwACSU8<3FcKJGqz6} zb&r&dzeudSzKfKE7L+0#ZFI<1++uqx6o3sDld2CE4veejS8Njgj=jD+UAPy^d3%AxdXg(5= zkz<~>ePf(^-{8#@cwkF@N1+0R;kP;|_jz_EQ=LC$ze;qpmWIT`LMk8`KcIOv_VP|P z)ZmF8rfRCL%bJ8^XnwDlyE`lD%Wml2p>o)+1J7x%Fe)nTb0?7hzn@babjupC#1u3& z+TgYlI@Cx~`m=z9I8g+rRGG`}SCUCdwHi{rMXsxH&PY6NcOd4#xH=qF6mgy2W>$wCI-2{pqMjJdjNqAh%EQdF8Ul?KHT z#e!=b^oo!J-nTl>>+3_mSMu#hj%~qRxZpR*`BvYa!=@6;WA`IA(;-xS854Z_!GFI+ z;5hTw2*-)ha{}=OrMX8ha=gASK~_{5L3d$lYyDl_WcAZ@h7Gk>1GG(*;h^ut|ZH8hO$J5n%yLZVuUMeO^vtJzz_)(G> zh^x7B>j;(@qnYvgwqhzPv2OqvL&frvvaJz(EHAwp^*Ni2&Zp$Fhd?`XwEWM_|?41@T{+2VVI4+bUGG5F@S|Oq4S(9l2DJt?k0Xk?e5I6{*cC z+`;LlV**oz1Kx21-G=lUq(6arO8gmF!1MO^Df8ZhKotazu$;j!N-GQ{Mljz#0 zl4#GRLPI6S*6m`Fa22v3-4bHVG&iDAaF%Aic4lSUN=D>NruzS7p=GWTG!~`64pTCQ zlYiOqB>*FDnzz=a6EVv-66utd+P|bECF$zxYwOKUOgx3lc@0ESl8lHk|K?0>SUB8R zea4l?Z$OAdtOUeKB4+60PtrbazcnZ&HHQ?KXrOTa_tSx33v1EpS%Ie=y#i5A{`uql zP2_$i2|Y=VL$%Vvfcaen_GN)qI02p1j`$E3E5Oos)&C-LF&TW9CBaM87u zm6e8tsj>0&!~{+{7AlMpva6j0^;iB6o2^AyH0HrftT==h#YpPtX#K4hvLUwKup@0P z4u_cVAEq7u2AH-rb(z;qb2K}4SWIQhzO61KE{o;J(aTC&`ZK7}VL1oXwQeHRJv1n~ zc%(+9Y6u@&lv6ej$*r@SzLPoqjg1_R17wOsSG`bqq<}b}tOxuaWGyZfK1WW1I}1>B z?&8{FW9hRy1*CCSgs)F!I};s#U}Iy4hlh7;TtZnOF&7=a!!HVDD~HA_udhF8?x&1h zP)FYlNkn9Gsgipa^mi&86 zq4bPfC$@TC07ghJ%oGt{RvdUETKETt<3W?jjJDn-Zr-0l&~9M=^E+mnco(P&exaO; zMmvIo^_J(BHk9QZ?of5X$83?DXBpJ6?sj60-Z-j3d^%Ech^36!_8!lq4%Fs$<~MAp z_vL}cj^*%BmUCd}ke~4eQ<(~hAAaEd|KG@OaWyt9C)tBbY-n%)GaVDayu`AJ_+KPe z2&hN3ba?EOUOc_?3F$am@m3tx;P>lFloLJX{764GALd1{M^(`>PKr;FeH`@qm<}$7 zyd0eh#G6Yg_$eHAH5Vte}SX=m@rox*9J$>(3$1*;7VrOMF2Acq)e#bhu3PsV-8zUnR{z2$9 
z#GE9tiwn0-#_xU}d?T7N_w(k2xj2#@Eq+73wmo_prO$%=zED(Er&8)FnkHeYD;Jd4 z0|cNL2^uE)9xWD{dbJjobDni2sbB1I?fGoUiS2|@7C529#ivME;}#iEdOz}HTYcuK zgx)4+ScUw4?vk&RT;00><1bCE=a+^sQ8(qCRGnWC5hBjSEE0M9Rx{1CuntC>il)m% z4XdgDeZGm0Y{TiBo3mLwi%Xz6b^R7?da$QkJqiL5qbiFPhx+en!mh`;as*a3TRdkC zf9`cQU-)&O9;qeVQqOJP`t)wti_iMu!`ptat(aCnDgMt>y(R z-hJYp$%*TOo~W{6AjUS7fJ)04Rb<&=oXi1VX` zgh-X<-thR96(=F#X-$o7{4?~ke7G04YU*Mt;G%KYLSJ7|xm$s&X_=C!<{yk{_-9n; z3E~}_ubyCy1)YY_&l_RmArbm=ce$6?tDN1iHovbw1bJL|e67N#Z01dDkk%QVLAUZ~ zCOy1_yIn2N=op3-wMCxdcQX^j-2ZN(u6-S@R>0Oj6Q_E zwLHj2QK51DH5uZ`%m1|=^lg`ah8mSRbUEW{sUZpW>hG}Gc4+vEUeKpDpLi%Jm`i_- zuB;>_x%~t-Gem1AJjd7Y@%L}snvo4jKL5m8 z1`B*BqZF4^V$P)S+b+(*U3B!Uv}SR9gpttXehvVa&m_md%}Z!0GQNHB?7;M7j^OvZ zZ`y@3_P1~2v-0z^Rx~$BaQFN4X54rB^hb2CBK;cE@7_%DSC{|gmldRLTL0HBBTc~9ki=CL; zpC1a-u+u~!czVy(6(rrfs?CmKexf&TwtMZo|Fi1=u+{1$A8Se6y~SN_M~189wXg2G zC3Qf%g0i<(2#2g_bg%)UKuN76x{8uKM4s%c#&3VXi+e4iLzD%RTsI7V0;WaL;k-I{ z_)z#LQ?*w{7iA^^og}-}ICWUO*_F^$dFU;!*wAD54CA+bohSfGrQx!>pUymFC_oPs z75zP;Spo`)z6;muJ!hkAA9ZcQ&DK?##6&f|d#g5MhDS%vATgh5zUZT-4%yhC4LF>; z;50tI=F{EXEmP*d7RkkDY|P}de>&6l%YU$f(#eh2>Gd1=zXZov$I3BLf(=!&iJ#oZ zu?~)Cy?!1gDg>18oViQ&Z{5X>D)C*cfGkK{;+xi^)RObeCDb zzy22|@ZAQU&FRmd5A&62U)`~s$Pj+_+RMURs#P?@WM$*5qx3#6#4$P=jgw=qz*$4p zC-}TYKvDu-(NMItMm-%%`b?tP=)JhrwL9AQ=y3HdP__p$M z!E9X0U{CA&;9WpYE^XW^8bl2&=5O!7r$LY`6{*>~Bl}q<9DOf7_jm)zP-bE|vXKnu zZzwG)8OTtUAw1??8N|d$(jRR$DK`E^{M7exhJCrM?D(BNOZVgi+JyGyJCl7cX7Vqf z>vezh7`#@+Ei+klHd*6NV*z8nVWOuWNAJ>q1^2c|Ra%MW;<9p9Zp#}e@z4@wm?-Wk zmT`YFE{b2Fv!`QwoC%Cc*XVk-eHvwGnt&@dbwYFxzNk@a-hOT{Bqg3{D-{5VC1@yI!2fJkYa46OlwMED(Ns7UP+H44 zxoSMcxOQXm*9u{(yuhpVu@cS?8u#xSG4s-fM8-3dt<#!my|HWDF*u~@8VCC>*j0Ft z^(f}7R03{vvr=m<9j}Q@zo)kO3jZ~a-SrcaT4hw@y{O*r3{DZEvb1?IykEyNxG+#* z!U)*Jdtt0^5kf*KWlX#$H7`sH1uf7y(Bc42igmM|`4^hxT!w0|cJCA>*(!0mL zyvLuI)7VF^=0l%iCtIM;b0j({p86_=BJOc3yskU;DrA8awbk3{mlS5Dv+}tf>83n+ z*Ris;{L_lp-ciC3i?^-y#Y*1#+qa)RTq4Z|ur(~Oj|fpekosHp%!t9gpa_dH4gzfc zT3p6eBsKPHT+LiuP|)OOzH`Y8&;9zzmpZN5vR?pna3E;d+FEU>Mm0BpAX~|c#`DUf 
zjJ;8&6Wzp`K;j&{9_<)b9HV6QSae_<=bg(9N(#G%4!fUu9MIMa_!*Wb<2|!l&6!}L zb1%6y&Nwa!?bgsH-VeoNzV86in)U@6C(O;zxzmO-Co;P@g6=O<%kRtUDcs^VYFdyt z(cKwi@mL1~rKeT*2-iA2 zA_7jC-P~N zRIyKdu-=(ibA9O83jLT*)zW?k(yTp?(4_ruXnEnN*+=9@LkW;nF-~KczW5coq^tf2f~7?FvVl< zAcY)F>JDefQsP87y*IifrpAnNT4gQ*FC+4y-z@jxj^X{V9N3oh4S20_oi(a@x&o#u zVw#oNdc& zeq`SCVGjOLXg9`CXuqLccOfLW8}BgP#^LLI+gBYLHOE;R5p&xVR4P7VYV5s}E^N1u zLJ79Gd8#`Z*od24(};-&H@VJHV2|EHYR8)rX2din4j8T2pi9qz)%2ZZY1Vw5*~52b ztcRQrnqE`>6`Ch}#$Z%7phBml#8UrTuiWaPqDPSj_Pw;4Fot|*BK8;klOvsuStPpG zr|A-#sx!Xg;k(Lr43xlQ_=f#&; zCE#4($`Yvb>(zLutrdo8J*7zG_!FQf1{zA`_6vo285llf71)KTS){Z6Zosk;I?Pjf zz-LwG{c>3IahJ--2dJ>ItSH*J}cFODFld?Mq;!Oqhb{Kr* zqw(zYSXWH{@Ei|6|6_c4Sb31csLa7HaLZ%5>G-kzYaKjDwqRP&?MTR`u~&fHXfdXG z1u|MosK&fPC%wlwW}qhvD{&v%aL`w1dWi46ZD9PzXt`DfWS`W$b~tENegE)RH^dio z^vdXMF^*w5+YiLn#Bi_$8seNC`{eaplGS^&9B~?as{*>Le(Q~<8JfOBsbi5jZeoauDEf%`+8&f?)Za@-TQx*D^aQBKnK=H#Hplz zxIDOSkF`g!@a)?eAJX;MW5~qb;~<5b_bhkl+Nn*6%SB~d)DWVw#j=|LdazR5SUx%4WBC^ygEj7Qw? zUA#pZ%%SR9@fWv&X0{i6(hxqjY^FHHJe95N_bQZQ{{eAbg2RxE{uj3~XhM4ku99+g zQko6B{A+jf_MkQLeDqk8pf32++WRd@T)#Z?8 z-m=wr?T^~yufHbxR1VXy%1X@FSGgE3g7Xf%5KG9FQ5nZoDEmc)qt1izKo==J6(Amu1? z6L#%h;U?{6C;|T@DX(@BgHv-AL4WqGUBGVw`=^=F?$`#F0Jg(TUu#Ldcm+sj^2<(c z*!DhufK-FN(Y{yo14Z45_o|LMn?1T)op4|5T=JMM)iyD5!ZbKEm8E%Q!F;P%+C5W+ zA(l~Dz&i?d0gOPzJM}8&rW!M~sxMU()GZbQDSZoa09#DY z=1ezyZZx`n@pH1$SYb3lPJ(lPRGJ|3Uy_7bOav-*INr6CN^%rpxpGi85}=$PTRfIlhDHXgtItB@bvXW#hH5NU=5wUaBvqS`fQ6P-xe_Hi6T9-(9bw_ z@!$A?pO?~mr5vC#qsuc}Az2u}Mci3^gOqpPR z@Z__XWJ+?H7v1la0HySIF9-Cit;j;nmcFqjEiiTHll7gKkJr3q-PG_gPeK#?%6zv| zV8MLt0S)e~OtRvh%z<5KFo9ryHBjdzf_20Mx;KRlZg+4N{(Bw1t?xsaPBP+SL^zD? 
z&KqioFFiI+?fQ{o!3vNeNJ=q!V4gQDUd*EOF8=8CcIPiQGRdr000i~RKh)E=w0Eg{ z*WAuNwELNLcz8JR0KQ5_CG>>uM>5b-87x9>nr?P(&)m|ovcz<0lkS--0>DY#yhTW$ z{qsZgMo|L5tf!p&e=*O3;{!S$qH~PrCIbaCrY8n%o;GouV8KZkq!dR-p^))~gw8x- zmMtX_Bmi(1IN=bBunbQeT!wz}4^bmE#O1eK6*E&68Gz3gps+-t-2XcSTq^#>s$)L9 z`oo)x4aM5xu2Vq~`u1QmWgT8lR*rABS6rcUVkD3}lCd*84Cy+iJilxGiI|u;@%W*& zHFXUW9cOym{qwpr0b506R#bE(dPwlt$@H?1%vD-R*Js@IYy6{B(`}_@?2hW{oR4 zq{$*fsVs|En}f$){VYm-Md9C5;tdTASy#%+%i+T3lQHB(POEQx)j zZHVDCe4isvRS>JJ9{1oLxnysenSWMAkoNF=;s{*!h29BwD}3OuSH| z^~ZiHP^oUsGurT)w{W$uz?KhHeOQt#h8A(0f^~YReyBY4pUHde6)wArp$5ri6I$== zNZoJfWMxh~5HgwkcOto*7PDS>^V&oUobA@C0{>n`@)=!-P@>2yUel{6vzy?7N|bkV zpJ)zviN!$-c!^yXt2+LTc zIQW{q8c@*j-!z`7TtBEql?S4!&3&Um2;WqU4~3BSa1{D!QQS=cCErr{C6Ui|t0z*$ z8^8Nbo5*`5jPnAhIB?>rQCP-qSY_C+;>gRdRgXNBQ^Xv8^kZjFrwxi`zurP?euBup zA->?N`19&oR-ih>8(fPRy0a;P5`CRcVAAwZ0mcCnOo5Mt+FEbHYkeq&rXSQfV?ULy z8gU~)e&-(a5zMRDs3VoRH$2E0Qw|b&hA|N2mziS)3c=}-r+gr`2;0i%RC4)yO&D8i z?(el2r>H@%kokH}e5G^NM)i7A`Lt%D6x-Mcmd@X?Ym{5uy(r z^7INOBN}>P*ayRPEx`$!WoeH4icW^NCK)m|>0FH51Ywx2EnfKDNV<_yS9I9JPlv(X z0h}@a`{frbouc-Q_Cvv_1ODl+Xy0;?GFp($ovHOkEN1vHjvN&yd0!8pw9>~%;Suow zMF7*$2YXgpq6hN~1HpLptvR5XPHqrRAmeMteNGO|__y9WMfOMsHs7y1?};iD?@*@dX8?}E)A(DxjN4m2IK_u_4SJq)4+=dAJd0~$u4I(}2)!;ITC zCWq^1@fQ^yImMDvoH9Vn`+?X;QG$=fKvCH(G@B3a|};mF*|# z!HpD63VCcYYilAE%QRlgA^`xFyQI2O#iI9>p{`tQUsKdfTtIsbDd=f+{xwi7lk~IJp312++5jIZ9)$tE zq1OsH9#}EfGOtp$!|6Fh3fVAi+#2bjCn3zTHJ2`vI7h@M4&swvQDoISdPO}X`5Tvw z&f%pzoYNlHF<;c@zpkt1o%S6Mdl?ryWG=;(vve{w0^)X75X19Lzk7fv2F@IeZ`YDHl`sQ zu;nFT8(BRx$xGo~KlD5P{6fz9rdKYc2Nt>(G7^;RJve9p3yB`X4A+^sMg%eyyM!C) z3qZ_*@r|g)`J|D}mvcY0fhR|fn({Xi}7n0&TN6Go4 zJj*|WWQqG+Gh$;=y3QHqI7?4)eJwqRMghHuHSw3*4<|!;eS}+}qxD;hHfb!EEjO9s zAf%8^X#x$W9Wn@b=g$E}>S?_o?|+Hh3BW6iTHB`4=^idK>;7!l4hoNNXR`wSsl-7o z$5n7<5Z|~?2Se(v-eUp1 zP^;SW7pzI`mTcqePIbk-K=DGkent(0G(*Wb@>;G{DYA}2YynQte}F=zs1F1^UG{9` zsGZ<=T8vF`Yf;S|7<|d;ekqOx`61B`a4K&!u3FL`P)za0;MkSNPFG=CRxMfTC=~99MMos5%~Z=a{M1WYpX!PXH8Tz zYtgteOf^AMrTurN2f`9GdMfp%?bQlJu>v8-q+Lvje?O8gHoy!O!Lu4x6|v))F2qda 
z&__^MCg?^t0dWDgV+O!wuSraeJKHvLmK|Nd!?$YhLi2guIVKwhUkkD0fl7~35)~tX zq+b35jtuB6=CKUIK8>x(-}4K6WSC0d4#A-z0#sko-EsrU;+86X)V)8Ux*spIY=15z zV(M(JCDj{-VLcT3J0!#A;8p-FS-kukb)V?n->4{El8}! zY?UHk?8D>f@`W`zUe4p;i*USrsR>~=B0?MHK+9nqL;;-{V>Xq+FXECf+-~c+jFfK? zMIC4$fcyS87Z_91_hK1;BWARpqFA5{h-9xMv7gPAF_TbMv0ZYj7F_L73d(v@4DisnhfjD{TsBK&96?#Z;Q{xcP3_kD&Z1I&$lE=xbz|kW+l+ zV03|q-S50kNu=-g=y9=KQT~pY9#ZP+eZgl9_du*Vjfj8ZJu63}S{B}8U~jikLKB{N z&&kjx$I}2RzJWg_$_^Qvy|YU}s_F9#d2G!Ad9w=2xxe_A9yS1%Cg}`vs#{KK*q@|Frm-Z| zG_Kw5+2ORXe9xlz%nk|;WJ@S&P|Tqf7)*ZqHSC_IGqA$d{HtyYo_0gefQY);)(rw# zrXq8%tJV?mQ~BJ%rcpq|N-Z>%rWpp}CpwTs!7%FcE zjru04=LA^W>VSUp>zNKERFF#fJRuk+6&s@=)oi_ih0p1wDl_hoSBSI^MbKx!TKDDP zoO%2g*BkKde>+UL)FLWsg!(D%ZrfIjgJFFkXlk1)?IJTDca6F>zHlnV(j1^5dLKS@ zs6|1hhG{=o0ixhQEwbfC`AiFd;(Bi{Ne<+sq6zBK9VAg!{TB52t6ktt;#K}QOzm@1 zK7$RAeEp&-50&wtL_zzS0LqU`4YyweJRp!&#IIu|&r@t61&{}E44}%Yo2kAif?LwC z=gD9_l)|dAs1C}?B$>50gm<6*%}UsC3Bh zzW!bHfbtVwa|Ifm-Lm}V&#RXy^us`ERh8Wqin?s7uJoB&OL4Q4D zG8B_HOMCb_^P5TGEv?tHv6S!XbL#N|iX9n86Ue=7~vWwr~yYh^3yTjVg2gn5Ik9Qhj0=T!_o1^xHONpX_)qCgNDv7S9?pQDI z{KeO6TPIa+(?s-y!6wAISnTyNieItmpK$IMY*^3(e(xYz?SW1T7^!=nq*q3^LPJ94 zM%hqgiGRDF*e=Y);5)rky6|slB9F|fJ&A6dUYMH)!M~#9v?{z*-htMe)bs%#m~~kn zj(x+-W!wU>WJ<-j)HpS`o+!8;JX@SQF^8$TKNjFIuLcwpjkQK$1Bv6 zBYAj&jPeeRX#+?}ah^a$?EfC==eRii+c!81*CR)fwIci@RMP(>Itj~12M{K+6beVa zkf6Kne@HR~y`tiQop`-cv0H#JX0H2JD8wC<+ASKiNmn*}j4BNh7eKW!kjuuPAk|2? 
z=r$zo;5V1j@seyvbyAH^o9Ffk%^jj!6)s<#-b6K#f-mwsKjwxZI|%9Bv%>sIQI-$^ z)F4HVd2FYf0ucUwm#nbx$Hjn?$=T|AXK-5zZpaD{L1krS?bpR*74K+669$k~OYASu zeazANAsb&L1V&JrE-JBWCy%;u$!IF^pkEfaO*&D(Z2$*8sA+WL?1|*dY4ok$T>y8{ z5A^1Sm6M)8@7dEsGJas2b11pRzgot*HtFzZ7(6nT82Gb=0?;WlO&d$aT{e@2_9sJ{dC$Ijjw=k4Bq0>XF5);*#`H=|Gqz-rdG%s{gYlPA~|%K@zLOqkrqOZ z#FZ-Qdn}E2OxlPHFhMwk5JTWP+c3V7S&ghhbEY60Z0c;WZwTPz4mH(`?|#YB{6kjT?`go2W8Xr# zU;w28oD-)7?cZ23ObjzKtrrTKE?;_zv<-dSWUlUg0xEo{ns7D1dk)0!_7CyHZ=E!m zv#9CWymo;{FAhh96uBPb$q}M%EE5hvcDT1$^TC_~IvSbwOL*A9pkGU* zgZUWf4%?;QrGm8G14hum5CoXti!8Z9DYXrh&)UYjI|BC?mkr^?bT(P(_$?!|R5pe^C#(?3s{qYg=gzHE(#tj`{ zhcI9V+5u#zo~5{zb7N_t<2q>#1r4S#ScRX3Df>z-;IKpWq^M!*ld&;|Jj_vgr5#u$ zLVa}yYy?uVxqnN|#IR|&))3?lVv;imqo56i8@V-Z;m#!K(%0qQARn>!7TNP9AJV^)iGK*5rD#-bgjw9YuWYWR~?AE?0-Uo%!qHfWV$oOPyc_L>( zqgodC^65ns+bOB+g6kEZs_Xx;nq7J^-_yeKYToZ92&7PqSC2xV4~Gsno~Fr>n+y#d zz4PG5tTd`+*^2YRUCpbeLw(0GNre50WmgZ=?!m9!(z%P+%*;ls&iLzXbYbxW^Kh`B z!cVP@S95TH%#i4=y&3i0hF(c25t$^~f}y%v=vC2X@;iYK=}?3QqiwYT`bqE3mpd~P z>QXl`EXiLtA1*s>b#O82T|6?}kGu#TYT>z!`<0wX?++&yXeA^|KqU@K9GX|@_i(+} z%IeoqsE6An(~ZjORI)x#q59&~eB_;RZ0*}v8Xx3Mfa{y1SP|bAfO!tE5|7S^E2^nC%ICY4;ZHvRq3X5HwzKiT7hz^u$) zS3ibVV~5jLrJsm?Jc*?+Xfu#;=4=`O00Tsm(cN0nB(y;o@bbJTsePat%_ShPe*+b$ z$U{3Gq=GDWkJ#D9a{fG*n(JrwW>Ucd(REu3-G&uFr8k^_o^*@XEk_y^1F#ksc!<9`pCg`IkT5yKkuNr%amv}U z0+%8|oqVAde^q)u=&aN3?;;wG*NJzZ1GT2*Kio*`27)4E2ZuoX1pu8 zd3iaPCE;01jDLPqUmGu18WJ`sb&#fvx_ONWBO3d|*%zz7GW^%jVV@3I4B()k4WvId zNSF>&OqOP9V;@1~{Z2 zqke|M9#QpQoRCMggIwYR}{b^ zX0F~XH=$VsQ$kb8ojy6gE}xOhh~@nfDZ^)qF}ZON94}SPPOM^WV1hHZ<_oo&4i6GI zbcj~MOn481UCg@Kl>aDyTkLsp6I0WrG9Yj&4kjf7X5D&x!7QxCV=hq^WF;`o1Q>KD$EdgE6oOE!wy)=x;~3iRp&OQvzQ+NdD>$BM(?5 z^v3p!y8HO!wT6d-oFadn)tO}DZ(ezrg=p9pNW9*fzrTu-#m_f{^Z?^~D1z2g4l<9q z-zl6vO#juH@cVoE!J6qs!`rvY3%71d){B37I8u85?^jaO?ZbQqY@v3)J*o&jWU_fG z@PL;FD>qLTILzu#j(^OB|5)%r=g>XN35FC_gDvh-4yT_&!mB;8A< zz{tX89_(K3+EEOJs>)E$p}%(eW%DsTc}!S#{KH1g$@RT=3o5dlN&|ONCZ_ixA?%t> z)Y!pA>n);NpLm6YSXAV1ex{P*KCuG&S6iiHv^o!t`5J%Xx#qrZm4H@zF={QFtZrlXfP; 
zpf_Zu9VnKY{4_H3zIs#8RfW13I6as2m%w?VkzAy#k??T@nyiPHe18;|-TvcCxz)?TN1J_f-sBGFcEOZupE%qQKOc8f zq5R_a^4IIlVd84a%5bsmw`I1a-prF8-o8^f?fK}8#6-Rc?DX=q%}vLrB5;xU_1UxK zH!))3;wzPd1*$%~KNMDq|KvP+{MgiM4nsjfp&=vdbm10T(xXEuC1qu}*j`l~t_^p; z!Ec5;@bE6@r#U+D0nYZglG6%qu6U zo@TvGc{*z$$O^MBp9lK-3Kge6vF>u8=O!j5%0-fB%C}Hk)!hF1?e6&<(L0-m#l^+D zIGOF+4|trb3K9yg_bGLV1(BXCz$Pv-FuCh-CFN3}P>uv$cjuP0b)$5HgDswA6eWB% zbeYxtw-LYEE57jKL->LpSM%x4Vv1>jXK|s`7FdasmoHzk>zBjV0iIr*pAo?Q`nJz{x8EsZ5 z|7fXcoh-T%z>`RnR52b0#{{R*z7&?l^;k`FilH(FO)w7##JoN>obWFuqpJ_}#pY7|55j`JNzpGjS*SXqsFr_vb3@Q$W*)SQySs2vIrTYb2x%LE zG8cvEN{Xq!@Pxj~Yq7L*Edqzr<%Zec0q0B+F9weHItl&rZ&GbTn0Y} zCy1r zi%k^X0bok+{3hgILh;HL0=Y@|$$Lgcx5<8ro*2q%kf$G$N&HTAL$13ZI^j-*BE)4c(S1}(tLYG$144eJxNK1l=gt{{{pe2s+g(l$ zHa;!#zV}_(tmhe)-bg&y=82KOf$!h^8?cYdlf*JV<9{N<40_EWOw-&LAvwkU_kH~! zQ5UN{eCu%_GNH;JXRT6`ZZ`OHcE-^qHxp~kGk%@s>1h+z{QUg;*3g^&{ca>J%rO_s z3U_nA?T)@bQps3|P3&?T;G4gX9jmbYI7=p|9wv4yo|Pl`b3CYPiiQ5bvMyTU7=5LE z5Nz$-_QCbHW783`SaUE7-z?a>NLMuzD;p!LGwHN-#akBIJ=|IBUbpl1KHB!ic|(0< zcs4(fPatvF;`Jg8`v$YYZKY~*s;N_a*8-=2bee!8CUN&CPsq+UW5X#+Y2G(o{P{*} z<65KYiy}`JKz+n;~)8qtGm~^JA zr2^`IpthyBD8QC;4fZ>gOm=!mbmPLu#Sw1rERHo^m))G)`aULi+N^gM2^f|LM+h3M zCVN(xgZ5xILlb%&c0bWdBazJ3)8^B35KK-Q)YlYxcsA?Pzxof=Xl^(6_8tXJ%NR8$ z2>iP9Yq(G0G<)T?*^?(v&QFhayY8Od9$DV&t|zPzB-6@T9cee_HlstrVw{%K_AV^7fN=YN# za3}uGd){-;y?^e#KbvQL*P3g_m}8FRv-7d$?{(k5bE7Nur5YhS_tBKSX0NJ(g56#Z z`z$0ALtk_nJWdunB8IZvTwIE;&LKyT%J1yPeeJqgF0&r$V z#)?A?i0>2edc^Q9ZEp6=oWg??lAtfS-VUv3`Hx zuk-lB(R|{SpZiTN=>#=R@CLvPJYKNB>5u@=CSRjrcLKx0l9}|rZ&uA}D=Qn;+;;iB za>|aNsFERzK0~U;XlNDc_|GIE zP-)b?yP=fNV_ASGBdGp_7`?I|Mv~AFIM(||PMt@?N@ZVWr z;-W{KvvDU?L=KYwI~|{Z;9!GCHLKe?SEr*`vI7nhhMcBPcJeI}MgLkQ3qoEZepFlG zbe9CR>D$Nm(5V@+r*)dPH9^O1N2iJUw5iaZ1UOWoTFE>-=aIS9_v44*!P+qJj;8pw zXB)5JRfEh>5c(vM5))(MooKu~C^AlRHQJi0E;H`cu5+=4H(Wk%Gus*zs1QIV0V$H@R0P=E8)-;CI(E5(7Dx-;0a;owP+Z zl7bI7=ujR(h!||}XJBJzA3c_94<&NHJoy7coK7_fkfGYd&)ny@tM$aQA2L}1tqSn* zev$FSj~_ag+(Rs~=W|$WlFX(qfS`r_#X5cEstL423zEI{YQ^q`ejkyWak2SC`G`DM 
zl4k!wS%Yd#zIE@n3-O-P+54`EJ;a$({=1KZ^TRnw&vHmK*3*5ON~jE@fcx5w1I-q| zOo6chG+vMMK^?;$cNLy14zcK_1TV6^T3HrKo4?DVgGSuZoJ;+Sfk6YtjLLBrRQ!>w zNqra1-R+mN^cbA773{y5nXNpZuNN9;O-L{D92cn_#qBVO7aGiNTA$WP8Q*{*sq`X! z9f>eeF7ZdyQL7oIt!1T-#Lcyy4;t5@q4UP|JwP71+#C^+>2~|d$0j#T&==mVK1P0< zURfFa>+o#$s^O-_r_$En@~RWTWbjp#mnO*Kv0yhzC9BiZ48G&c+8KVnQi5@7QIl6 z*yr)cPu^*mm(o~JR&mS#srC*fJ%fuwqG6+L+}%qI8CM6F#t5q`m(ApvW*!ssGr|Wy z9HElRmyqlyLkdP)OzbdMe9LC=<=&4PVH5P5q+as7x!FlH@Jz?*s;P;Iu4FlsFEquO z48&ZXfB6>9%2&e@$4W-y^u6lHB0~jB^tWy?;?nDQznNvY{EszEFmbnD(ZnW{ixJdW z=C2cZzr1nYaYdmaoqTb8Q1nH>8t#pjHY8p!=wHHe%b;U8G^lNDcvN3GX_(FxerMLRo5|i?mbx($m^H(3dfA`T0dq+Iv=5)7|tO}NychatVWTL#VI7U4{cM!bM##GF6OO9Y$nJ+9)( zr8<+(qUu*!hhjhHfGzM5xk zF53d~!hpE2Wr9FJblDYq+CYY|0U5frdc(S~po2miQCDMlKP4YL@N#9D!{H|p%|1nD zB{dbz37w$wSZ&hKc$Mi(vfGmH2oz4suEZM+ksozB>??;}P(h4b!SLKo?dnyqI$c(^ z`NZa`Q?#Mu&Qe`1gik>o;~DxB2>bep3i{WRIUi)sp@6B#la#|@UU$1uY#DHMPVzuT zI{{8M8eL-{=fwLep80pY7B!WERBXAL=e9inLtb7?I>ovV(lYQbik8m~1r)i^Zpg_G7-$-^yce&FFElK$hPPUBaik zX9XHV7r#CE2(0)ZE@cp&$>&pz&w4Hm6=DO+WrAMYRAj7MNb!Byyt#0S?_w?s?U|aVZG{FD*Na~Q!2+y-4Pbfv+&$OuZcLV``LO8vvXRJ*IX#T?BL{tdZ6F;PyWG% z?l}yn2HaVzlNOSpQi+Dyh=;Eug-FvNVf@QPRs0-V4eyimWVo~ZL+LvXJFLV6DU`iXHu^d#8N&1jx&=TLj>DNKU9qk`u;wvo9`W#i~@E*qdo3wA2rhXsw z^%XN|dj1OtLVk)P6VzL2>7=VyK z3%1jr@9ON#`}&nS&eYPf-eDD39oRdQl-&1G^Y5aJ_`Mx;`al)P+~C!Mn`QtaVYssQ zfz1g@U;T+)ju<1*RCx}nA-*p7rI;=Y@j3^yA=V;_qQ9<%CWP4PkVXDWYNB5Xq$W^U zL29Bd0AV789Y66uf1zAEQb;jPZGqk5nM7sIC!j$C)7VGtm5yAh-M(djjDRw52-3bM z_%-ctvFP~ryEl4oHu>Iq{P;0;pOUI7)P}pyCDO3uEzJJs;q<#*AI{s5*^+8)>7z~m3)a5^JZ>DT+) z{|S*UY?{v{dcR#&H?ovI?q+&xYb(~0FzvModyefEDV(1TxA!zTihH$g~2!j;Tb?B+?+e zfFau|i}yNk*neNcQvof_Kn|k#_mPcMd})&p@Cp0o$78i4G*C1WTUjyAg!dJ?a3JD8Y>Vsm&{JshK#QZx3l#~h_JdHz z$zGzFxngzp9gNG6?w2neDUUh>ADJ>RIPW}c>mT$(^JYDtC3bC-qz&gn8w0i(bBAwwcv>2Ai*2Bzi5^qcHFN1Vc+-WR37llGUu1@0WkG|VdY9I zE%%es=-EL<0TNV_m!Z581JLxadc7_z{{Ht*YwxpxI@i#DdJF{Yw{_~&=z@P!vs9LZ zQ@wfoWEk7|nzO|QM3lMXk#CSMbVb{u!1~LDI%dy}mVPBRE+t)#dKaXbP}6OoY(-Vg 
zoRTbLtE}-B7XI?{+i7DoW4%hjdnTbyD$#JhJIGi!r>OeHYpFB)>h#NL{n2x(smOVx z!W|s)CB1}CL<>f?SOa%ZZN7FYM@B|}S%=~DyfvRMU)qbj>k+dHw5b?=VNXO!$Q|Ve zve;98;EW9_9?rd=J^R6!WND$KRj)4NEhupAeXg(SP@9t7tGs%X^z2D zg#T#Z`$*T;d~LSBW)4~(p;c#Nv(;Z5l1xo$ep5|yx5`z&z~wP9`3X;vM)xFg!8>xX z`}24@Cf@=mSy-I<)9$PsOj;)s5S}ZEil*+!j@&=&Jlk$uUO&s(xksndaNOkEXjjs& z*7&1UG^h24rK8j37L=GStm_ z!^$de3l9S{&8@@DR9U`@!Dq&jMSWMnSr(xO+GktN)^J{tXJV?Vf$!gY8XKp+)h8eb zL;$tY!r_*WB@9;q!xU&>?}f>rMX(fr;NdaER_N1llcFXO##b$h9;BuFj*ovFe3R&K z>lVfoJeII67huFhcn{58Z*$_$H{JQmvu%1Rmyd5#qNBsKJ^Ie1C2#h^C#gs6V!yPqtB77YW<>7NK$2IdP+Rd)~t zk=qcnJ`Agsb-tzfl4i5KMf&(U(FF+-MNn6H+ z77_i0pfE7zmH3{9#4}GSkD*tmIsLzMfWubuCUgYV%ZpE1;?rX2-x+tTUf$Z)c`GJ01Fbd-cZ{D$NTcrM^TE&TrJmJMf{ z%E5q{Z<>%e&CE=g2M9R~-1^j!wtl=x%6G9ER#w_@5uRKadWKuG_~{eb z!p|s;u)xcYXwwL=NEA6@pxz2oB#iuZbWj(*Tp4jw zMdc+bGdmk}X>JYWvO7EBKH$)BGX(V-c5L#Vx=~cL1ud;az$|GF9Xb%nggmFdh<{cK z6n46J84!;2AppLK98RSoRG=l5_x^bfxyazqnd8|jA|VVm%hMAA>+?|Jy313yO5T2W z8q>F4Ksfu1>FM0gq*Z)N7QlWk5j$fcSo*nGjwGjl?vR{o+uO6@xHSn-ZSm0`NB8qG z8~>tPSoMPEqrQ87Kxmw@)3gHI>F7zlMd!i1V-s=hL}ulq)f4H!PG&7sbNk_;&orysZB2p3pjg;O{r!V?IR$2zE$Z?u z`dVdaLhr4q@rC)N4^_`|T{zl$*kzNzk2Ls&=&*Pqma8HKPUVhsZ1?5U6C2CfY94#t zdhnvFUp=+zNU_F)qH93Mt((^{vx`VNG6AH0(1peKwqIg|ohj$BuikL=hto>=JJu$W zMwaMf_>b?K1&kpeQ3C6DY}OOy-~K5^a@#)Nbf2w_C+6Po!x#@sTUy#KS^DJRNmB4t z()en5X^9^zNjrIRRMizi1x2aVU>aFeWaOy##d;LH5Od!A-kV@I`rjS*Zxz{(aSyuL z+Mi}=uX2f-N@c1!<9$^Dj^Gl#C{jXN8kL;Q*LOxqRa1p!Vx7W{26eE9{q`Pu+qqHc z9P^S{!hB?eh{eqv{A^v`zEX|h)*WdBAGQO#g}1^FckNn&0wY)(GqDFgeiscX3VX6y zL&oko*{VUI{*0BCiJ3W=SO~m`)eCLSEtA2jYCryu2}wIsW_#P9Lw32gdNSN&|dejXmIA(BGYl1(>a~{|LI^*LhBgfjpvf+S&Wh@y`3IX||6SR1>pTvGo`9 ze|{|y6lL)0x0M$bK0G{*kUflhR3R0|+V6rOIy!B<(mMFtVx{ygmSRc+*Q3jXwG-Xs<+)!7oaGf3wXIxU*!~~$pb9_E=`WN7uNA05MK67Pd6c+% z<6_BQUt`q{p0!aGomyEA^VV!9o^@N_*!zR_t%~<-!w2O^9YWn>aq$kX(|{df=FVVC z$hw`(%wbpnB0>`-6n)Spg5J?}^@5bPSmcHCcgkM!;-VhnvRkM9Lff3G_Q~U;&M@x{ zc>lO@eUMwQ=t@5BC;3C|Pk{>@9OZax;hP#;%<``$XO@pgCq%3Rg2rGeq&j zy(<&xr^E}`-FMWTzQ(=cy_htqWf&Xkx@Rps@V|^I9gOPSkU}EP1cheyY-U<1#KMGz 
zk>QK_hd2td)~li1Zu3!^m2L3!@2sfa|Gs;6$Ib+lrzR)jM2{xgNgD01E{@cliuz4^ zlehQ(@HNu#Wk0(uzY7AvKrewrz6k>el0ba^X^U2BaYO}4?oZiB1HQwyR*QMwiR7sz=JtDCGx)Xys12 zbi4%FWW7)9sx8=Kq~pD0Hoxbcg7IV*(9gOwPeI@A^g78(;<5kzo#gR021}Ov=0N6r zEK4>4$(xdr5YQ-rnMH$-*SFUfbZlYT?3z}Cb*-S6mXRi5YWnW$R~E*P4^Q?jvB?Rt zk11!;#L}8R$+dmO@N?{Fk_--_qK?`<`NI(!S_P71e=dH$c~h(Hbz<7e2DKMxbU@U( z?N$VHflcZ_c+4U6gfZ6ZC=SIAdF0$&b|?uSq`XJcE+2S! zEv=dL+R_D-HAU`I;>bS?33xv7fu!vo?K_;*rvF+I74-Lk4iB`kd|T=3ln9u*8Iz2_ zVhg;6H6lqRLUA)owm`N7kQgYBO&)pSQ|90OS07_R<*Aq!z#qvCLuJ4~f};+Og;om` zslb9_P#XDHZwDX(e#3g3{u>Kng%iULKp1_xCcDF_u3W1IwgLY7)#T(9DpuNU61a$B z@{tw+Ct-d~)$hg`6yh2}M1-(5hs6H-$GK3MJP82gnMqp&vbj>e!}aRtLEZ=w=}{1c z7~}JqfDgc?l6b3S1L`(_ZuCCZB`?>S!vcYSouk+}Sul z6aoJ6b3@Ryj)%gJsJ1bToGYu~Y12B+#-9gKB z29NUMe+I^`cZ7jO_WwQ1OYg&9cM5aeuC9(ruE-zbdJ3j5;vSTxl?n;-zqpzC z^!Z>Kt?Pk^pEw2k=^8qNnLOs(j*#!z*pBBUOgCrTZpWcj`J80y=r~>P?yRn^4pb;Lx{*)RfzpAOS=H1W zCRYZ1^Lhv-a^D?$H+R(8&U$j$4{}Y=XL*hOu}xtxlRD5ptKc&5{(FDz5B@WLZ39mG zJ3G%_EG#cqLQ@hDp8WiLcqeFi*{C1}VPf(m4iQY6Tj$LT35L??M{`;{*aZ?1B zYq6wy>+Iv_X*U!f`rNo&iioA)W~5=@*bm|%@hs(XK?_+CG@mg?Y}w5BxNw+CuY8{6 z+HYfVmOB0Wh{)q}mgT4wQ4B(TGQcEj1bbh$lBGN8<(1Sfs#vLa6uTa8@7o*J%XUde z?&8+@f9Tbt4Awgccs?30b=te|n2vK=Z7i<{pRNdt*B9VJO`gH5_0`g z)O4w%qobvzjujB%FQLr8nAq+NoV=qoPJUI=4iYYbfYde=qN0{%6Y`x zU0bK>&t^!V_V$*0_eW#iFHUPhqsq0jXyV65>|<2dAB9vOv<`=XjDpf%;K0?e+e!O~ zQbw~__r~;eg*hG%A|gUjUY=j$aBcXjdVcWbSmf;H6cKLqp9)9Tm5nNg-_i~fXG32D zyKY)bj(poL4w2%p{wZnuMpVJ|s&1U3qjJZBK5yXdr7wKw=V)$u|4egz3r+zzNhF-y z!a`n$6*&=++vqL#Xo}DF7>_vWe#O@@^?M7xvl>6@-feyP?4G92JMm|kO$siV#@@xo z5)T|nZ$2_^y37@x5U!wP+s5WR%YO6Cpl83w?V@F<@%A3~%nbX5-=xUmKHi~QUA?j8 z%2rY1kIL7KZ}k&yULWD<^XJdsb|A>Z!^69sG;17I0q;mmOiWKt&!lVv<&N;5rsn3g zu6tRPH0g!WhlqlmHDrS! 
zS!7H!r*3ZR0t==2JUI_vv+7FROyOkRQc+Us!<1>UDYbLjlyUn_l=S0WhT%HmXiIj4 zV}Dh=s7QACCbkqRHA-~&ON)4f;8*|>)k^Y{0qTY^{CMq33&A))$*n65l}e9=ux&5+ z7y753K@pUk*<9x@Ero3qe&V@_6Zz2A-=%(#;%uAP%Xf9&FG`ZBq3*R5rHU`g{QUX+ z*P;f6t>gO@%xkOP`c)L{X_!lfC}Sk%?n{nNp;#|9)>E^eujenaTHIX>;g{}s#)0_r z(0*^j)4jGv4**ssa8945M^FnUXte!fD0qJ*ujZ6{F&KZ>li>d{i?>ENWIiA_ghNy6>8n;)Y@Z>9etUuOOy(3T}M zgU@O$mU;Y{uwz}ifXt3;7+t-W4?I}->)Jaww_dJS9USaGf5Jq51D}S~VsJ%rp-PR8 zmj^RymST@&pOeXW=|ywupxaXWw!*4;!v8t3@3hKn8?lJ@RzgU(Ht@$ z|L~sD41dUYT}t9XZ8*j4Ti~24`#1Mz>qr{S_?v=$j{YDACYc-8&i0nx_Dnr^6Lq+C z_5?&k7sIMLKYskk%*>>ACR;+mu%|N+WG#9-9L?>nvpTC4QnIRa)v4Mr7WtFl{W5x1 zSao%E?v~g?0b_3jyTu|&45A4ivmNbNWlK6&;KNPuC>G~`_*-m3rw5|B!=HI1gTJ|{ zVO_YryB3QeTMt6&!}I&N`@M2c>q7CeJ%s(cukqGT0$$n!ZRg8(%6DciN1GYvKa6wS zeF&q91sYXx7yfIZVUgckhhmMKw`|S`W>>ALkE$s{9HrdHxYje95*&Y@l(>JmgNCV{ za(3zJ<}Q)CU>5!0>K*mHZ|Ix&QJ23toFt2e*#2#GAA=VCCiuQLBgF7w{`S~xo)H=0 zWzvHX1C7;ULP}ZUSKc}uaTyKrvB#GHfhcL5#l0u#q;n(isK){?cKRY^{8-+o1&aza z-*4>#sdi(cBB=WCr%xybHD{h)yn^4s?|X(_Y>T~`;hfFXKh{pXV|dD^EY|4CIuv`_ zo(I3yfuN}&E)A{;f{bM54Jq*^y+Za1+*kQ@JE#aAK~#upukGDxYv5iF&}oLEv`-;4 zS;~vP7;ZKvF*8XiDm)Koy4nJ^vjq_R7TQ8cZlsX&K2WPNJkh`kQuW|)UuAb9uHCX7 z=j+tlUf~GT|FcE#yV&@9oppAtD7^>IV&dyocz>oTC10azD};R}AS6UuN{Wmp0-P^! 
zOmwt|V>AXZ<5kMMCiTJ`Ve=wHer0LU7>QecT0r?iyDn}o@E56mUsgDDaX8{FO@Mk( z-WV-9_?|+$+~RkgyQhDV|Ji|hMp5>cSS*1>5YKE&?i){5w$=5?%AotkO(l0Vf1u<2 zWxqEdW0(M;?eg)c+Z^JEYr4@M9q!)^Z3(wS$C<&R9Sa^;eWHjh#|54$PHHB)lS!?+ z@c4+&vtnoOfsrE9d~sJwvdv)(X59K8RXRy-*^+A+es_`&Xphj+Zyw%kJYEyuNiqKY z`Y+OA+tBvSWNLJ zSp!Ys`0i7%!lgc4TbuTo**kGdS1GAvoAM-2q+Lh}6}IeU-PbDU!d{+OTXQ}nR*grD zdd!zUQKkJZTzmV?JKL_sjuW(|;l9h2V<+t!u z=5fdz4tvXv$bAudtDP+F(du!2U7PCc@Jj-O4(8%=uHWb;tdorjv5WRZ% zYtjhA>*AX#Vo&07&l=CADEm&(^oYc$_+$XzFlmdMIK-jYZJXKzPH?%oK zn!CjH^)@G>Kg_@9M*01?{5$Tv)L;Ac&_IDJ>Bv({emrmhRWm2NwhuR7=2z~qnYHQ9 z#zyZyk0z$dr*m(Sg~fm2HnM8FU?hwwLvS{Q&#ip&-sYot<&bxXT>rH)O{8Y4*hALDAJaWRU|e{^{Q&zva{*o$NImI2GFDTR=(d~1!G5Sd`l z$$yzZD}xk+@LBImk$gkmG7#0KRk#>!A6Wf-7W1W7ZbnTixhz>I-jH8ge-XnDDvga= ztmQ&1s~ua1c|#Nz{UE7;@s&dIoLysa+`;5Z6!v4cE)*|YbNOM2RWdt2;w>MdJG)BD z%tn7?OV~G&enK6iPxDwOii11iq<%lBGul$OTl$IgO8bDUk%e@d61{__v~0im>BV7| z(zAkMEhWkIcAd91AKac;%8kpokLhb&cH?yW>(|gaF68+HJc@T7FlWF~(d40yPQDY( zM#v)>A2ow8+~h}d_)`0*3|=pKpev&Mr!8FG-{a|tR7XFf2t-0^iNGDj0KUx%k6ve zi!3t**~eCp>%po4+XEkbOxQpcIahJvyXd#?`h`ROw-Pi{XoZN$;0?Pi@_268BJwq& zzR$Med_lrORAFpiQd}F49`&iKH0S%g0Ql1MOUKo=QauT?uls2x`&<%{r?>sSLI%_M zkOBd*BREVP?Sfm=?M1!cjF@$H)K6U1vFXLLJk~jE1{zIjhxT{+8U03ecH=UVj48~! zHyeQut&A%0h7_?cI9`grfB2R1$6a2erbs=HHc3C6$Y%s;u951Ql!A9H>J4T6buSCM zlB)&WN(D+L)lU=&^KL%Jjz!>nt-SA|f;>o7)s>{x%!*(yHP*d9mNEO;)IqhMb`?*2 zwEI4JX||OdDLvW6WE6I!zjy6TU1T{T{7R@O|>TNn;`>~Wf%6YonWRay>kZ1lA{z4RuS=!BoVh~?6%(T11 za3E_j2K!~)h8|t_y=^J|VBr%%rHqJA4^~*1@1*WKO!ZkQ=?UG0l{O@4ce-RCi($x7 zkhw+}{T2ujx${Jt=AwlfBU!TBFmy`v8xqndng`+50on}HV6gbCtR4#FBFg9jdaZC$ zoaqZvih-(fl_7szwHx&EZiqYHQdE`0kuRt6d8j+^a~$GyZsDsZ_`{ez5TQ~h-qT?E z7O;z2;wiF`SD@H$**9{q#%38DGQes-=yYSTN)6VOyi(G=UfrXINRfx)ZA1C9A_#x% zcS@9fq>=Y>-M*)WLaeo|&wc5=jC#E=WMU}tzsa3QM~O%#-A!e(f;}d(k+j=1(N%(c z#?$4UFQNQTwa-ebglM7RBRJJ8(Syve%0opvGgmO>&@9my#A}{^zlo`U2DyZ+wQR0h z1vwlNw!P@X`Qq? zoIdM_3X0Im(U>pTuM+g4_$!3Af|tZejsJ|FDHMK13o)0#c~DW9vyTCa6@RuEFccr= z2&hV)Rkv#5ha(|Hyemz3rTx0b&p`lO%Ct)Ugq@pvCq(~xlAH%fOiyyy^eA4B7L~%? 
zxo?Y~WtZr$UDzFVn-w{WOgC(=&I8iyK-JG%MMf=zcU_%Cm;0Njo@@-#X+k~rz&)P} zmr!t4Vo#7@t>qD1lZAgjPnGsHGx0E7;_5G9ZRWM1XCayTLzezzT%3~Wi%|N|C}vNs z=9{p0cac&YbMEUsn0QssBfPD18>&6JI0e6Q>wpE6yMC(x!4&gD9_icCc^*qEwzQs~ z_BlzHoTr?Bkw8HB2P8EGI(h3bj|E<BC>9j9GB(fq?O2)CZZ>}Vnn~1r1F869Mvd=7892FH9P~J_0Bk5X%}1=eDt9sx zk^7mb;rurTY_oEQC|`9=EPyYWd=s>*tZz}1k4DYY{Ap8=0LtbQ7F8SPFCVGz-A*8V z2Oh}a+);@5>5iN9}nKqlP|ez|}|3i&=% z8L!uGZ3gymz1cDs0Ss`-*LLS{ZShVOT?%93mImwQR5l-tlShZ7IEb3k$*7(|XPCH^SCjX-~} zE822Sqk51S`FdAT`;ew1Y4X9YC)*;P(D!K(dfh>7h;3(3b4A9ktM!N2$RR&32(B&4zTW|#gZ+Wl!h@VPZV_Q9R-*!k)t=vOJNY*_ys zT)1}k>Ddb+xw1A8vU3!q2w_t#;e7l;ze45xvN5g)Qjj)508zS&9pJ|^4hVLY88O`R z!JqtIfvYoc{Ip(ji9J{h$>-hx6!mnkM0D2^4L9XW%pk{j@$1)Zq?K?e`XJTMBWvM1 zmo(2Z5EmE6LZVSDw)b9R4eQ36rKREiV1obXVaMm}9N2=P6mV-05y>uwld`Leva7U| zCNXV(pLdtP|NnliZ0w(7$GNa z?Pi+#ntQ1TnX~hABqTXfbnQ93AP+&RJGh*M6!NL3td==(zV}AlUK@Ky$ZQToiMqf&+>3apa;V_Cnw=2gsaQ#_@_J(TLi1LhdODl+V z!)Pl)ly6`utpf0BU)B9$u+I5;)lFp+ykVpN!j73b<<~BbW{vYdNXN4gaXRGG)YMd0 zyNW$PfgK=oXHqSzR#JPh66DTl#hQsK-0&>-QNoHAT;Tmfh_mUz-SizV6`m!WO)!z8 z<4$<0?+U@~H<{Hdh=>sVo%q_T)OY zb+4;ST*{>FARs`^WQxlm9CXv|)`%>b9V~q@dgTTDyuTnj;$1LeLO~50XX>tHa5d0pnD+tLM`7w~gua;;j zeHE?<&U+kVS8#{*BlZO3B5>QSmt@EvD!*>qU+*5;O8i-%D|g*aAXJH_pY*xK5bPCs z3c;iAp(js$c2#y?fJTW~P+A0*Ri*gKOK?6cbXu#1@byw7Qq()BW)e7d>s&37L!$vw zc9jEs-0fD>`a&ZgDe-p};kmvPRLD^wTUHPTSTWu4-&$N?Ex+vyFCh9#$Yo54?Ga>J zuqYP6vAyGOCPPrXl(;|zxBeb1bJ>hd)r?>%d2{ufE!;8_`PXrNK|k3u$YqiF{0uq) z5U}zzjzMsyL%mPPNCh~;^rz~nN5eCkb#$aiNG5njBX}b6a)>%5wNyu8-@L~uBe%>3 zZL?NemlM0WAC~JHd=xoskqY^DG=6zcrJkEAqz@H=?=pfLgU5M>;qo$mrf>`Ge7@9> z0AF#P+Jl%gPXX4x_=T+G170NK~nRR+b1^x9t8G1agIP_?9WOcWp@hdy0M16<@nI)D+tr{R`}pG7voyU(x!Wn+Hgiok;1bzOX1-wK>6%EN zVw5^GV*?wF87yA~v;)=mNn}ze0hC8f)ZOzTLzT-4LhZ2BJjz{6d-$oi+-I9 z$=IZ6sAq{5hXF*!4VO(i_xszb3RdRp6nUve`*Q^t3_j-a#Py}4<}cyQrvC#WxV*_# zo0q}-y)SAO89p`*#)n^*P)lU4zQc_P{F~##6keTBAp;o$ggt|Y>A#KQKRB%QH|21< z&w}SF?}87nz*UYtZd`%O`2p%$0Je7url^hAj8>Mt*)<<-*fiRw}8|f5-mVv zv)FVpXdoepQy@ut&4n+1rVHgBhxo8B_kZ8i 
z`S;=9+!Q<@w+Dv|!D83A@KwW$%7TpBdh*Cp;))Yvjn5kM*_7zyQKF2uzs|?LS3i*} z$VbK>dhNIWb_%lO9JCNgh<`iB(Tfw1CUp{Y=Z8;mHuUPIGH<}$|4F8aop0=H5Pjxn zIO(Fmp*4>)gxNFL)!`37D+hz>NJPTVVu#GqtDB8uHkaRh^M`GfzrK&dt4PYFW<5w@ zhFqob?J(}Z_2MKv--$2Vn){R2Z}y87oo3e7x`QsmwWL7I3;ph%o`U>*pejBl%CPQv z1ayNM@4v!^z}jM)&5~5U;q-S z0BwW!MYj)T;eSJ`aXXZV>k#H-ua@JjPft(B3eCm~UFFwf0|XRn$4qH+IbesI^vHWR z#-|>4g`u>U<0}7_`jYzZqn27Q_%fQ>+S+Em*{0{_1|I(90HP_Ul8K=*`y?6IXKvhD zRbz*jkg@Tf>|`$q=VbRS4tlQ}_vzc%<(}2!lfzxiF~a0Lt3vhW#)*aC1ch)4wQ>M| zT!@^aMY#BVm4eD3vs^pgjS_~Q@8s=PrkUQy)vZ%9JHifHkhT%!?Lpqq9gEBQ_wg~l zacLG=na9=?2T17E)OwydU3fi0@@ufDu^fgjYMETw5_L#=qbV%v18Ip`oFZy+5R|-zTQD6bAtolbU(}znE-ob%snu&x)-P`qHkv=nwu0Z+-_P&d9!A19 zS9S-3*ulXeV;HvF35GfyRGpM7Dk^~QmlqW^3}%t!b!y6+%-Dz+7#I+bpy;3O@}2UmRTC4uVdkj&w0w_!%-X-7O1r?%k7{QUf&$1qx`wT^cP5+@2X z78Vxgpvy6HJ)Hj{Q*6BTwDplJOq~o8uv<UPpmC2!Sa2c$ixkG$W3pGc1{GoGh23iEmHl#ep;qk}7v2mj zV7j*p2xIOg(<9Uv-S zx*@|k@UJv#4u4pb0(Be$moVw{BNx1!JcPR?18 zaBahAn*)kjQsFX9FO0fl47;K$DlffOw$D%YR+|^v!&sbGZEMazx}bw%1ZEk1tGfuJ z#msfLV8aHdL3v}@%G5M>7rDTM2Mw1lrB*bQ@Z_MXqgWw}o4w1TG!5C3;v@75_s_b# zs91FSs@`r5oURt8r<2-$;h58LbI3RsH83&?JwYGm>x*feen7(8J+M!Xltm_o9MS`> zX;Gt^WCt}(PeXTo8nfp;Q927)<(Zb<&}Z2Q_9q>a!vkJPUvzLV!;QDQ&W#`BLbiI{ z!ghf;6J2qq)SAyF5qiZ?2I%Qwjy;-q z`{<3uP#v;zBole{*FIMMQ}(jHalRi@;jG$F86L;SPH*_`=UsVuudDF$H-lUYukE}L z#gX_G_7irigt{K~x2<=_TPLP3+;;{?M=9euDPF*df4vyR7(@QJahT=%Ms_B=*}%SNoQ<94-)yw3)* zMr0wncLRN>g!Tkcju^uc+M_YI!I+<#%(YrdLA)49NO66W!=6cBWLw?TpTuHCw^{Lz4h!Fu`OkCfeMer#YKxBmRSfYp z5>|eeD->$+zRpq7>OSZZ^s6gna`PX#OFR$%c}Yl~MYea4A$rWhmvBOB(G-?~1tOw) zEJod*D~P;LSF_n?JtAVWBKX}_$e9*>QCEL_<|uJB7EKpWEG@`bo>TPhsoarDKG%fg zKmscu#%g7KmFEdQd7^y0k`j2@hi!e$fi^8>iwb3xrI97%em~hWkvij1oafuX{-)=SAh-nC*44gg2g?zPN9> zye`hu>W7BM{Ktg$4g$TxSlC1U>k=Z3R6m;BgI6etQ=8kw)Zd{MM>+ zBlqR;0zB|~(w{Es4fKm&zhsPn%Vs{x+)E6iKKm>Epbdt!)V@Ail+5jlK^)cRGUMDIp@|2eL4?k^YTYkJB3 z2R9mL{{C)>EfO`)LK#P!?%RIJ&fa)+dFChZtROoptF66V)QcSG9xeXpwP#zkU-PU; zl3&8eQ$_|^iQ3fkbYet=AfraP3D$oF)Z3DmW!4lSG1J@4P<`_?5G8TWP&-bYlg!*D 
z+F!J4B95~5lsKv8v{|pNcg^?;8NUK0r{WAn2TBK*((O6}C&fHWRkNE&NCkf;fmUL- z5v`ELQ%gGdGgR^&b?#|d=Q{sekQM8;{o-)8nw@M`vK;08frDwVqeDCjQb}S}2(6BS zfWH1@C!1_z`7u=j=~w#JU-mLRLyQd7i}|IcgP){fYHDi1PhOhAW|kA;;!_MQ)Z`X-Td5iJy zebHTJ`&h`P+BBI76z@Wn`1P}$?yn)EOEH$W755btei%oMzQ<$K&d7_%Pwe4$|pnoGCP?% z)^+yba_j3f(0OqC4jFE+TI^FO5I+*&dC5F^5pT>!yB#0+&amt1UdLDDWyCmCO9PTd$v~<6&-wxAquN1z%kY*o z5pz6XnAw1S^hHnHm!K@NrAH52060?c2jD=g%!XPW`?ae-u~74O99@ZbeBoBV2dVjv}!V+*%|UM#6r=KKQK8w zjUS)HrO)wFoyo7!`>A<#7ib|!N=iysFE4$d9*3p@wXzG{EC1N=XpX@p6Wt+b$w<-s zkiT9lK!Lm4wqMxEBMOp^VqdsVeY16*vi%7B6kzYYHH>Ql|N5Zu8?WhD{$%Y{u`vpU z%dS5cc6FV7OHP>i94izl)+BT%8Z|PH*=PSs@+vcTT;2AJsY}idy!P|}M?#HtqOvv4 zmIPh(ypMP4%pO5kpRFxRzxhR^FXa2s!j6ek_s?H?e&+!pSa2xyXq~^Qgm$i_g#^w> z{;UeL-KrHThx{_K`>aEtUq6SjCa|ze>@GbMDOF|D{9Arb6~Kz-j$UWV5S!x#zCd~V zP?xLs8AzAztSbeF<4KDzLd)wV>Mr$=V-51g$4s)91bfKpwWc9WfS>`kCSJTeMqjXx zLD~GNL-q#J>o0ON3rC^;KmL{}R)0VI&|hWhR| z1vu}O80fCs);;Z=VQ;rug?b<|S^5pV;KG_#UjPUAu`T1g7SiMr)vh9o%+yOOv-y_4~hJ_GGsJAU(87g{HdayMdd4T{WJc` z%iihUJAaYQxsh$+m$jsPZhw3TwAQo8*lI#2W5@OvU)bYtAgZo1zv=Ujf4w?O{%}id z+Q0flq3xGBhuW`cDS^E5#UBSlgW9aMs^S#Y;}oek)=hM=?K7XGmhp9rK=Yylw+TQ5 z9RM)Htb>;z2R&319mlVHU_k$5)V%(NV}*)T2Yjr!V~eY{7i0#JFTf)xb`y#BNBuu+ zy>(cX+txn35F|uEQ9%hsx;vy>Sac(e(k)0xV}XM7qNN)Ilup58pYQOvGmz^Ta-mVy*3|SPobos6sjLCNLt6F-C{_Yq(xC= zXu7NnMn+Zo`TjUoC1UsF0P}_tubY!Jud2obe0?5)ILI!^-4)>whsH2;nJp|S zMzCN#sI&EeLcLZ_;Qe%aH9gKUURlF<*+`V>LlcJ>q-W54`rDtix>w81BNa7_ZgpRInt(~h& zoAH4+n^0O-L>(c)EG#UA#l<-jZ)K~f0>t8n_mc^^5(b8(;cg4gh?R-V7peA zFnCjs<9ru?($OF9TkpIaQc~&X2Gce3k5zkjpV|6!apmoLs9QFrdjlR4}N@LVZ53u|Nv#2Q6G} zm=MXR;(AIv znK#mUC0Ua}sPCCU=greQrj$zX+01SZ(!H-x={a9H3C`!&rw1U7obImG!&tAmlRuj{ zEANFnQWPiz1O)9_u99^(GUZ!`8CyQkQaQA%dsg+OxmnXW>1-ebJxNG~HYhZ1B9|WC z2DuUF(wWXMIaXivbCwETFnNFU!atm=Y|2hd!1MEbL_O@^8#>uv|2&1t6!bnheupfw zKgg(L!n^k5TkGlTRK0KH;^9^`j5htZf|PP5H;<|;RUJI1DUTd3Ui^g8LP;d3blCW_ zG0{d{$a?-9i*3ywEdrl25ybwL{d1*_?@ilC$<(V^zi)ej|F(iQnSjhDc#9WZCs4k+ zbg5GrnTN|;+*USRE$Z}EwbV?PDc0v_`PhsY6^naOSU$062qKlKfbFA 
zzm(>pb23`o+dpKsv3ohG+E960NXq@!q$84v*+B0SFBXB$(^t8qU^%>i@J5(YL5qar z&xKzGHBfV!h%%J^=V8zqZ3<=@S$Cd|wy-Yjk$Y5II-uC?dPRDgkZh=qcU}ou(6smECBmM{Tn0cOud$wsYW8+(Jtwv(8f4jP(fFi@o`&+pU^|nMcm3vlYRuroD0ZcaXIWvC72X7nc z!`~JQbhuAP zerrh*XkT=nRrt=3TF%2b*50jz)bo@vTU{EM3?b=!Fo5b;7BCw_m6w$Ii7)1)oDG$JS!1DoCG`;K;Z~^BY?B!&wxy`B3Vfj`GK1xY@nqABu zb0Kzf{?YzX(dL|J>upW$+xPoIID`^@>_1#vd$hZHcon^!eecW}3#ZfH@my6zju-H~ z=Ld@v7B#1QNfn5~$oMum{1YlY))`9eUWQIsn{A3?ie+2B*h&PdWNyW_DBRjKor#o= zs9pmTCE>{(n=dOzRyyPM(br!xAyq9b?|x4?-pfK>S^MB!TC)DDvNA(*nx-yoW>tYd zm~bT^>{|{XVkh6}Db4V#8?g$y-(5{R-qKtX-5t+vQzf_k9yuih<0tBmO51_a!C1i2 zQ2t{+^+m^YuBw%nCtE!CY!}{@Z|WUoWmT;u>2Fy-wk;LM$A?=B^7fmYh@A6!{vjqK zT*sMIZQAG!4_(6Gq%p7Ko0_9C6aR+$2d;Lbd<#2CPW(IVvM^2JWzGIV(5*_O(s+gs zIn$!$2IXCK`)h6&XUWy~Q&>)~ph`kFFc*nOrhd)w2bl+N;V>@^D13O8QMJJ8Mjpo0 zszJj2;vbcRQtRj$?AaUe1O)eSajTS&+O9eE)eWxszl-bWTW@wswDW$bs=Nu`Sd7v) zh^aMgk45ypSMYm5Js_IKly6A6-B*Q~Qj#+>eHAZC*Vxl>cdZi;yqo?Sb|sA;;`R2{ z{R`y_`{=Lt<}wygX_2ZZv|)|p$fPsnd%3S&H2N?OZYO}rvn~d;;&b$;vD?AQ>c;Bh z;^Hy^C%-sPW&wdf_jef?)pkQ+xG$zuk}TedHfVX4|X}a z!r9cz-naZ^Pkvdqnw7h^WXb<)8)YJ(lg2hFXZd{pli?y!!``&xMyZl&k1$qx~DoA5aFY!%&{Ypcpn%wq# zcQ;a8gPU#D{Z0N@UtpSnepkoq*fqZ0NxOuYiq*E5D6{4$6TcDJeSQsg5)0wiQNzqj z*V_a#qA2~FQqz1K8XClVFWA{e#mf~GdY-PID(B*4i>s-Z=xiuoZIC6+AN!$ z*sL-;`%kdnQ{n+OlLY4dFOqb%%(rsoBVhKruT_^ha(#4Uq+70A%FK+vA;g@REiP{T z_D|k^Rdp@NJoYzNWAI9qUrA|hXy^0s{?1jc3S`1FRhV=HCsB|mk5aC#tkg`H-QP@& zns{XGA6tBXvYvs73z?b`hfLKdmGTNL_ak>Dnh8yzS(mrnjX)C2qq1HU6!`l4tWKR= zBIgw=s5e4h)T_Weprm4&;$Alz-!sZt5?!6^bYbp?VG!S@rmVWrC#UXrPmJIV>NsKc z)s<$&Q{!lp&$=e(`_kBjsS}gJW4nSFy%(m2-OP0AkBhLcnay3SY-$<`JYWiD#vOPOoxv?x$)s+r@PU~pA*a0fITN1J1PUf% zU*~#ohwlN@OSj1MEmIOjJbO7BuPT}#nN({7VYI$6ohb`jd%IW$TN&L}A zFAYtX8T$`0-mw)y8Kf8UZCtXPz^9nl}yc|OG*qh@G1gANC`5}p^CxfSh1{}@JL3{%9 z?ZvlgXV}PiijCP81-=nkE2zYCI;wfA?-QwmI~3N4;@2@UDhO(>Z#)^0_8W@0rdqBP zE-(Aarz6Jx|NnUK^nYGa6>;6M=jYQ$dmwY&!aQA;efr?ZPqs_^9rzRPP%o73f0o<; z1Ueaj>)3Yj+uyks;fF4xla^{bg@QbuYp=$Ni*H%Kwlrj`&9;~`>b-1uW-MvrD6N0b 
zM<%221bsfoSbCZoRC`eglI^XFi;c&fAu^ZQgzt#NMwj{KrDo(kRWrK7#C+x8*p@Ev zRYpmhVUvW=cW7~&F6GeDo!j2w8p*ZRJ3vKUE4VYUvhMxEi2abwXvX*b8#$BC)cqlV zECUS0O>?B+1jYPEox9{6Gne@QArwZSD8TlnP6+BW?YEK>9j5g-mpe~sDY|K`75oy( zMCDGLt>zPW{doCiCu+4lr@s-Ea^&SB4_3-5#Kc0ZmpT1Ea@k$F(AaRe*2;_3RxH=E zIJUJHaEr~mD`!bu=BtOZt4eDKDLyMJJ)x~a6jd}ck8H42lS9_QhkTJrO!wEfx9{pX z3;s}5vv5EnQyVtBdaI`VWDiWrx0q!AF$tqA$7dqG2FrRaSNSPuIS@eJgT8*|6Lsi6 zL)HqlQC!?1W#qL~4Qj{ZW8X@9ytEgcNmF$BX$R`8%!B9KRHt+{zRvKyic;00?g=_@ z#!UI(d*GV$R@(~3F7#?}`W@Y?O$NoPr4;Bg6cWx|LJRzQ)I+j&N@@?{TbOz2fY9Iz zQ+n2c=7V-ikLE{DVeYE%k7aFf-=x@VRVKZk-50V5QrF#158<*AMK6RTieNQ9@(avf z2G$&)$QpDj6)q^!{ZY?m*Cv*(Tnzx!4lpYAJnF8oJ)RFQt1J@jWiyO_XLp^KPFxur z+36huQy71IKeqAIuQ|X;#GBu{Hw&-SE-z2Me%DxWgeC2G*~E$Hae9*f$s@ippCO}Y zn4g3a+nx?T)Eeqqv~T)lz!{KJ_+GY3sGLhK!?^* zQE~c~-*6zwf)4HI#QG}Q3so%7b*?klbcaT9(3c=lJ{i=$!Y(r}Q&MgPZ8>kO#^|Pu zRVeIUx!XdT* z6S?1xAGXjCwbaU<`xqyF%9_foBO!dp09t`d{rS5-$J^M8Q?BI&F)X60sa-2; z#c5+`3$DzRaf$c*xH0PpiNoah8`Y$}?sS+;wk4^}pX#;#Yi)6?RCgyrC08op`Nk{W zLivH~6AkMh>TGymyqe3=+(=G@)*%YzYZ`6dviXYk+ zG56@u4jgD-D!^xuwRjq=-MA6<-n=!n_!(T7;v<*@f1Ed9yw+QM$M0wuBE)pfJjDh4 z#>fwJ=%9097vlMB$wPmr&U35!?iwvD^kinKGU=x$p6{JJMl+Ug3aA?3$lG4$)jqSD zKj<{taRo6K(F!o3<4?uulxat~1Qqiw;>Ow)>-o7WjH9UCDu=_M6^T0Qp0~nl*PU!^ zey%qBUKz39=R2AgKR0?W)$br>i}}j(&}qyd&x0=yy+&xlMY?-6sC>By%$(}qo428a zVI+!#t%SRd8J)OHgNO5)ltBL~GG{BvM-Fw*g z4(nikI*QpbYkB|cjKM`2VO&|0jE5}&;*t$-8GRN0O|x?I$JG8e%|Zwh=VURfGF`4p z;%`ti6PO^}bEm7Q40ijty_^vdQMIySFYlwTH&OBUeWu#pYMP0?gmT8r6`cO6tsgM9 zt@)|~v)>yZv#udAH)iPrc{jdrV-u59m3HWV^l*!2D3r&@SNLhC1b;#z6VfuIG`F-> zQ3 zNSeBfO9nSkOs~Fu&3V>e#Gy${t`eRAg&$M7bHzzf) zaL~7HQ@Ky5=kzi><}a`{i61th>gj6kGHH*J{l7Jf++}(Zg3)GJUe*Mpb6T{AmGP3_X+5BWiee%u$=_T(NHmIn6MDhOF zdK$?39Nvpw#^1lTG_NJQKyW>l$@cDJ_=(2~YLPn`vr@Jpl_8u(>^hViWct^48(UEUVIiJjW zuJPC<%G<)uPE}b~V^&jnanN#6;9UK$T+CNnpi-o8`fxb%o2`PBEQk#&;F@OX)N~s| zyM%=>6>j1y9H_1GoSHT|kBedD8NozUKkg|MT|vsI#IcOwr(^i+lmK8ALz| z^yIEJ;{)qOq5f%c%2gWZdi8*O&ZY5bYV=(EFmBu7^I53Sew(~J_Ngx}S)%)YD!Z4s 
zL3$4p0S^nMI4Fz(%24Jr>P#?eOSX)>rrkMNTztt;dxAA~?(w55!emf>I{VpGGT{r9 zx}PF9~Jj$PJ&(j9*dGjl-Kg3{Wpr6Q8pkt4LaH8>D(_d1@QA znS@I=sz$y(jVt@NfdEzJ?(Wek8tsA8u<$&=*;~^*(47wKyB7CqLD9Bd z$0a086{H4BzodkVO<2vpxbrTzd&ceWTam}585vK7tbruX{ol7%R`SDJ#hz8#2{>hF z0xkP8ZyGhahYvS#g(^%(R~==fy=wp~FC)?ki>~2!SgR%2YBvpxcz~7}lpW#i6VQ4B zow;o{G9V(E`a8vwz}Zway>%HBpb0~>w5BGZ))MBP=;@8zW|y8y^*et|av-Z)Vr=1t zxgcPD?zDTW2EE-#_*#?8#!M@GG-x1zJXssO9wik%fx=w-x~7i6h!V8cz0v_Me><@m zIs>5DS50Kjw!0MYhKL3_2%-Pac9ScLibB1LU@AkW#lm4Z`3o65VxDDr<>rJ8Kidw*8E?ln*n@Y!x14Vw$H*3RcfFY%ENiX_G`V;J$!95A{fB zwgJrM@+E{`Ax{{V9R9J`U5a5%>kYKe0^f;c_`F;TfLdQL@=|a%Oq$O)Fi=zz7ujCOKG2pv%4%q)!we z3An$LG|S^kaJOGD79OWbd0|Mf;MI$XV4xSzn`NYBuuv`ZeUg+R=v+nfofiE{B$*&N zY0^_E%x@4we&fKFU~zwfIJ^J+-8+hH>>)cBDf;LAb6%Q-EdtM-X2S{(&Jmpb{AYq_ z2o(r_8ZG=QKvb&XX1T$c_;=k*u7(pj8klI=+jjRkPw#yj=EaW#T`@Eyw-!S$TmcyT zpdP;vFR))@LLXZ-f-v)Hc%OCvI}TcRc!Q*)S zEn@zT_xOSV0T+jpPSXF&pQmtneTmsU@^|$Q7ewggje#ccF#jF(_etG_6^?vI3Elt_ z^x}1F;PXinB+dbUH_wU4`VJIF_;Kgij{~703Y;Jb^rbx*DCGd=RRPW}U{I|QMd8j7 z6>oc^7?|A+LeQ#%-v9Lg&~6Qz>WmxzJ^?0DL1R1+G(SKR+P(P~UIGpqSof04=VHI1 zR{*md+KxY+X+H&MKc?nN37?(mEtq?8NNA9 zWv7=75KXdwe=l(xBrL!CpC4bl^fBFR4BbSkopG`71%?-Z?_r}dm1gf*z57 z&)|s^j1$Sxs|K(Y&DDT`GesZ(X3eke3NmzsN+`M%P!Ats>HyQVNb`jK>>!wc>XJiBeKay;# zrmKm)zvz`+c{v@?2t2zcwRD-z^vqH8Mpk-;`hasn4*;-njukIWmUHK}xY6nu+&yxu zj0=u469bO(d7$+qAogEk0%U;}fsJ(1^#GnOI`#t$rlo8bcq)$;emI+=Y;TmtY6D=( z_#Y#71|z;GWujWrG)V4$_lXG%6*7&0_?p2Q3Q>U50we_FqzH%U5frlwZ@}c44AMm# zG<`1rD+A6u^Pc-aUZWxe#X?}W0HYTCCcL|ecX4$I-ONsWfj`m9eeT@F`)B)yIWo?R z%-}_NL64XtVSEMDjz&pljYovkbmrDZM3Ywn4b-rVN;d=jd9KCjCK&f5E z`uC@RgOp7q@<-lJgWNJ8U7eH+$P=vSH)jEh!EmLLA3zqc92v~(qv&7X1QHLZ+nM(# zgRDzi5R?l*b$3k99v}()OhrIEq8}gwvpt+y@6dmC%#H?{h6SY~a9X7x(i=oz&6Q1ge>)^{$=SCH>X9A_o!*NqP)K{5gPxflmNsLF*e? 
zlPOx1JJxIkzXr8N)vOVWK(W1k4ip-?P?vBnY5@{8!Why09MV0y0RnV8b9(NtQUi?{ z*r+eDIe&8zmd1(#1m`N-1qrlE?m?gC6R079?*c*%?s;rxHbOwKBdpJ5&jJC;oEQ=C zUW%z00OZin2DQq-xWk3s4900BJJ{b2e`^72u7_{bvCL8qYz><3ie(uJ4_RVH6Y!_?^EF zj7kKn%#g)E2X}&|qO(0ff|GZf5HeD0Ald|W05ArCJ(tl^AVW(*jhtO&7abVYx?s`@ zOrASq_lsp4qz@X4>kEmFISuA&d;h?qpE+$ZH2!Ry@WR5Qwm@ztM@wq>+U>B?%eG)Wj zFk9r^HjEWnf}GzPp;Bl8XUVvYm!om@KM$Y>vKcTWsR^&=@!mSyMbcmY1E+wkbwY4Ja;)aC`Fa!2I~{k0F=i#P}dO(Xj>a&I*W!k3JCIo)rtcED_ri9?N zHejGialWtUVWH#W6`%p$1uKCGa!`SmW#m+Yimy)PKN~XI;I=KE3V(!a}XK%kZyM%O}YX}h(2*QTwLNBkYSa*j0 zEYo|41>!G(9q4I`-SH*==lwZqqHio#Z|6Hk_`ZXLCh{X%ixA#m03;n=JQm~i!7=yI zsvxbR16)mWun7FHpX07q5P1Swlbd_qi-S^pu`;z>h_2$=u^a zr(8`pArawOcbiWTO%m~9uOd_lnmOO(T5YJ8D*ot2_sHoIk}Cs4Ne(TaEFUZ)ugDD~ zm&XxP{w0hje{+Gf^Kb za6cl4<(j*zytvRek9BuSshvD zEx{j}N<)p|`X)u7Tc0g)_8_B{0LK`?ky^4!^g+?lVAB2xp;{4hed^a*+ zKFw3i7ueNM7frhiV8Nrh2^cRyLja!zp?kOOc*IY0Ts-e`Q_m|UE`0CrtCnfQ6DL&T zro_xmkB)dwRbD_kgC2>q1}JXu-jF>MAIY6VD1zTBa)*;Z>53s~D7e=Rp8kMiF2)4< ztb%4Dr(69Mf{UK2^(nhqz|Le~n-?iX(K0;+?jR(|MVcGGygY5X8t3-`H7bSPi!0rTyo#Nm3GUEVgl(cGNSztyz}|D{edE62g5}2<(Dn!PZ9H^l)SK{m;|6JMEq` z1K^YXQSx$?=7gMn=gm{s_?cl(fZ_v25^b!Mj~enT>_@NxuXyHc5T&sMklphXk?iu& zJ+fkzHvcD`w+`KiNy;KMZh7yWJ#x$2Q~WvM^z6UYr;szX`01J)F`RQIU|_3m zC&oeE;$!yKHP9Jx=D^ZcTe$RQqq*I078hLjn!NU>gy+!mW$~53&rcp;1Tqx7Hc&B&Z$TS-MD~F7Jn?2!C#9+^t;<@{ zt?`42OiPa$xpfN89&o{?E&r(cc^NfoSzd&i(O`yDzk=|43thk_&F{|BT($3yhdE(N zNSY_^Zu`p_8R-NS$49=<-9kiUE3=(FBpyW*wd*X~NoZRmyImT~a{0N}ycfKYGPjg0_+)&KmC(W9sPCPUUWl)EV^MB?-W@|w~H`9;RW$lhp#@<^1QfdTp1c0$F@wu7ryaCO)#4lZ$J z%1^BkY0xQOee$h_>XoQo8$#yV&#U64mIG8ci^Bsw4!+%!iUQ=j2`M^?2@1mf!G$p4yjuU)2Gnb17 z0E=(mDzTD4M+Tucx^RUO;)K@>%*KFJiI=k=cN#kbBVaCvwj75n$W{qnm_u9DZY2Bc zLfNKSizN9{K;%_oB<5Wysi!|@eBpoVk%g|x0DdXJ7~Cdc2-#s|9zaxKXKb3pqnM%6 zeDtt=P2yR0GIX&tBKy7q_<}THaIC=F2kQloRsK2v2<^mx1VR64l=ti!x@Ky?>Tu`I zE%=YU_!6;q;ameSsPi2eAKqhf-B~|NNYdQUuR!D0IO*K9)*g_VLCFx(yNCcI2cYLw`KBh8bLkfEbWB z&Zqgha$|aHaj>F-skg4RHMOr@LxVaiyni5jbW~-)j2edVSCp4qTmMMv4^THr6|-nd 
z1|XWL0(Aod^LsBLVR)-Jd|mLObQfnYeIy02Cx)Ov59|)wiDH9!?^y*rkc-kA7*zy= z4uQ1|MU|J>2yANw1;tNXW_M3#Ir{h=4YTWQyBnBZk@dZ_J5zuxyqLPtPDjk*pA#N& zcV+518tN=NxeF9A&l$&v!ROF#wM`EAQme=hZ~Y2Uc_ZekwJHwvPc&u=Wbjb4x2r2B zk5OeuGX1xKSNCt_)~>EnhLpz(odPDbvVNPEf)mAti~BRt?xW21AoM0dfZxKHnf8Bu z#r0CAR#sQHV>RuH?<1ab!l#W5-cLdbx=zKzhJI@#-q(9zqraRure7GSz-){gViek< zOF>J(dq_Znmm{g2Z3=YPAW%Klv(D-FJ2?^X9y60((7k-5WzQkI4nV9!(=d|Bu zUL2pB`V;5tlKJVw_~ry29xFntuKkov-$z~hC--ZS$$9eReXqyjpGQ%lXi-P+--z7U z|3-erdwU7ENxMTPRfls)v=%<TZ~;dfo2nz)`&s^6e~W3t?4vFXWRvXC39xicwl@%vbA>dD0D!Fyoc z@NLPq_2uPle-1pJd4eM(e(X9G!*cp?({jflH_c?y=h4P|YKcbR_moiImG^S49LIA4 zCCr$o`>J6_4JQ7Sb~F3-*6Kq{CnxO(yZ%yh^9fcd<29So$LkK(I@$JPl@I2WaHNn0 zr16g#naC?BCKtn9A6{pZ^FA=}lGav`c9WN$CNJ0_7~&!DJq*59yj`Ln#kG+VxFZ~z zVBaSu%ZOKAOFh*s&vH0jSUFLUZ()OQphjs0^FDP|&N?V6IOq(;Cndh+nH6+$N@Z~` zjJ>U$XhC(q?7L1&*FiBMo9heyoEueKcyyACNXC6L&y#&CLW*J5x=TenX-Q)x^WXiA zZk+xKdDhS{!C`!qQNYYcH(RD_d_&vn!sW|$*=^C^nXTI7r*&>xz=vj3A{%(t-SJW5^_mFqoL`GG%}wd`=qrUU+3G*TmgJE zEA4f^!yYc)I`bS+CDFT6T*kbHs{B7SpGQfs)D{S) z@=o%7v@{;TBT&S&UQW;Qv&&AO3(TBumy}GYWm*}2uOR54CNMjEPkbU>R--ppRcvIw zcxSGhtFt;3$%rR~lxiJADaL7J*Qy8J@_CoEb}G1?`}VGFsc`-$OQoN!_kpy>jc=Kl5ejvPCBF{e~9{3#X@3 zYzoTLH?WrJF4FASk1B6_S5sr z@Nu;&ha%qoaa=sTB7geKm9&__RL3^6<_~$P=B+Yo?oKM?eTM(FYir)aH*e^-){ft~ zq)ro3jd zvJ*n-qRBi%r#p`09FuI}d9{Lgc|WAvPDzNG{TL&^Kv(Cn?$!Lb z=b`AxbJfcV=f3H>NkyCTaM;u@$7s^A5~Ml2hzjmX7AByuW(y*yozqAjRFLirU0UL> z$bCy|vcdSdE!VYf#kj$X_hK#%))LF zIpw)W`wlqF7o~GV6*Q*n4>WnEVPbEwF^^`Ngep)P>BM#y8IXFSzSW2*d@ZPB367lK ztPB}#B9sm!SmAUnD|n-Sy4B~4N0)DGdXSz4UjPX|`5m;_czT?4`*z`Sy?L4fms-%c zyN~q1hbLa&nyLg`_bb<8Ro!ewj_rEMO;q_ez}Nz~zQeL&OFqt85>qU${U>s?Q zyT~hGXp}4}I$>@)P$1rhp_Cj>-cnCiBajs!5#pz@8AUm-%oHig-!HKGj`KZLwY!o0&q9xQ z_-n&lf5OHSrsZv`iWAb-L-G9?Cy%<%k2gD8F)&WMZ}Hs!^m6LB3t98a6b#n<-c18v zv2D_dCz{I^_fGo{0xh*`@cz8MQG#|mY0A%|AOLi=L`r8f<|3CGK>#*Nlo{gx(TdYQ z;ZswUnMsYOzE8N1T4+Ss4dweAwgz`q5Bci#Z1y=u}eQH{N!B`gB@cIy^dmR*rhk$E%}_h7F04$qk#i;*@FL zpZETJ9#{$mem1Rxu6W}vj8xFDy3icO=a&3)$ 
zy?!(BWP&lpr&JaES#2HV^3?clWtskb2r=;y1l`*e{<5+zp8HRH55>^VygB#|=fmvG z=~LdMhI*1przzw8xX7xix{Ll>J)d+Ez*AS82wMhI4f1?iRLpklwZ zn-ftCix&oZlug4x;9TPe2t%9yst5+_DL)MDmu(5_RW!eBX}gZuyEpl6ja2l zR+U0xb`mh%^7$<%=n~O9=I7HMyL$-vi(vV1&E{wNl};P8L<{n0HR9({yEcrT;-spo zRk|h>eBq6BqM{Ff|2XmRNjhqPNfo&{6$5y5PY&y29nD2GbA688>{G?Uv$D^}_fJVN z;*AT>VztV%cq^wRW@E%d4w4=^68zVyd&jfO-~alx0Xg1n-4aW4OTUw&&F>2)xZT7K zZc@k}VsEpPvQyLx1;qBvQ-g!erI1bbT9$K z4|%s+maqtvFKU&{>7F}x!sWVRF%)wqOS zp?y+$O~GYF@-_+!moBt_+`&3G!~ST+g%KTf(qeNiDV$R-Zr<5R^;#&3fvxG0Nf_YK z3GSr9|9Bq7>j6)C9tD3FOOL`D%}Qh$av2|#F%XU|=(|UTNT+=PLYG30=hS$+Oq`Ba z-P64zm@37H+&*V(154>HUkX95fc$?~Fw6f9_9un>yFW{H1K;W28E$W=@X8m_TZ5lT zFpm6kG_Kv~Bo(aHSST~7+>uJYmf?q7s2MYR%aC z9_NJ`F%`<5GSG=iDxKFQD7`eLw9j3od+WtVMX~WlH>;jk(a)n06};)Rrjw2X{k>oL zO|jEyn{UE;0@R}B*t6NzIVb&`n(I%;1ZO=8E|+~uaYb=)JJekH;SbE*(gZt!EGum(C!! zW~|#NlnEtFGCMfr=5d`t#kh?_j5J2$N5yfs0m~Kpxfi={uG?5}VCrp(s(RtHDrz(E zR@vGb(4pQ4dlc83J2{>1&!+|j1?6UCS@V*i^&`oGNpW~U>RNJH*`sJX;xy&xiQV_t z49u(i=ow={|LxE+`8qQiABs=6o?V1* zgVnHIv7_DdY5&sWHo`kJv%bl z7^%O3jQ3|E?uH}M1JK;bW9B0 zBLi)1S+ue6R31ln3R~8!``K@-VT1`!?i>8D8YA166c=v>E#sve4Y<;MlD4#251A(dg=Cshj z6wQ!gUF-H=-1xp)L6iDSIKAnymEMP{%E^`H>u*oJTQhV&RZd{9ZFc?dF0{k%gaiXZ z%lug!!5yq>#nRE1Hk!f+jqR$fC4D0u&)ZyzXt7f=t2#|wS+6>l3<+xsE4Z<%fmU9A z96ND!W#hCZex2s3O<#1wrx#JOkxzi{$cM(qF19FuxdlDQ(;=EV^|xNdNSiQ$%!0k6y z54|O}3tk9hZ{eH(s=Vv+@AC2KkhGGFHe%*9ajQC&?xA-zvBv529L#4UwdHkaHS>;R zyiAv{4nL9*o!I0}bTKuK?f)v#R=txaH2hqzkxbZK@<2G{yjb1rz189aqHHgWit-vk zXYS?cWN-hU#68VFfBdRAALO!SGG6n@Dpw_WWn~4`3{zna_V(Z#D9CJ$?5wPZ_4tqI zyuUpQTXX(w$UaL7o<(y^L~+1|q2U>~C2LG5Z+|bp^+~X$>0V;NTD=;~g29Is?rSyM^1!Lk34_C&o+GE&(_}-6aFea=4=ZDzz+L{

v5&)MErD_|GN3X;E4i}jv>&|g+Uyq= zgvV@Ke$*p%re<9S&68V~Y;9P=!DW#=c8L1}7Z!u9F&n;VKS!973(h|?m-B}o$<3R_ zx$TwSPw=4ujjOf(1gM8r$WX1b(8Ae;xf8QG6QbbG>vYIwHAFZ!bXb z-YQE{Wx?3+JEq2|Mo+m%1GE9i{2CXwW{1wn6V3MMNB77i+G~u{8zSPh7c$!t*KdgX zINsZJE}fusbrt{h(?jQ4Nhh2AS8w`mTWkl>G+8O6*9FWqD<&qU(hwo{mH4}vHaiWd zVy{=avR}L$G#Zy{=0nBhhCA@Qd({U%(wFbf{3f-MJ>F1V68F0_y;vn~y6bSbbKdkI zD%0%OgvjSx-m;=T?vhO@ONCP|O4w!2Ty<2S*h+D;e^ax~Z^xy+$&Ll(LR0W4(|$Br;O>mZ^umGTC?!A3RsW^yRU@YI(9FwFncnz zGuQB*)YJzOlJKHo;sGlK~{&4H2mJ%EC2ua63AgHQ*N{7|!0ttR&B$q6}>q2Em`D1hx9G3OFoDyQQxp;zgy9 zh~4wYS=@aVH57*m%Uo{{tHDwhyKaJjW`0+3^Q0EaJ<;)i3K#!?%-DIuzEFNiogQ#( zr2;>B*_#5uO}Nm~2N|IK2K^Nf^W*QYTts45m$YZd^;6z_7QKh@LrjJd@5c#UP$MKS zT?N0wOGa}PnP)Ev`t6OszM&Y-zA1j?s-gy7IZ5$i;G!^T44hW*3df!IqSUp=MW&WrqgQU^rDUqcn}MGhR-$ZmY6{O zyiAlr+BE?rJQc41Y{u@xJlKpycvCN;pq&{4Hyk*nmt8gm_Y3{ew!ugtht|tP;`y~4 zv!t5dn204B0HzDm?Ph< z?6uSZ6f<2m>exOvlJE>%YzHca2%{zBlx70x;UGgvy)?{!})+)3y0hh=7jl~uz zX$hv`RC`tC3M8(}4d5w{bH>4`^#9~^0w45UHv8&h2UU-qmy|+0{k`bo^j?7j_eonm zG09~_njVNuA_Tp)PeqCPHa8hmT|$<4WE0yUkg{GWfAhDV{@@gsr*}rBAitA(ieT3kxY#k3Z{a*Yj7st31F~#6xrwQjCLzK&wPQrDB9Zyl{*Uva1hgba(kbOb0C2XkBVD2cb zR_fxyL6lR2&^X;{vo?l<*&suQl`zfoEilAf%l9m2LCEF{v3V1}&<0*#P<2?>9}6*7=xlPg+|Nv@jS`w!ed z>S-VS(H?U}T7diQJr`Noe3_^b?q4W|c^?CfuF_A($k9nbM>}++jrFv!6vM~zkWDhU zE(}yc5yQ3%n(hr!EQbdmde9X?;s`R1g72um0onT8z&Ig%Vc-{txXA~W4?gCK1F8J9 z%pxz3+bFS3#??bPL-`J<)qjWgl&N1m$V1uHLIx>)ND+E_St%>#I3evG$$6#Ve$&#p zA*=$6pyD?~HzyxGc-kp4>mC930P_VzKqcHp5gmqbPN1=WuQdu9H~x>qV_-`tt?ppS zRj4*2>xh3lQ~F6xf8^Gm-#jBcFEuqaqk9SnpUcUGy`dEQBIxWPbF5pTzc!Dbso{v@ zY5z4A+?aVu(KsQu*|*;4NlDOub{I-r?gp3KhvHCcxUHNFfwlKt>)-UoVKuMxdj8EB z)US7(%?`@x;R<9TaLc5P;bhQNiGh0{dvBZA#K`A<$E(%WJ=FN+1*EKA2L|<=6Q63I z$<~d1xGgA{5Q@u|Tl-d{hH@soN@vLtCjkt-$KnL)8Kpy(;TI8{Jn;5_TLU^%c|z%t zBy(d41 z4BDI<0Kb@(^MDBh(LoMQqbb6GuL5=niEZ!0sMuafx!w4Web0o$f-BuoceMt6zQcVJ zRzEr}m8fd{$o#8HawRRu!Rb$+PW@0plzbaaqkJLQ;9Emt@3F%_OpRShP9K=U+v7N_ zoYQSU(Obecl+@n#^OrKj_*#Ry;_+{m8Z}(-)dEErVyPBHAbxz11}n43)X6x+ 
zzPN(E1!qD;z&fZrqBF1dZ7DV1`qrQ*M5U}7Bmy0>H8@LK4CA|Z1Mgy~qal2WM#goCwJaXzrD5%Lu6uaAV~5eUZ+u#N6tY^QAL0O zI_@zSRY2eIcgmINz#529^3PUbi6Fx*>I+vOD^MI%(o=*SVMxtzfK{H>KwZz!vb-X* zUz|Wz_Tbfw$0-3F3}I=0<(NP7NeVe9+ z60l{V5J?f%$t20VXu(4ujpdu->2a3?w+QF*w-iFXitdMkjgo{uM__(nV}dl06f*zr z9wVKvmZ@T|%s;!>r!b!~d{u3@rgDo=$mDA0~oAx2;me_{X=tdmZ`b zsOlmjU=GS<2u>S?-Kp%Qa;aM}Pu3b2wQ$%9sQFYql&juG>~b>^w+?3DX~~^cxt*I$ za#g7JDbusyOYP~}jyO!hC^&VvN{oR^U$KMa=}uwr&>V>i`ZV=%!Oqf@Yh4PmRCcF& zqqKwu;3}h%D`brab)fcwYmmqUfBvN5Hag*FSU3Pfh7IoBYcb<~&B$cw-~asleN4ZY zcMcxX>m$(Hs&<}sOhSv$=B{nAVO_it)nOReHJz%9CO8{to;Ev#zD6}*pH7`aG+Cn+ zdQN)ixT;t)88inL*tf1VFZF;VdFh97|NmFlmB&N9w*5Irr%t6*LRzRKOQHxJYjGlE z$(nV_nkX8A{$N8x#If_Y-Ly^J7K%I>PGJm!NH1l+1vW$` z&0F}yeca9M zD^)+lVe!6^u*8Cdt;jy=Zx_MaS-qMv%AcDZ$&VB*(V4#+ugaNt(5$eXEQY@=LCqcawHhqkl!5(+gB}{~=GXjTB&yvi zw0Ki3<2(rq?x~~eDOg9_j`#Om_vY071d4fZ*r?6xQ~FsjYXLh<>8nn8NanU3&d?)( z3V>kn{&UX$X$Qa{kMSz5r!JoB?VbN41U;gSI~#M!51u9Y@LtXfJz&sGdO}u`1Y=#F z_8rYBJ*T^n{p=^yT~lO?Dg!YZWJs{IFaxgd{t_GLQLZw`ILzIcP^1qRr6E-Y{IwfY z$pHX<5F6rYVN2-KF_#{J>jvMTYAS01@i>p|kb2^fwvefg+AaKMO3vpv1jOF6KPKHg z1_Oj79J!w>k%{ee0t^zR=|WU8efF%dKNrE{i~`{ddUJs=)fI45v1Oxhlw8xSnf{aF z{Rh+s*XkB`X);zT_{WO*GcSucDw$mq?3o^&IA@SWSlmxeD7xgF5T>66Ot1#l8v-wc zbD4uMg)f9yDRROnwPZw98XNdRssi(#to@=?x?wdn{m?>Hb#)hS z&uZi`FX!jW#y!Q5y@79*o*jX1b=UK$lKU;IQTh^?w<8~<7sArEEv9d-vS-&OnAgpo zgmLmg#;G(P`UIl&q_Y7;-cV?jgn1kdv-xlvhheqF$^ z&*_UYk+;<0tEjv@v9+-Q*Wc*sQ}F_+MU0f&VzzRMlb&yO0#mK-&*$WvdRB)3B?<`G zIdni*H(o!BpPlCC0Oz*9j2DGkIR*BJGEv1E+W=KKRp0|5qYe7Uf7L9568ulH12InY zjIB$adWp+Pxpi!qfReZ77gA=;d$uM2_Lr)W6Ujbvu-tB)4@*_k)FfQ8{}jlkunHTJ zNASpdQ(#L7%;%db5ZiQuU0q#mR=WyJi9Rd-iyf*qSCV-Yd~aEJk3X|>(;Fn=NCgE> zu!nYy+myI%^ARr-yM{-BfvOQlI}1#uZPwS(G0DlICFivj*C+W{a6?ccFK%k&bc?j( zWmlIf%wypXyQ^}Dz87nT@`Y>b9DD#T zKz;&q=u>jMAsvDtFg`&F!uFrgWU=wy2C0b;KI&(A+M%f#8+#2cYvGimrk7b+S&Zug zX4voDF2G#FiOgG!6_Q zF~-AWde1o6vNUr*T}l%M%-Ae5zYWOMoc{f~@{K~+UOePTFn_DcL{X^9mMGyc*np1R z$kY!K#Y}yIAy6PLrmhg_ep?Xi?Y(IK#$g*R*%{0O#xju^t|w-H?udAx>5(2w+{~nH 
zlz&MbWP^{8gpttD_y}9*QyJ3tj?22|v+~M&>wB44Scq!B@LM*fN6u*9hhY|P=;*i^ ztho6@d2Anm8pW}k@R;H5?ptF7S_DnsqsP<4BvB4dL+E@#GeY|r;G9>!_bk-0)z{av zIvxf`F@04Fz_slr_E-L6%G1xt&Ax!Yb$@|UieR1#?Mwv(4q!`E4ISs6!Rn@?*mN?S z>y;9*UY@=QX;=~N9b^?i8i3itj8liwg|tC0`D|k+CW55a1vBk>1xrn8A74(p7E2I^&On=V~1k6A)Kz+oef3(L*S?!wBQUo9{p!jol{l>AL zJ?GirFrppWsm(#bZTWEz7Fns%b!dQit42OxExg7!xSz$ue5rDskj9;4A)}>j$cG2| zrXS8IgHA|Q4#^Z3t)RAy8iP}#Tfhf2c&D&d+VZs2H*;af6VXllV_Ot0rhDp!2wu2 zI_R1<>`*=C;W)}cS?{3$rwdz!_t$t}=ur^Q zlU(;vj3fdFbU1$@7POA_iR*09W`oDOD3q;XcS2xqM0?~`A|2`cR=*=ikqjaaUQ&Jf z;Xm+a8YVr=8<+&e*=tl-I(YXK;+XqMP#ZYM6)E}y1Jzrzk3EZv>w>=hiDf5pLOsQQ zV>XmsK!LvadHAo8bA<}?@7QUrj~@L@w+Cz?toNsydFB1oH*d_OTVTKEsliM)H`qfz z0UU%sed(*X4(a7F7S|f9Z*q`#sq*?=5)hcEf$e>37=>2Lsp%rbu%2Ivn|LAZ1IX$4 z9cIAU2#*#QQ$D_P5({e;U{MzkngsPjF=lTa;Dy2YpYMC4NarHh_&JI_B_B@1w1J5D zQM{17fj`1eU~@RsqnUrk4oBJ;piYN3x3(rrIrYM}9-p8|R|9vp5M-s#ozw6VI(c%a z=mSrlVicUEBJ5{(P8-gb5Yo1>CSWMr{yd&HoJU?pRJ64&d@3?uG>;Y6sAY4p*l%MQ zg2e4+%GZi_Orw(6%a7^&@^8$A_n*;@jbP1WcfU`v*#Kx#YusUN_>rFwL{KTg!WHnD z#glKqNiat*DlvZU`$53+FfAZ6BR4`Q7+RLPNkrRMCD)jC7?D=?%qUpEi?50GyxN*6 zUDDAt1Sim|Moj^x4YHI*PT{_se7O&Xj!_&9B1Rmr_T3#MS*U=_&*msCwjqx%qdK~$ zKI+8TrN{aKo-A&L&kbHlIPl5qhCc5uiM`K(I*a{XS3S(4*nI1&y=m&G=B!Om^aO^MV-W8*DQvES}(wfki|Boti0dU2qR@73!V# zfS;&&?V~)(YC$4JmRo;7Z2-R3cb;66$`&5Fs_;ZS&rICmZOEw;szRyO&3~f7Xmt+7 zefdOCcYvL1VBX0>sC6^;7ZuOA z*0#g(hG5mA7;axr!M#)+ns-q7Klv_gBlv&w-5u)nc&JxVa&t1OA+(M^Mm{ki>3Q!* zh@bGkJ=?T&@@S?R>eV(c!>o3iqf1)G_kUr`*)#jO2{LRbe+b(s6lEOA|nd&=je;9#iF$mj-?4;yMa0T~LI zHVQL2d~F8mg`q%!0;>&S<&fjJO|^((b@LWL&ujNAu*Mm?TXD*%){q82Op3DHMD!b2 z|ChJeFt{+TK@V4qiJ(y4+;)v09BA<%LJNSw0CBC1DFAY|N!TgTK?GhOb{g4D#mRIV zWIjvje)(||V6H%SGZ3r%aEtq7GjLz~pKIA^>uR_U_|S6!D`)mgODhsCUtZpc@)@!k~)VCU@G5#|9NE5I0<00<@K0{ zI>mCv*qN?7>N9bB)r28>LPT51)6D8fjja9jHqBf)H9cPmI8&&71A__O7=)wnt{*lY z8&yTfMEN&`-p_VHSUH0@-}}5(4{}HU1FWc%WWdD<)2EXF+zhsVH$R4M7X5Wfa(;qS z{#SlY#h7r@3VuF5KARx)^{+*5XPxSKj*@V*oaWTN(4vjV?`n`1KL^SgA{p>;fGCi$ ztnubp3nmxZzZ&=e*q51iqVAI*{U;`v-#6sI`F55AU#~V|9_x>_1oqbDR>DZR%9POr 
zG>_<}cxa7=#2I1Tt#8;R&E1)rgVd={*sJ)3* z(#zG-TkiyQ8IP=+ZFWY%D>MNRe7^xnM$N~{{Z{y|{pI1`NSz)fQ~gc%=*;2t*Wg%UpM%)`JJd z#P?Qj*}`|vt{0Ont(RrP_AQnv3HAtQ4Dl6G0~_~CQMmYH95L+6LzP4c#=W6rKm)LR znfb{+oe9uelohYuG}G)xvP4EJ@nDM z+^14+%^d?}yR&;E7E3+n{G>gYk780dsCM1Ojn2@cE9LR;+lN}M9`(^Z?eE{;h+kgx z$(YucOHH>A!ha;Pu&`8MC_KfcWw?c>i=+*e@FaT9`@OQ~KRgyO*MzJF)h>dh<*4ve zLbjym@9Tz~t#EHdD+e^9BDGkjtl1eu-QTpOr>iT6X-@2W__us4tf3=RWL-EC=TN07K#r5YiDT}G zoqHQ6*X5<`^`KQg`KVAdzqVr>KSH6QZeKzG7^>mESZaC@kH*JVZN-6=0S`Mv!?RyQ z2dAy?cXpz^y{q?e)7(IA-PINOo2<6F<^a%LL!ha3a3EFp-FiKyWPo%tDjL>6z#~~# zpxhi>XueULa?iAkN4 zz-7<-TYRcY)+v>qg}&4MRdjLR$@ZtGl#>kye}JI!_~qAYeHe+d-auz(QnKO}cECt( z>B~q{lc^|LOGABqu}NU#-TSpETuo#fzJii$h-=l;t7y73Hb31No|Y!Ry!_IzV12~N zZsX|!Cr@n`teSz<9tH+EAYAuZUY$H^V<-Ks`B{RD-qSx;8GJM@7g#Zd3J7IR;0wXy z)miEFa8HSy)xHosEGa3-_pV(Vx8{Ap4~l0=$*nEt$h};xCN;}q;wK~Fj5SK}m>;)A zQ9ZUJ&3K7K|CFJ>CNp3(dFaa6xrT_1AV$T72_veK%2;oJgdbqjE)vMWKz zi@u&}^`ZXw33Rf=hmQM#LK$hNe*?)gERaF_y8M74m5@+0YRKz6hW44Y;XWcg$Auxu zw=~&5V(shR6gk^q7 z7%#4;T&gZ_b0wweGVT^ukIjL7}k<61}q_Vk=AW)2CurdwX9iMzw6KDMZHdg%mkW zFp3UUpPbHPyAh9c0|cPVxq}ge1sYq8eAwtp&^}M1Uz%r4zPKr90}DY&8-&lJO;Y5t zPVsyS!K1?29E?jZn6zq{=PzhH0V|EI(D{ndV`IB^vQGL8E2TM)!fF=<(BM}Piq=b< zJO@=G8D7?-GIXzYl5Y*;!AJ5Q?@gCH^BM*$v4PghY&*x)9x+N6)mMevbA76|sK~#p zOmU~L81d5LLYol>7q#FOqxkiOdmtUJa>eC5e;WU_9<~ZQ=Yd&Vc8?#9R4~c*SqkFX z`B4MW8{1JgK{`Ddf~RjNrcyd0=5nWH9u6Vm?$+a~1C5V&Y{8O@9XmE(rk$>OvsqW` z$J^HvQ90{KaT%=t>nnMMTro3#mGts?pQyC4C4F7K{9n$79jWjl1J>E0nWc5&g@1J+ zj6tDAOEx<(D9DTLNPC5H$TB&hv@~GqvjFVBeixMGatqDTRHC%@%0Z#3Dq%0vt5+Yh z@+$I({w1!ma!;%E6&LzA2(q#SUVJaP^A~zaxhdsj+r$f;N~RE{3}olQS`+yN^n36A zm{?RjD}X5~Io45N8Xv;zf0t(c9Oi2UVOF;FIo@;s@w)>>R=d3V-*_1-FQjZOg3n|- z`i1&p%NI0AFf0CcH-8Zy<1_VGDdoilTucx8l8L}B>_mjZ;FKq~y;2~o24dzefNa8-V;_4{?^iyO9Yyvn8C#f@3z zj0Pbxytf3YOTmaf#yd&JxVgAQQJkkfWiUUf1t}_{>W;96;P);0C||U_>CK(Xt_~%Y zk8F1TArbDfgAVce`&q%tYEPyP({(;IHRS+V@ZWOY{5E*=${*X0hooa^gBS`Z()xYx z398>}Rfye>2bd=p!=~wyiu-r@zIu%RM_x-{ghqdOdLl17`?eoYIhCs0(YRelFb573 
z;ggZ^1|H_&1=0UU${G-WTDxg!X$dmUOGh;g3^-;apoQ{G&~PER;V~XgnuL>@{s<56 z2VJ21k&Z-32MeEBNel)vHU*;CovcSLz~O`1+SK=lI_rQ@3&L8Wd?_pz$r(E+VN zNmN}n^MjGgBOJ(NW&XWnjx%z3Tc2q+$YnS)mq)GwZqgN}E+lqiBELYRP$;!)TED+i Iy8Y;X0JMwC>Hq)$ diff --git a/bitswap/docs/go-bitswap.puml b/bitswap/docs/go-bitswap.puml index 6a291dc35..af9134d7e 100644 --- a/bitswap/docs/go-bitswap.puml +++ b/bitswap/docs/go-bitswap.puml @@ -11,13 +11,6 @@ node "Sending Blocks" { [Engine] --> [TaskWorker (workers.go)] } -node "Requesting Blocks" { - [Bitswap] --* [WantManager] - [WantManager] --> [BlockPresenceManager] - [WantManager] --> [PeerManager] - [PeerManager] --* [MessageQueue] -} - node "Providing" { [Bitswap] --* [Provide Collector (workers.go)] [Provide Collector (workers.go)] --* [Provide Worker (workers.go)] @@ -31,14 +24,19 @@ node "Sessions (smart requests)" { [Bitswap] --* [SessionManager] [SessionManager] --> [SessionInterestManager] [SessionManager] --o [Session] + [SessionManager] --> [BlockPresenceManager] [Session] --* [sessionWantSender] [Session] --* [SessionPeerManager] - [Session] --> [WantManager] [Session] --> [ProvideQueryManager] [Session] --* [sessionWants] [Session] --> [SessionInterestManager] [sessionWantSender] --> [BlockPresenceManager] +} + +node "Requesting Blocks" { + [SessionManager] --> [PeerManager] [sessionWantSender] --> [PeerManager] + [PeerManager] --* [MessageQueue] } node "Network" { diff --git a/bitswap/docs/how-bitswap-works.md b/bitswap/docs/how-bitswap-works.md index 4b6ab1a74..303b05763 100644 --- a/bitswap/docs/how-bitswap-works.md +++ b/bitswap/docs/how-bitswap-works.md @@ -74,8 +74,8 @@ When a message is received, Bitswap So that the Engine can send responses to the wants - Informs the Engine of any received blocks So that the Engine can send the received blocks to any peers that want them -- Informs the WantManager of received blocks, HAVEs and DONT_HAVEs - So that the WantManager can inform interested sessions +- Informs the SessionManager of received 
blocks, HAVEs and DONT_HAVEs + So that the SessionManager can inform interested sessions When the client makes an API call, Bitswap creates a new Session and calls the corresponding method (eg `GetBlocks()`). @@ -101,9 +101,10 @@ The PeerTaskQueue prioritizes tasks such that the peers with the least amount of ### Requesting Blocks -When the WantManager is informed of a new message, it -- informs the SessionManager - The SessionManager informs the Sessions that are interested in the received blocks and wants +When the SessionManager is informed of a new message, it +- informs the BlockPresenceManager + The BlockPresenceManager keeps track of which peers have sent HAVES and DONT_HAVEs for each block +- informs the Sessions that are interested in the received blocks and wants - informs the PeerManager of received blocks The PeerManager checks if any wants were send to a peer for the received blocks. If so it sends a `CANCEL` message to those peers. @@ -114,7 +115,7 @@ The Session starts in "discovery" mode. This means it doesn't have any peers yet When the client initially requests blocks from a Session, the Session - informs the SessionInterestManager that it is interested in the want - informs the sessionWantManager of the want -- tells the WantManager to broadcast a `want-have` to all connected peers so as to discover which peers have the block +- tells the PeerManager to broadcast a `want-have` to all connected peers so as to discover which peers have the block - queries the ProviderQueryManager to discover which peers have the block When the session receives a message with `HAVE` or a `block`, it informs the SessionPeerManager. The SessionPeerManager keeps track of all peers in the session. 
diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index 34a7375c2..ef7798084 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -25,17 +25,6 @@ const ( broadcastLiveWantsLimit = 64 ) -// WantManager is an interface that can be used to request blocks -// from given peers. -type WantManager interface { - // BroadcastWantHaves sends want-haves to all connected peers (used for - // session discovery) - BroadcastWantHaves(context.Context, uint64, []cid.Cid) - // RemoveSession removes the session from the WantManager (when the - // session shuts down) - RemoveSession(context.Context, uint64) -} - // PeerManager keeps track of which sessions are interested in which peers // and takes care of sending wants for the sessions type PeerManager interface { @@ -47,6 +36,11 @@ type PeerManager interface { UnregisterSession(uint64) // SendWants tells the PeerManager to send wants to the given peer SendWants(ctx context.Context, peerId peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) + // BroadcastWantHaves sends want-haves to all connected peers (used for + // session discovery) + BroadcastWantHaves(context.Context, []cid.Cid) + // SendCancels tells the PeerManager to send cancels to all peers + SendCancels(context.Context, []cid.Cid) } // SessionPeerManager keeps track of peers in the session @@ -98,7 +92,8 @@ type op struct { type Session struct { // dependencies ctx context.Context - wm WantManager + pm PeerManager + bpm *bsbpm.BlockPresenceManager sprm SessionPeerManager providerFinder ProviderFinder sim *bssim.SessionInterestManager @@ -131,7 +126,6 @@ type Session struct { // given context. 
func New(ctx context.Context, id uint64, - wm WantManager, sprm SessionPeerManager, providerFinder ProviderFinder, sim *bssim.SessionInterestManager, @@ -145,7 +139,8 @@ func New(ctx context.Context, sw: newSessionWants(broadcastLiveWantsLimit), tickDelayReqs: make(chan time.Duration), ctx: ctx, - wm: wm, + pm: pm, + bpm: bpm, sprm: sprm, providerFinder: providerFinder, sim: sim, @@ -301,13 +296,13 @@ func (s *Session) run(ctx context.Context) { s.sw.WantsSent(oper.keys) case opBroadcast: // Broadcast want-haves to all peers - s.broadcastWantHaves(ctx, oper.keys) + s.broadcast(ctx, oper.keys) default: panic("unhandled operation") } case <-s.idleTick.C: // The session hasn't received blocks for a while, broadcast - s.broadcastWantHaves(ctx, nil) + s.broadcast(ctx, nil) case <-s.periodicSearchTimer.C: // Periodically search for a random live want s.handlePeriodicSearch(ctx) @@ -325,7 +320,7 @@ func (s *Session) run(ctx context.Context) { // Called when the session hasn't received any blocks for some time, or when // all peers in the session have sent DONT_HAVE for a particular set of CIDs. // Send want-haves to all connected peers, and search for new peers with the CID. -func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { +func (s *Session) broadcast(ctx context.Context, wants []cid.Cid) { // If this broadcast is because of an idle timeout (we haven't received // any blocks for a while) then broadcast all pending wants if wants == nil { @@ -333,7 +328,7 @@ func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { } // Broadcast a want-have for the live wants to everyone we're connected to - s.wm.BroadcastWantHaves(ctx, s.id, wants) + s.broadcastWantHaves(ctx, wants) // do not find providers on consecutive ticks // -- just rely on periodic search widening @@ -341,7 +336,7 @@ func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { // Search for providers who have the first want in the list. 
// Typically if the provider has the first block they will have // the rest of the blocks also. - log.Debugf("Ses%d: FindMorePeers with want %s (1st of %d wants)", s.id, wants[0], len(wants)) + log.Debugw("FindMorePeers", "session", s.id, "cid", wants[0], "pending", len(wants)) s.findMorePeers(ctx, wants[0]) } s.resetIdleTick() @@ -364,7 +359,7 @@ func (s *Session) handlePeriodicSearch(ctx context.Context) { // for new providers for blocks. s.findMorePeers(ctx, randomWant) - s.wm.BroadcastWantHaves(ctx, s.id, []cid.Cid{randomWant}) + s.broadcastWantHaves(ctx, []cid.Cid{randomWant}) s.periodicSearchTimer.Reset(s.periodicSearchDelay.NextWaitTime()) } @@ -390,8 +385,18 @@ func (s *Session) handleShutdown() { // Shut down the sessionWantSender (blocks until sessionWantSender stops // sending) s.sws.Shutdown() - // Remove the session from the want manager - s.wm.RemoveSession(s.ctx, s.id) + + // Remove session's interest in the given blocks. + cancelKs := s.sim.RemoveSessionInterest(s.id) + + // Free up block presence tracking for keys that no session is interested + // in anymore + s.bpm.RemoveKeys(cancelKs) + + // TODO: If the context is cancelled this won't actually send any CANCELs. + // We should use a longer lived context to send out these CANCELs. 
+ // Send CANCEL to all peers for blocks that no session is interested in anymore + s.pm.SendCancels(s.ctx, cancelKs) } // handleReceive is called when the session receives blocks from a peer @@ -439,11 +444,17 @@ func (s *Session) wantBlocks(ctx context.Context, newks []cid.Cid) { // No peers discovered yet, broadcast some want-haves ks := s.sw.GetNextWants() if len(ks) > 0 { - log.Infof("Ses%d: No peers - broadcasting %d want HAVE requests\n", s.id, len(ks)) - s.wm.BroadcastWantHaves(ctx, s.id, ks) + log.Infow("No peers - broadcasting", "session", s.id, "want-count", len(ks)) + s.broadcastWantHaves(ctx, ks) } } +// Send want-haves to all connected peers +func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { + log.Debugw("broadcastWantHaves", "session", s.id, "cids", wants) + s.pm.BroadcastWantHaves(ctx, wants) +} + // The session will broadcast if it has outstanding wants and doesn't receive // any blocks for some time. // The length of time is calculated diff --git a/bitswap/internal/session/session_test.go b/bitswap/internal/session/session_test.go index d6f89e2dc..a8773f1c1 100644 --- a/bitswap/internal/session/session_test.go +++ b/bitswap/internal/session/session_test.go @@ -17,28 +17,6 @@ import ( peer "github.com/libp2p/go-libp2p-core/peer" ) -type wantReq struct { - cids []cid.Cid -} - -type fakeWantManager struct { - wantReqs chan wantReq -} - -func newFakeWantManager() *fakeWantManager { - return &fakeWantManager{ - wantReqs: make(chan wantReq, 1), - } -} - -func (fwm *fakeWantManager) BroadcastWantHaves(ctx context.Context, sesid uint64, cids []cid.Cid) { - select { - case fwm.wantReqs <- wantReq{cids}: - case <-ctx.Done(): - } -} -func (fwm *fakeWantManager) RemoveSession(context.Context, uint64) {} - func newFakeSessionPeerManager() *bsspm.SessionPeerManager { return bsspm.New(1, newFakePeerTagger()) } @@ -76,11 +54,19 @@ func (fpf *fakeProviderFinder) FindProvidersAsync(ctx context.Context, k cid.Cid return make(chan peer.ID) 
} +type wantReq struct { + cids []cid.Cid +} + type fakePeerManager struct { + cancels []cid.Cid + wantReqs chan wantReq } func newFakePeerManager() *fakePeerManager { - return &fakePeerManager{} + return &fakePeerManager{ + wantReqs: make(chan wantReq, 1), + } } func (pm *fakePeerManager) RegisterSession(peer.ID, bspm.Session) bool { @@ -88,19 +74,27 @@ func (pm *fakePeerManager) RegisterSession(peer.ID, bspm.Session) bool { } func (pm *fakePeerManager) UnregisterSession(uint64) {} func (pm *fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {} +func (pm *fakePeerManager) BroadcastWantHaves(ctx context.Context, cids []cid.Cid) { + select { + case pm.wantReqs <- wantReq{cids}: + case <-ctx.Done(): + } +} +func (pm *fakePeerManager) SendCancels(ctx context.Context, cancels []cid.Cid) { + pm.cancels = append(pm.cancels, cancels...) +} func TestSessionGetBlocks(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer cancel() - fwm := newFakeWantManager() - fpm := newFakeSessionPeerManager() + fpm := newFakePeerManager() + fspm := newFakeSessionPeerManager() fpf := newFakeProviderFinder() sim := bssim.New() bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, fpf, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session := New(ctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) var cids []cid.Cid @@ -115,7 +109,7 @@ func TestSessionGetBlocks(t *testing.T) { } // Wait for initial want request - receivedWantReq := <-fwm.wantReqs + receivedWantReq := <-fpm.wantReqs // Should have registered session's interest in blocks intSes := sim.FilterSessionInterested(id, cids) @@ -138,7 +132,7 @@ func TestSessionGetBlocks(t *testing.T) { time.Sleep(10 * 
time.Millisecond) // Verify new peers were recorded - if !testutil.MatchPeersIgnoreOrder(fpm.Peers(), peers) { + if !testutil.MatchPeersIgnoreOrder(fspm.Peers(), peers) { t.Fatal("peers not recorded by the peer manager") } @@ -172,20 +166,30 @@ func TestSessionGetBlocks(t *testing.T) { if len(wanted) != len(blks)-1 { t.Fatal("session wants incorrect number of blocks") } + + // Shut down session + cancel() + + time.Sleep(10 * time.Millisecond) + + // Verify wants were cancelled + if len(fpm.cancels) != len(blks) { + t.Fatal("expected cancels to be sent for all wants") + } } func TestSessionFindMorePeers(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 900*time.Millisecond) defer cancel() - fwm := newFakeWantManager() - fpm := newFakeSessionPeerManager() + fpm := newFakePeerManager() + fspm := newFakeSessionPeerManager() fpf := newFakeProviderFinder() sim := bssim.New() bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, fpf, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session := New(ctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") session.SetBaseTickDelay(200 * time.Microsecond) blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) @@ -200,7 +204,7 @@ func TestSessionFindMorePeers(t *testing.T) { // The session should initially broadcast want-haves select { - case <-fwm.wantReqs: + case <-fpm.wantReqs: case <-ctx.Done(): t.Fatal("Did not make first want request ") } @@ -217,14 +221,14 @@ func TestSessionFindMorePeers(t *testing.T) { // The session should now time out waiting for a response and broadcast // want-haves again select { - case <-fwm.wantReqs: + case <-fpm.wantReqs: case <-ctx.Done(): t.Fatal("Did not make second want request ") } // The session should keep broadcasting periodically until it receives a response 
select { - case receivedWantReq := <-fwm.wantReqs: + case receivedWantReq := <-fpm.wantReqs: if len(receivedWantReq.cids) != broadcastLiveWantsLimit { t.Fatal("did not rebroadcast whole live list") } @@ -250,8 +254,8 @@ func TestSessionFindMorePeers(t *testing.T) { func TestSessionOnPeersExhausted(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel() - fwm := newFakeWantManager() - fpm := newFakeSessionPeerManager() + fpm := newFakePeerManager() + fspm := newFakeSessionPeerManager() fpf := newFakeProviderFinder() sim := bssim.New() @@ -259,7 +263,7 @@ func TestSessionOnPeersExhausted(t *testing.T) { notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, fpf, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session := New(ctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit + 5) var cids []cid.Cid @@ -273,7 +277,7 @@ func TestSessionOnPeersExhausted(t *testing.T) { } // Wait for initial want request - receivedWantReq := <-fwm.wantReqs + receivedWantReq := <-fpm.wantReqs // Should have sent out broadcast request for wants if len(receivedWantReq.cids) != broadcastLiveWantsLimit { @@ -284,7 +288,7 @@ func TestSessionOnPeersExhausted(t *testing.T) { session.onPeersExhausted(cids[len(cids)-2:]) // Wait for want request - receivedWantReq = <-fwm.wantReqs + receivedWantReq = <-fpm.wantReqs // Should have sent out broadcast request for wants if len(receivedWantReq.cids) != 2 { @@ -295,15 +299,15 @@ func TestSessionOnPeersExhausted(t *testing.T) { func TestSessionFailingToGetFirstBlock(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - fwm := newFakeWantManager() - fpm := newFakeSessionPeerManager() + fpm := newFakePeerManager() + 
fspm := newFakeSessionPeerManager() fpf := newFakeProviderFinder() sim := bssim.New() bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, fpf, sim, newFakePeerManager(), bpm, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond), "") + session := New(ctx, id, fspm, fpf, sim, fpm, bpm, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(4) var cids []cid.Cid @@ -318,14 +322,14 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { // The session should initially broadcast want-haves select { - case <-fwm.wantReqs: + case <-fpm.wantReqs: case <-ctx.Done(): t.Fatal("Did not make first want request ") } // Verify a broadcast was made select { - case receivedWantReq := <-fwm.wantReqs: + case receivedWantReq := <-fpm.wantReqs: if len(receivedWantReq.cids) < len(cids) { t.Fatal("did not rebroadcast whole live list") } @@ -346,7 +350,7 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { // Wait for another broadcast to occur select { - case receivedWantReq := <-fwm.wantReqs: + case receivedWantReq := <-fpm.wantReqs: if len(receivedWantReq.cids) < len(cids) { t.Fatal("did not rebroadcast whole live list") } @@ -357,7 +361,7 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { // Wait for another broadcast to occur startTick = time.Now() select { - case receivedWantReq := <-fwm.wantReqs: + case receivedWantReq := <-fpm.wantReqs: if len(receivedWantReq.cids) < len(cids) { t.Fatal("did not rebroadcast whole live list") } @@ -374,7 +378,7 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { // Wait for another broadcast to occur startTick = time.Now() select { - case receivedWantReq := <-fwm.wantReqs: + case receivedWantReq := <-fpm.wantReqs: if len(receivedWantReq.cids) < len(cids) { t.Fatal("did not rebroadcast whole live list") } @@ -407,8 +411,8 @@ func 
TestSessionFailingToGetFirstBlock(t *testing.T) { } func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { - fwm := newFakeWantManager() - fpm := newFakeSessionPeerManager() + fpm := newFakePeerManager() + fspm := newFakeSessionPeerManager() fpf := newFakeProviderFinder() sim := bssim.New() bpm := bsbpm.New() @@ -418,7 +422,7 @@ func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { // Create a new session with its own context sessctx, sesscancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - session := New(sessctx, id, fwm, fpm, fpf, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session := New(sessctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") timerCtx, timerCancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer timerCancel() @@ -450,8 +454,8 @@ func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { func TestSessionReceiveMessageAfterShutdown(t *testing.T) { ctx, cancelCtx := context.WithTimeout(context.Background(), 10*time.Millisecond) - fwm := newFakeWantManager() - fpm := newFakeSessionPeerManager() + fpm := newFakePeerManager() + fspm := newFakeSessionPeerManager() fpf := newFakeProviderFinder() sim := bssim.New() @@ -459,7 +463,7 @@ func TestSessionReceiveMessageAfterShutdown(t *testing.T) { notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fwm, fpm, fpf, sim, newFakePeerManager(), bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session := New(ctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(2) cids := []cid.Cid{blks[0].Cid(), blks[1].Cid()} @@ -470,7 +474,7 @@ func TestSessionReceiveMessageAfterShutdown(t *testing.T) { } // Wait for initial want request - <-fwm.wantReqs + <-fpm.wantReqs // Shut down session cancelCtx() diff 
--git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go index b679e9c61..3593009a3 100644 --- a/bitswap/internal/session/sessionwantsender_test.go +++ b/bitswap/internal/session/sessionwantsender_test.go @@ -66,8 +66,9 @@ func (pm *mockPeerManager) RegisterSession(p peer.ID, sess bspm.Session) bool { return true } -func (pm *mockPeerManager) UnregisterSession(sesid uint64) { -} +func (*mockPeerManager) UnregisterSession(uint64) {} +func (*mockPeerManager) BroadcastWantHaves(context.Context, []cid.Cid) {} +func (*mockPeerManager) SendCancels(context.Context, []cid.Cid) {} func (pm *mockPeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { pm.lk.Lock() diff --git a/bitswap/internal/sessioninterestmanager/sessioninterestmanager.go b/bitswap/internal/sessioninterestmanager/sessioninterestmanager.go index 46888c9ad..6e345b55e 100644 --- a/bitswap/internal/sessioninterestmanager/sessioninterestmanager.go +++ b/bitswap/internal/sessioninterestmanager/sessioninterestmanager.go @@ -90,7 +90,7 @@ func (sim *SessionInterestManager) SplitWantedUnwanted(blks []blocks.Block) ([]b return wantedBlks, notWantedBlks } -// When the WantManager receives a message is calls InterestedSessions() to +// When the SessionManager receives a message it calls InterestedSessions() to // find out which sessions are interested in the message. 
func (sim *SessionInterestManager) InterestedSessions(blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) []uint64 { sim.lk.RLock() diff --git a/bitswap/internal/sessionmanager/sessionmanager.go b/bitswap/internal/sessionmanager/sessionmanager.go index f7382fad3..c69aa0417 100644 --- a/bitswap/internal/sessionmanager/sessionmanager.go +++ b/bitswap/internal/sessionmanager/sessionmanager.go @@ -109,8 +109,10 @@ func (sm *SessionManager) GetNextSessionID() uint64 { return sm.sessID } -func (sm *SessionManager) ReceiveFrom(p peer.ID, blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) []Session { - sessions := make([]Session, 0) +// ReceiveFrom is called when a new message is received +func (sm *SessionManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { + // Record block presence for HAVE / DONT_HAVE + sm.blockPresenceManager.ReceiveFrom(p, haves, dontHaves) // Notify each session that is interested in the blocks / HAVEs / DONT_HAVEs for _, id := range sm.sessionInterestManager.InterestedSessions(blks, haves, dontHaves) { @@ -120,9 +122,9 @@ func (sm *SessionManager) ReceiveFrom(p peer.ID, blks []cid.Cid, haves []cid.Cid if ok { sess.ReceiveFrom(p, blks, haves, dontHaves) - sessions = append(sessions, sess) } } - return sessions + // Send CANCEL to all peers with want-have / want-block + sm.peerManager.SendCancels(ctx, blks) } diff --git a/bitswap/internal/sessionmanager/sessionmanager_test.go b/bitswap/internal/sessionmanager/sessionmanager_test.go index 4e0152bb7..6fa118e7b 100644 --- a/bitswap/internal/sessionmanager/sessionmanager_test.go +++ b/bitswap/internal/sessionmanager/sessionmanager_test.go @@ -53,11 +53,16 @@ func (*fakeSesPeerManager) RemovePeer(peer.ID) bool { return false } func (*fakeSesPeerManager) HasPeers() bool { return false } type fakePeerManager struct { + cancels []cid.Cid } func (*fakePeerManager) RegisterSession(peer.ID, bspm.Session) bool { return true } func (*fakePeerManager) 
UnregisterSession(uint64) {} func (*fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {} +func (*fakePeerManager) BroadcastWantHaves(context.Context, []cid.Cid) {} +func (fpm *fakePeerManager) SendCancels(ctx context.Context, cancels []cid.Cid) { + fpm.cancels = append(fpm.cancels, cancels...) +} func sessionFactory(ctx context.Context, id uint64, @@ -101,26 +106,30 @@ func TestReceiveFrom(t *testing.T) { sim.RecordSessionInterest(firstSession.ID(), []cid.Cid{block.Cid()}) sim.RecordSessionInterest(thirdSession.ID(), []cid.Cid{block.Cid()}) - sm.ReceiveFrom(p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) + sm.ReceiveFrom(ctx, p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) if len(firstSession.ks) == 0 || len(secondSession.ks) > 0 || len(thirdSession.ks) == 0 { t.Fatal("should have received blocks but didn't") } - sm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{block.Cid()}, []cid.Cid{}) + sm.ReceiveFrom(ctx, p, []cid.Cid{}, []cid.Cid{block.Cid()}, []cid.Cid{}) if len(firstSession.wantBlocks) == 0 || len(secondSession.wantBlocks) > 0 || len(thirdSession.wantBlocks) == 0 { t.Fatal("should have received want-blocks but didn't") } - sm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{block.Cid()}) + sm.ReceiveFrom(ctx, p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{block.Cid()}) if len(firstSession.wantHaves) == 0 || len(secondSession.wantHaves) > 0 || len(thirdSession.wantHaves) == 0 { t.Fatal("should have received want-haves but didn't") } + + if len(pm.cancels) != 1 { + t.Fatal("should have sent cancel for received blocks") + } } func TestReceiveBlocksWhenManagerContextCancelled(t *testing.T) { @@ -150,7 +159,7 @@ func TestReceiveBlocksWhenManagerContextCancelled(t *testing.T) { // wait for sessions to get removed time.Sleep(10 * time.Millisecond) - sm.ReceiveFrom(p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) + sm.ReceiveFrom(ctx, p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) if len(firstSession.ks) > 0 || len(secondSession.ks) > 
0 || len(thirdSession.ks) > 0 { @@ -186,7 +195,7 @@ func TestReceiveBlocksWhenSessionContextCancelled(t *testing.T) { // wait for sessions to get removed time.Sleep(10 * time.Millisecond) - sm.ReceiveFrom(p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) + sm.ReceiveFrom(ctx, p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) if len(firstSession.ks) == 0 || len(secondSession.ks) > 0 || len(thirdSession.ks) == 0 { diff --git a/bitswap/internal/wantmanager/wantmanager.go b/bitswap/internal/wantmanager/wantmanager.go deleted file mode 100644 index 539017a9d..000000000 --- a/bitswap/internal/wantmanager/wantmanager.go +++ /dev/null @@ -1,103 +0,0 @@ -package wantmanager - -import ( - "context" - - bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" - bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" - "github.com/ipfs/go-bitswap/internal/sessionmanager" - logging "github.com/ipfs/go-log" - - cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-core/peer" -) - -var log = logging.Logger("bitswap") - -// PeerHandler sends wants / cancels to other peers -type PeerHandler interface { - // Connected is called when a peer connects. 
- Connected(p peer.ID) - // Disconnected is called when a peer disconnects - Disconnected(p peer.ID) - // BroadcastWantHaves sends want-haves to all connected peers - BroadcastWantHaves(ctx context.Context, wantHaves []cid.Cid) - // SendCancels sends cancels to all peers that had previously been sent - // a want-block or want-have for the given key - SendCancels(context.Context, []cid.Cid) -} - -// SessionManager receives incoming messages and distributes them to sessions -type SessionManager interface { - ReceiveFrom(p peer.ID, blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) []sessionmanager.Session -} - -// WantManager -// - informs the SessionManager and BlockPresenceManager of incoming information -// and cancelled sessions -// - informs the PeerManager of connects and disconnects -type WantManager struct { - peerHandler PeerHandler - sim *bssim.SessionInterestManager - bpm *bsbpm.BlockPresenceManager - sm SessionManager -} - -// New initializes a new WantManager for a given context. 
-func New(ctx context.Context, peerHandler PeerHandler, sim *bssim.SessionInterestManager, bpm *bsbpm.BlockPresenceManager) *WantManager { - return &WantManager{ - peerHandler: peerHandler, - sim: sim, - bpm: bpm, - } -} - -func (wm *WantManager) SetSessionManager(sm SessionManager) { - wm.sm = sm -} - -// ReceiveFrom is called when a new message is received -func (wm *WantManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { - // Record block presence for HAVE / DONT_HAVE - wm.bpm.ReceiveFrom(p, haves, dontHaves) - // Inform interested sessions - wm.sm.ReceiveFrom(p, blks, haves, dontHaves) - // Send CANCEL to all peers with want-have / want-block - wm.peerHandler.SendCancels(ctx, blks) -} - -// BroadcastWantHaves is called when want-haves should be broadcast to all -// connected peers (as part of session discovery) -func (wm *WantManager) BroadcastWantHaves(ctx context.Context, ses uint64, wantHaves []cid.Cid) { - // TODO: Avoid calling broadcast through here. It doesn't fit with - // everything else this module does. - - log.Debugf("BroadcastWantHaves session%d: %s", ses, wantHaves) - // Send want-haves to all peers - wm.peerHandler.BroadcastWantHaves(ctx, wantHaves) -} - -// RemoveSession is called when the session is shut down -func (wm *WantManager) RemoveSession(ctx context.Context, ses uint64) { - // Remove session's interest in the given blocks. 
- cancelKs := wm.sim.RemoveSessionInterest(ses) - - // Free up block presence tracking for keys that no session is interested - // in anymore - wm.bpm.RemoveKeys(cancelKs) - - // Send CANCEL to all peers for blocks that no session is interested in anymore - wm.peerHandler.SendCancels(ctx, cancelKs) -} - -// Connected is called when a new peer connects -func (wm *WantManager) Connected(p peer.ID) { - // Tell the peer handler that there is a new connection and give it the - // list of outstanding broadcast wants - wm.peerHandler.Connected(p) -} - -// Disconnected is called when a peer disconnects -func (wm *WantManager) Disconnected(p peer.ID) { - wm.peerHandler.Disconnected(p) -} diff --git a/bitswap/internal/wantmanager/wantmanager_test.go b/bitswap/internal/wantmanager/wantmanager_test.go deleted file mode 100644 index 9855eb30d..000000000 --- a/bitswap/internal/wantmanager/wantmanager_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package wantmanager - -import ( - "context" - "testing" - - bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" - bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" - "github.com/ipfs/go-bitswap/internal/sessionmanager" - "github.com/ipfs/go-bitswap/internal/testutil" - - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/peer" -) - -type fakePeerHandler struct { - lastBcstWants []cid.Cid - lastCancels []cid.Cid -} - -func (fph *fakePeerHandler) Connected(p peer.ID) { -} -func (fph *fakePeerHandler) Disconnected(p peer.ID) { - -} -func (fph *fakePeerHandler) BroadcastWantHaves(ctx context.Context, wantHaves []cid.Cid) { - fph.lastBcstWants = wantHaves -} -func (fph *fakePeerHandler) SendCancels(ctx context.Context, cancels []cid.Cid) { - fph.lastCancels = cancels -} - -type fakeSessionManager struct { -} - -func (*fakeSessionManager) ReceiveFrom(p peer.ID, blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) []sessionmanager.Session { - return nil -} - -func TestReceiveFrom(t *testing.T) { - ctx := 
context.Background() - ph := &fakePeerHandler{} - sim := bssim.New() - bpm := bsbpm.New() - wm := New(context.Background(), ph, sim, bpm) - sm := &fakeSessionManager{} - wm.SetSessionManager(sm) - - p := testutil.GeneratePeers(1)[0] - ks := testutil.GenerateCids(2) - haves := testutil.GenerateCids(2) - dontHaves := testutil.GenerateCids(2) - wm.ReceiveFrom(ctx, p, ks, haves, dontHaves) - - if !bpm.PeerHasBlock(p, haves[0]) { - t.Fatal("expected block presence manager to be invoked") - } - if !bpm.PeerDoesNotHaveBlock(p, dontHaves[0]) { - t.Fatal("expected block presence manager to be invoked") - } - if len(ph.lastCancels) != len(ks) { - t.Fatal("expected received blocks to be cancelled") - } -} - -func TestRemoveSession(t *testing.T) { - ctx := context.Background() - ph := &fakePeerHandler{} - sim := bssim.New() - bpm := bsbpm.New() - wm := New(context.Background(), ph, sim, bpm) - sm := &fakeSessionManager{} - wm.SetSessionManager(sm) - - // Record session interest in 2 keys for session 0 and 2 keys for session 1 - // with 1 overlapping key - cids := testutil.GenerateCids(3) - ses0 := uint64(0) - ses1 := uint64(1) - ses0ks := cids[:2] - ses1ks := cids[1:] - sim.RecordSessionInterest(ses0, ses0ks) - sim.RecordSessionInterest(ses1, ses1ks) - - // Receive HAVE for all keys - p := testutil.GeneratePeers(1)[0] - ks := []cid.Cid{} - haves := append(ses0ks, ses1ks...) 
- dontHaves := []cid.Cid{} - wm.ReceiveFrom(ctx, p, ks, haves, dontHaves) - - // Remove session 0 - wm.RemoveSession(ctx, ses0) - - // Expect session 0 interest to be removed and session 1 interest to be - // unchanged - if len(sim.FilterSessionInterested(ses0, ses0ks)[0]) != 0 { - t.Fatal("expected session 0 interest to be removed") - } - if len(sim.FilterSessionInterested(ses1, ses1ks)[0]) != len(ses1ks) { - t.Fatal("expected session 1 interest to be unchanged") - } - - // Should clear block presence for key that was in session 0 and not - // in session 1 - if bpm.PeerHasBlock(p, ses0ks[0]) { - t.Fatal("expected block presence manager to be cleared") - } - if !bpm.PeerHasBlock(p, ses0ks[1]) { - t.Fatal("expected block presence manager to be unchanged for overlapping key") - } - - // Should cancel key that was in session 0 and not session 1 - if len(ph.lastCancels) != 1 || !ph.lastCancels[0].Equals(cids[0]) { - t.Fatal("expected removed want-have to be cancelled") - } -} From d7ee8c34e2bcf7350831b5f7a41607761548ae0f Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 23 Apr 2020 16:46:28 -0400 Subject: [PATCH 0933/1035] fix: race in session test This commit was moved from ipfs/go-bitswap@02942c3041f092d6a91ac5d17017a49eb5233afa --- bitswap/internal/session/session_test.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/bitswap/internal/session/session_test.go b/bitswap/internal/session/session_test.go index a8773f1c1..194a1ec96 100644 --- a/bitswap/internal/session/session_test.go +++ b/bitswap/internal/session/session_test.go @@ -2,6 +2,7 @@ package session import ( "context" + "sync" "testing" "time" @@ -59,8 +60,9 @@ type wantReq struct { } type fakePeerManager struct { - cancels []cid.Cid wantReqs chan wantReq + lk sync.Mutex + cancels []cid.Cid } func newFakePeerManager() *fakePeerManager { @@ -81,8 +83,15 @@ func (pm *fakePeerManager) BroadcastWantHaves(ctx context.Context, cids []cid.Ci } } func (pm *fakePeerManager) 
SendCancels(ctx context.Context, cancels []cid.Cid) { + pm.lk.Lock() + defer pm.lk.Unlock() pm.cancels = append(pm.cancels, cancels...) } +func (pm *fakePeerManager) allCancels() []cid.Cid { + pm.lk.Lock() + defer pm.lk.Unlock() + return append([]cid.Cid{}, pm.cancels...) +} func TestSessionGetBlocks(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) @@ -173,7 +182,7 @@ func TestSessionGetBlocks(t *testing.T) { time.Sleep(10 * time.Millisecond) // Verify wants were cancelled - if len(fpm.cancels) != len(blks) { + if len(fpm.allCancels()) != len(blks) { t.Fatal("expected cancels to be sent for all wants") } } From 6257c9b476192549446ee2a781289b6a93d2d8e6 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 23 Apr 2020 17:20:42 -0400 Subject: [PATCH 0934/1035] fix: send CANCELs when session context is cancelled This commit was moved from ipfs/go-bitswap@2ac2ed62a164ccc915fe3e14eeb03a8a19bf8079 --- bitswap/bitswap.go | 4 ++-- bitswap/internal/session/session.go | 17 +++++++++++------ bitswap/internal/session/session_test.go | 12 ++++++------ 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index f3320967f..db0ca0986 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -139,7 +139,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, pm := bspm.New(ctx, peerQueueFactory, network.Self()) pqm := bspqm.New(ctx, network) - sessionFactory := func(ctx context.Context, id uint64, spm bssession.SessionPeerManager, + sessionFactory := func(sessctx context.Context, id uint64, spm bssession.SessionPeerManager, sim *bssim.SessionInterestManager, pm bssession.PeerManager, bpm *bsbpm.BlockPresenceManager, @@ -147,7 +147,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, provSearchDelay time.Duration, rebroadcastDelay delay.D, self peer.ID) bssm.Session { - return bssession.New(ctx, id, spm, pqm, sim, pm, bpm, notif, provSearchDelay, 
rebroadcastDelay, self) + return bssession.New(ctx, sessctx, id, spm, pqm, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) } sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.SessionPeerManager { return bsspm.New(id, network.ConnectionManager()) diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index ef7798084..11c8b0924 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -91,7 +91,8 @@ type op struct { // info to, and who to request blocks from. type Session struct { // dependencies - ctx context.Context + bsctx context.Context // context for bitswap + ctx context.Context // context for session pm PeerManager bpm *bsbpm.BlockPresenceManager sprm SessionPeerManager @@ -124,7 +125,9 @@ type Session struct { // New creates a new bitswap session whose lifetime is bounded by the // given context. -func New(ctx context.Context, +func New( + bsctx context.Context, // context for bitswap + ctx context.Context, // context for this session id uint64, sprm SessionPeerManager, providerFinder ProviderFinder, @@ -138,6 +141,7 @@ func New(ctx context.Context, s := &Session{ sw: newSessionWants(broadcastLiveWantsLimit), tickDelayReqs: make(chan time.Duration), + bsctx: bsctx, ctx: ctx, pm: pm, bpm: bpm, @@ -393,10 +397,11 @@ func (s *Session) handleShutdown() { // in anymore s.bpm.RemoveKeys(cancelKs) - // TODO: If the context is cancelled this won't actually send any CANCELs. - // We should use a longer lived context to send out these CANCELs. - // Send CANCEL to all peers for blocks that no session is interested in anymore - s.pm.SendCancels(s.ctx, cancelKs) + // Send CANCEL to all peers for blocks that no session is interested in + // anymore. + // Note: use bitswap context because session context has already been + // cancelled. 
+ s.pm.SendCancels(s.bsctx, cancelKs) } // handleReceive is called when the session receives blocks from a peer diff --git a/bitswap/internal/session/session_test.go b/bitswap/internal/session/session_test.go index 194a1ec96..79010db1f 100644 --- a/bitswap/internal/session/session_test.go +++ b/bitswap/internal/session/session_test.go @@ -103,7 +103,7 @@ func TestSessionGetBlocks(t *testing.T) { notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session := New(ctx, ctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) var cids []cid.Cid @@ -198,7 +198,7 @@ func TestSessionFindMorePeers(t *testing.T) { notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session := New(ctx, ctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") session.SetBaseTickDelay(200 * time.Microsecond) blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) @@ -272,7 +272,7 @@ func TestSessionOnPeersExhausted(t *testing.T) { notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session := New(ctx, ctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit + 5) var cids []cid.Cid @@ -316,7 +316,7 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - 
session := New(ctx, id, fspm, fpf, sim, fpm, bpm, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond), "") + session := New(ctx, ctx, id, fspm, fpf, sim, fpm, bpm, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(4) var cids []cid.Cid @@ -431,7 +431,7 @@ func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { // Create a new session with its own context sessctx, sesscancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - session := New(sessctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session := New(context.Background(), sessctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") timerCtx, timerCancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer timerCancel() @@ -472,7 +472,7 @@ func TestSessionReceiveMessageAfterShutdown(t *testing.T) { notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session := New(ctx, ctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(2) cids := []cid.Cid{blks[0].Cid(), blks[1].Cid()} From 95b79debe99610e1c89665904baf9c22d274cdc1 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Mon, 27 Apr 2020 11:36:50 +0200 Subject: [PATCH 0935/1035] Add standard issue template This commit was moved from ipfs/go-bitswap@ac478dee9f56492212386d9b91606411d575ebb9 --- .../.github/ISSUE_TEMPLATE/open_an_issue.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 bitswap/.github/ISSUE_TEMPLATE/open_an_issue.md diff --git a/bitswap/.github/ISSUE_TEMPLATE/open_an_issue.md b/bitswap/.github/ISSUE_TEMPLATE/open_an_issue.md new file mode 100644 index 
000000000..4fcbd00ac --- /dev/null +++ b/bitswap/.github/ISSUE_TEMPLATE/open_an_issue.md @@ -0,0 +1,19 @@ +--- +name: Open an issue +about: Only for actionable issues relevant to this repository. +title: '' +labels: need/triage +assignees: '' + +--- + From 2b03ff9bf00ef685036435281daf764b7f601b84 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 29 Apr 2020 18:26:16 -0400 Subject: [PATCH 0936/1035] feat: calculate message latency This commit was moved from ipfs/go-bitswap@6763be87bc7f052a315840b5134d6e63c1869d3c --- bitswap/bitswap.go | 18 ++- .../messagequeue/donthavetimeoutmgr.go | 120 ++++++++++++++--- .../messagequeue/donthavetimeoutmgr_test.go | 88 +++++++++++-- bitswap/internal/messagequeue/messagequeue.go | 123 ++++++++++++++++-- .../messagequeue/messagequeue_test.go | 57 +++++++- bitswap/internal/peermanager/peermanager.go | 15 +++ .../internal/peermanager/peermanager_test.go | 2 + 7 files changed, 381 insertions(+), 42 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index db0ca0986..36b95cfd5 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -303,14 +303,14 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks // HasBlock announces the existence of a block to this bitswap service. The // service will potentially notify its peers. func (bs *Bitswap) HasBlock(blk blocks.Block) error { - return bs.receiveBlocksFrom(context.Background(), "", []blocks.Block{blk}, nil, nil) + return bs.receiveBlocksFrom(context.Background(), time.Time{}, "", []blocks.Block{blk}, nil, nil) } // TODO: Some of this stuff really only needs to be done when adding a block // from the user, not when receiving it from the network. // In case you run `git blame` on this comment, I'll save you some time: ask // @whyrusleeping, I don't know the answers you seek. 
-func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []blocks.Block, haves []cid.Cid, dontHaves []cid.Cid) error { +func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, at time.Time, from peer.ID, blks []blocks.Block, haves []cid.Cid, dontHaves []cid.Cid) error { select { case <-bs.process.Closing(): return errors.New("bitswap is closed") @@ -348,6 +348,16 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []b allKs = append(allKs, b.Cid()) } + // If the message came from the network + if from != "" { + // Inform the PeerManager so that we can calculate per-peer latency + combined := make([]cid.Cid, 0, len(allKs)+len(haves)+len(dontHaves)) + combined = append(combined, allKs...) + combined = append(combined, haves...) + combined = append(combined, dontHaves...) + bs.pm.ResponseReceived(from, at, combined) + } + // Send all block keys (including duplicates) to any sessions that want them. // (The duplicates are needed by sessions for accounting purposes) bs.sm.ReceiveFrom(ctx, from, allKs, haves, dontHaves) @@ -386,6 +396,8 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []b // ReceiveMessage is called by the network interface when a new message is // received. 
func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { + now := time.Now() + bs.counterLk.Lock() bs.counters.messagesRecvd++ bs.counterLk.Unlock() @@ -409,7 +421,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg dontHaves := incoming.DontHaves() if len(iblocks) > 0 || len(haves) > 0 || len(dontHaves) > 0 { // Process blocks - err := bs.receiveBlocksFrom(ctx, p, iblocks, haves, dontHaves) + err := bs.receiveBlocksFrom(ctx, now, p, iblocks, haves, dontHaves) if err != nil { log.Warnf("ReceiveMessage recvBlockFrom error: %s", err) return diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr.go b/bitswap/internal/messagequeue/donthavetimeoutmgr.go index e53b232e6..14e70c077 100644 --- a/bitswap/internal/messagequeue/donthavetimeoutmgr.go +++ b/bitswap/internal/messagequeue/donthavetimeoutmgr.go @@ -21,10 +21,20 @@ const ( // peer takes to process a want and initiate sending a response to us maxExpectedWantProcessTime = 2 * time.Second - // latencyMultiplier is multiplied by the average ping time to + // maxTimeout is the maximum allowed timeout, regardless of latency + maxTimeout = dontHaveTimeout + maxExpectedWantProcessTime + + // pingLatencyMultiplier is multiplied by the average ping time to // get an upper bound on how long we expect to wait for a peer's response // to arrive - latencyMultiplier = 3 + pingLatencyMultiplier = 3 + + // messageLatencyAlpha is the alpha supplied to the message latency EWMA + messageLatencyAlpha = 0.5 + + // To give a margin for error, the timeout is calculated as + // messageLatencyMultiplier * message latency + messageLatencyMultiplier = 2 ) // PeerConnection is a connection to a peer that can be pinged, and the @@ -44,16 +54,20 @@ type pendingWant struct { sent time.Time } -// dontHaveTimeoutMgr pings the peer to measure latency. 
It uses the latency to -// set a reasonable timeout for simulating a DONT_HAVE message for peers that -// don't support DONT_HAVE or that take to long to respond. +// dontHaveTimeoutMgr simulates a DONT_HAVE message if the peer takes too long +// to respond to a message. +// The timeout is based on latency - we start with a default latency, while +// we ping the peer to estimate latency. If we receive a response from the +// peer we use the response latency. type dontHaveTimeoutMgr struct { ctx context.Context shutdown func() peerConn PeerConnection onDontHaveTimeout func([]cid.Cid) defaultTimeout time.Duration - latencyMultiplier int + maxTimeout time.Duration + pingLatencyMultiplier int + messageLatencyMultiplier int maxExpectedWantProcessTime time.Duration // All variables below here must be protected by the lock @@ -66,6 +80,8 @@ type dontHaveTimeoutMgr struct { wantQueue []*pendingWant // time to wait for a response (depends on latency) timeout time.Duration + // ewma of message latency (time from message sent to response received) + messageLatency *latencyEwma // timer used to wait until want at front of queue expires checkForTimeoutsTimer *time.Timer } @@ -73,13 +89,18 @@ type dontHaveTimeoutMgr struct { // newDontHaveTimeoutMgr creates a new dontHaveTimeoutMgr // onDontHaveTimeout is called when pending keys expire (not cancelled before timeout) func newDontHaveTimeoutMgr(pc PeerConnection, onDontHaveTimeout func([]cid.Cid)) *dontHaveTimeoutMgr { - return newDontHaveTimeoutMgrWithParams(pc, onDontHaveTimeout, dontHaveTimeout, - latencyMultiplier, maxExpectedWantProcessTime) + return newDontHaveTimeoutMgrWithParams(pc, onDontHaveTimeout, dontHaveTimeout, maxTimeout, + pingLatencyMultiplier, messageLatencyMultiplier, maxExpectedWantProcessTime) } // newDontHaveTimeoutMgrWithParams is used by the tests -func newDontHaveTimeoutMgrWithParams(pc PeerConnection, onDontHaveTimeout func([]cid.Cid), - defaultTimeout time.Duration, latencyMultiplier int, +func 
newDontHaveTimeoutMgrWithParams( + pc PeerConnection, + onDontHaveTimeout func([]cid.Cid), + defaultTimeout time.Duration, + maxTimeout time.Duration, + pingLatencyMultiplier int, + messageLatencyMultiplier int, maxExpectedWantProcessTime time.Duration) *dontHaveTimeoutMgr { ctx, shutdown := context.WithCancel(context.Background()) @@ -89,8 +110,11 @@ func newDontHaveTimeoutMgrWithParams(pc PeerConnection, onDontHaveTimeout func([ peerConn: pc, activeWants: make(map[cid.Cid]*pendingWant), timeout: defaultTimeout, + messageLatency: &latencyEwma{alpha: messageLatencyAlpha}, defaultTimeout: defaultTimeout, - latencyMultiplier: latencyMultiplier, + maxTimeout: maxTimeout, + pingLatencyMultiplier: pingLatencyMultiplier, + messageLatencyMultiplier: messageLatencyMultiplier, maxExpectedWantProcessTime: maxExpectedWantProcessTime, onDontHaveTimeout: onDontHaveTimeout, } @@ -126,16 +150,36 @@ func (dhtm *dontHaveTimeoutMgr) Start() { // calculate a reasonable timeout latency := dhtm.peerConn.Latency() if latency.Nanoseconds() > 0 { - dhtm.timeout = dhtm.calculateTimeoutFromLatency(latency) + dhtm.timeout = dhtm.calculateTimeoutFromPingLatency(latency) return } // Otherwise measure latency by pinging the peer - go dhtm.measureLatency() + go dhtm.measurePingLatency() +} + +// UpdateMessageLatency is called when we receive a response from the peer. +// It is the time between sending a request and receiving the corresponding +// response. 
+func (dhtm *dontHaveTimeoutMgr) UpdateMessageLatency(elapsed time.Duration) { + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + // Update the message latency and the timeout + dhtm.messageLatency.update(elapsed) + oldTimeout := dhtm.timeout + dhtm.timeout = dhtm.calculateTimeoutFromMessageLatency() + + // If the timeout has decreased + if dhtm.timeout < oldTimeout { + // Check if after changing the timeout there are any pending wants that + // are now over the timeout + dhtm.checkForTimeouts() + } } -// measureLatency measures the latency to the peer by pinging it -func (dhtm *dontHaveTimeoutMgr) measureLatency() { +// measurePingLatency measures the latency to the peer by pinging it +func (dhtm *dontHaveTimeoutMgr) measurePingLatency() { // Wait up to defaultTimeout for a response to the ping ctx, cancel := context.WithTimeout(dhtm.ctx, dhtm.defaultTimeout) defer cancel() @@ -154,8 +198,13 @@ func (dhtm *dontHaveTimeoutMgr) measureLatency() { dhtm.lk.Lock() defer dhtm.lk.Unlock() + // A message has arrived so we already set the timeout based on message latency + if dhtm.messageLatency.samples > 0 { + return + } + // Calculate a reasonable timeout based on latency - dhtm.timeout = dhtm.calculateTimeoutFromLatency(latency) + dhtm.timeout = dhtm.calculateTimeoutFromPingLatency(latency) // Check if after changing the timeout there are any pending wants that are // now over the timeout @@ -284,10 +333,43 @@ func (dhtm *dontHaveTimeoutMgr) fireTimeout(pending []cid.Cid) { dhtm.onDontHaveTimeout(pending) } -// calculateTimeoutFromLatency calculates a reasonable timeout derived from latency -func (dhtm *dontHaveTimeoutMgr) calculateTimeoutFromLatency(latency time.Duration) time.Duration { +// calculateTimeoutFromPingLatency calculates a reasonable timeout derived from latency +func (dhtm *dontHaveTimeoutMgr) calculateTimeoutFromPingLatency(latency time.Duration) time.Duration { // The maximum expected time for a response is // the expected time to process the want + (latency 
* multiplier) // The multiplier is to provide some padding for variable latency. - return dhtm.maxExpectedWantProcessTime + time.Duration(dhtm.latencyMultiplier)*latency + timeout := dhtm.maxExpectedWantProcessTime + time.Duration(dhtm.pingLatencyMultiplier)*latency + if timeout > dhtm.maxTimeout { + timeout = dhtm.maxTimeout + } + return timeout +} + +// calculateTimeoutFromMessageLatency calculates a timeout derived from message latency +func (dhtm *dontHaveTimeoutMgr) calculateTimeoutFromMessageLatency() time.Duration { + timeout := dhtm.messageLatency.latency * time.Duration(dhtm.messageLatencyMultiplier) + if timeout > dhtm.maxTimeout { + timeout = dhtm.maxTimeout + } + return timeout +} + +// latencyEwma is an EWMA of message latency +type latencyEwma struct { + alpha float64 + samples uint64 + latency time.Duration +} + +// update the EWMA with the given sample +func (le *latencyEwma) update(elapsed time.Duration) { + le.samples++ + + // Initially set alpha to be 1.0 / + alpha := 1.0 / float64(le.samples) + if alpha < le.alpha { + // Once we have enough samples, clamp alpha + alpha = le.alpha + } + le.latency = time.Duration(float64(elapsed)*alpha + (1-alpha)*float64(le.latency)) } diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go index 03ceb4816..6f315fea9 100644 --- a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go +++ b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go @@ -79,7 +79,7 @@ func TestDontHaveTimeoutMgrTimeout(t *testing.T) { tr := timeoutRecorder{} dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, latMultiplier, expProcessTime) + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) dhtm.Start() defer dhtm.Shutdown() @@ -102,7 +102,7 @@ func TestDontHaveTimeoutMgrTimeout(t *testing.T) { // At this stage first set of keys should have timed out if tr.timedOutCount() != len(firstks) { - 
t.Fatal("expected timeout") + t.Fatal("expected timeout", tr.timedOutCount(), len(firstks)) } // Clear the recorded timed out keys @@ -129,7 +129,7 @@ func TestDontHaveTimeoutMgrCancel(t *testing.T) { tr := timeoutRecorder{} dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, latMultiplier, expProcessTime) + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) dhtm.Start() defer dhtm.Shutdown() @@ -160,7 +160,7 @@ func TestDontHaveTimeoutWantCancelWant(t *testing.T) { tr := timeoutRecorder{} dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, latMultiplier, expProcessTime) + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) dhtm.Start() defer dhtm.Shutdown() @@ -204,7 +204,7 @@ func TestDontHaveTimeoutRepeatedAddPending(t *testing.T) { tr := timeoutRecorder{} dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, latMultiplier, expProcessTime) + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) dhtm.Start() defer dhtm.Shutdown() @@ -222,6 +222,78 @@ func TestDontHaveTimeoutRepeatedAddPending(t *testing.T) { } } +func TestDontHaveTimeoutMgrMessageLatency(t *testing.T) { + ks := testutil.GenerateCids(2) + latency := time.Millisecond * 40 + latMultiplier := 1 + expProcessTime := time.Duration(0) + msgLatencyMultiplier := 1 + pc := &mockPeerConn{latency: latency} + tr := timeoutRecorder{} + + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + dontHaveTimeout, maxTimeout, latMultiplier, msgLatencyMultiplier, expProcessTime) + dhtm.Start() + defer dhtm.Shutdown() + + // Add keys + dhtm.AddPending(ks) + + // expectedTimeout + // = expProcessTime + latency*time.Duration(latMultiplier) + // = 0 + 40ms * 1 + // = 40ms + + // Wait for less than the expected timeout + time.Sleep(25 * time.Millisecond) + + // Receive two message latency updates + dhtm.UpdateMessageLatency(time.Millisecond * 20) 
+ dhtm.UpdateMessageLatency(time.Millisecond * 10) + + // alpha is 0.5 so timeout should be + // = (20ms * alpha) + (10ms * (1 - alpha)) + // = (20ms * 0.5) + (10ms * 0.5) + // = 15ms + // We've already slept for 25ms so with the new 15ms timeout + // the keys should have timed out + + // Give the queue some time to process the updates + time.Sleep(5 * time.Millisecond) + + if tr.timedOutCount() != len(ks) { + t.Fatal("expected keys to timeout") + } +} + +func TestDontHaveTimeoutMgrMessageLatencyMax(t *testing.T) { + ks := testutil.GenerateCids(2) + pc := &mockPeerConn{latency: time.Second} // ignored + tr := timeoutRecorder{} + msgLatencyMultiplier := 1 + testMaxTimeout := time.Millisecond * 10 + + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + dontHaveTimeout, testMaxTimeout, pingLatencyMultiplier, msgLatencyMultiplier, maxExpectedWantProcessTime) + dhtm.Start() + defer dhtm.Shutdown() + + // Add keys + dhtm.AddPending(ks) + + // Receive a message latency update that would make the timeout greater + // than the maximum timeout + dhtm.UpdateMessageLatency(testMaxTimeout * 4) + + // Sleep until just after the maximum timeout + time.Sleep(testMaxTimeout + 5*time.Millisecond) + + // Keys should have timed out + if tr.timedOutCount() != len(ks) { + t.Fatal("expected keys to timeout") + } +} + func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { ks := testutil.GenerateCids(2) latency := time.Millisecond * 1 @@ -233,7 +305,7 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { pc := &mockPeerConn{latency: latency, err: fmt.Errorf("ping error")} dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - defaultTimeout, latMultiplier, expProcessTime) + defaultTimeout, dontHaveTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) dhtm.Start() defer dhtm.Shutdown() @@ -267,7 +339,7 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { pc := &mockPeerConn{latency: latency} dhtm := 
newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - defaultTimeout, latMultiplier, expProcessTime) + defaultTimeout, dontHaveTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) dhtm.Start() defer dhtm.Shutdown() @@ -300,7 +372,7 @@ func TestDontHaveTimeoutNoTimeoutAfterShutdown(t *testing.T) { pc := &mockPeerConn{latency: latency} dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, latMultiplier, expProcessTime) + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) dhtm.Start() defer dhtm.Shutdown() diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 755df08a7..9db2a8628 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -64,6 +64,7 @@ type MessageQueue struct { sendErrorBackoff time.Duration outgoingWork chan time.Time + responses chan *Response // Take lock whenever any of these variables are modified wllock sync.Mutex @@ -88,12 +89,15 @@ type recallWantlist struct { pending *bswl.Wantlist // The list of wants that have been sent sent *bswl.Wantlist + // The time at which each want was sent + sentAt map[cid.Cid]time.Time } func newRecallWantList() recallWantlist { return recallWantlist{ pending: bswl.New(), sent: bswl.New(), + sentAt: make(map[cid.Cid]time.Time), } } @@ -104,14 +108,18 @@ func (r *recallWantlist) Add(c cid.Cid, priority int32, wtype pb.Message_Wantlis // Remove wants from both the pending list and the list of sent wants func (r *recallWantlist) Remove(c cid.Cid) { - r.sent.Remove(c) r.pending.Remove(c) + r.sent.Remove(c) + delete(r.sentAt, c) } // Remove wants by type from both the pending list and the list of sent wants func (r *recallWantlist) RemoveType(c cid.Cid, wtype pb.Message_Wantlist_WantType) { - r.sent.RemoveType(c, wtype) r.pending.RemoveType(c, wtype) + r.sent.RemoveType(c, wtype) + if _, ok := r.sent.Contains(c); !ok { + delete(r.sentAt, 
c) + } } // MarkSent moves the want from the pending to the sent list @@ -126,6 +134,16 @@ func (r *recallWantlist) MarkSent(e wantlist.Entry) bool { return true } +// SentAt records the time at which a want was sent +func (r *recallWantlist) SentAt(c cid.Cid, at time.Time) { + // The want may have been cancelled in the interim + if _, ok := r.sent.Contains(c); ok { + if _, ok := r.sentAt[c]; !ok { + r.sentAt[c] = at + } + } +} + type peerConn struct { p peer.ID network MessageNetwork @@ -160,6 +178,15 @@ type DontHaveTimeoutManager interface { AddPending([]cid.Cid) // CancelPending removes the wants CancelPending([]cid.Cid) + UpdateMessageLatency(time.Duration) +} + +// Response from the peer +type Response struct { + // The time at which the response was received + at time.Time + // The blocks / HAVEs / DONT_HAVEs in the response + ks []cid.Cid } // New creates a new MessageQueue. @@ -177,7 +204,7 @@ func newMessageQueue(ctx context.Context, p peer.ID, network MessageNetwork, maxMsgSize int, sendErrorBackoff time.Duration, dhTimeoutMgr DontHaveTimeoutManager) *MessageQueue { ctx, cancel := context.WithCancel(ctx) - mq := &MessageQueue{ + return &MessageQueue{ ctx: ctx, shutdown: cancel, p: p, @@ -188,6 +215,7 @@ func newMessageQueue(ctx context.Context, p peer.ID, network MessageNetwork, peerWants: newRecallWantList(), cancels: cid.NewSet(), outgoingWork: make(chan time.Time, 1), + responses: make(chan *Response, 8), rebroadcastInterval: defaultRebroadcastInterval, sendErrorBackoff: sendErrorBackoff, priority: maxPriority, @@ -195,8 +223,6 @@ func newMessageQueue(ctx context.Context, p peer.ID, network MessageNetwork, // after using it, instead of creating a new one every time. msg: bsmsg.New(false), } - - return mq } // Add want-haves that are part of a broadcast to all connected peers @@ -291,6 +317,22 @@ func (mq *MessageQueue) AddCancels(cancelKs []cid.Cid) { } } +// ResponseReceived is called when a message is received from the network. 
+// ks is the set of blocks, HAVEs and DONT_HAVEs in the message +// Note that this is just used to calculate latency. +func (mq *MessageQueue) ResponseReceived(at time.Time, ks []cid.Cid) { + if len(ks) == 0 { + return + } + + // These messages are just used to approximate latency, so if we get so + // many responses that they get backed up, just ignore the overflow. + select { + case mq.responses <- &Response{at, ks}: + default: + } +} + // SetRebroadcastInterval sets a new interval on which to rebroadcast the full wantlist func (mq *MessageQueue) SetRebroadcastInterval(delay time.Duration) { mq.rebroadcastIntervalLk.Lock() @@ -340,6 +382,7 @@ func (mq *MessageQueue) runQueue() { select { case <-mq.rebroadcastTimer.C: mq.rebroadcastWantlist() + case when := <-mq.outgoingWork: // If we have work scheduled, cancel the timer. If we // don't, record when the work was scheduled. @@ -362,11 +405,17 @@ func (mq *MessageQueue) runQueue() { // Otherwise, extend the timer. scheduleWork.Reset(sendMessageDebounce) } + case <-scheduleWork.C: // We have work scheduled and haven't seen any updates // in sendMessageDebounce. Send immediately. 
workScheduled = time.Time{} mq.sendIfReady() + + case res := <-mq.responses: + // We received a response from the peer, calculate latency + mq.handleResponse(res) + case <-mq.ctx.Done(): return } @@ -431,7 +480,7 @@ func (mq *MessageQueue) sendMessage() { mq.dhTimeoutMgr.Start() // Convert want lists to a Bitswap Message - message := mq.extractOutgoingMessage(mq.sender.SupportsHave()) + message, onSent := mq.extractOutgoingMessage(mq.sender.SupportsHave()) // After processing the message, clear out its fields to save memory defer mq.msg.Reset(false) @@ -451,6 +500,9 @@ func (mq *MessageQueue) sendMessage() { return } + // Record sent time so as to calculate message latency + onSent() + // Set a timer to wait for responses mq.simulateDontHaveWithTimeout(wantlist) @@ -489,6 +541,34 @@ func (mq *MessageQueue) simulateDontHaveWithTimeout(wantlist []bsmsg.Entry) { mq.dhTimeoutMgr.AddPending(wants) } +// handleResponse is called when a response is received from the peer +func (mq *MessageQueue) handleResponse(res *Response) { + now := time.Now() + earliest := time.Time{} + + mq.wllock.Lock() + + // Check if the keys in the response correspond to any request that was + // sent to the peer. + // Find the earliest request so as to calculate the longest latency as + // we want to be conservative when setting the timeout. 
+ for _, c := range res.ks { + if at, ok := mq.bcstWants.sentAt[c]; ok && (earliest.IsZero() || at.Before(earliest)) { + earliest = at + } + if at, ok := mq.peerWants.sentAt[c]; ok && (earliest.IsZero() || at.Before(earliest)) { + earliest = at + } + } + + mq.wllock.Unlock() + + if !earliest.IsZero() { + // Inform the timeout manager of the calculated latency + mq.dhTimeoutMgr.UpdateMessageLatency(now.Sub(earliest)) + } +} + func (mq *MessageQueue) logOutgoingMessage(wantlist []bsmsg.Entry) { // Save some CPU cycles and allocations if log level is higher than debug if ce := sflog.Check(zap.DebugLevel, "sent message"); ce == nil { @@ -547,7 +627,7 @@ func (mq *MessageQueue) pendingWorkCount() int { } // Convert the lists of wants into a Bitswap message -func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) bsmsg.BitSwapMessage { +func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwapMessage, func()) { // Get broadcast and regular wantlist entries. mq.wllock.Lock() peerEntries := mq.peerWants.pending.Entries() @@ -641,16 +721,18 @@ FINISH: // Finally, re-take the lock, mark sent and remove any entries from our // message that we've decided to cancel at the last minute. mq.wllock.Lock() - for _, e := range peerEntries[:sentPeerEntries] { + for i, e := range peerEntries[:sentPeerEntries] { if !mq.peerWants.MarkSent(e) { // It changed. 
mq.msg.Remove(e.Cid) + peerEntries[i].Cid = cid.Undef } } - for _, e := range bcstEntries[:sentBcstEntries] { + for i, e := range bcstEntries[:sentBcstEntries] { if !mq.bcstWants.MarkSent(e) { mq.msg.Remove(e.Cid) + bcstEntries[i].Cid = cid.Undef } } @@ -663,7 +745,28 @@ FINISH: } mq.wllock.Unlock() - return mq.msg + // When the message has been sent, record the time at which each want was + // sent so we can calculate message latency + onSent := func() { + now := time.Now() + + mq.wllock.Lock() + defer mq.wllock.Unlock() + + for _, e := range peerEntries[:sentPeerEntries] { + if e.Cid.Defined() { // Check if want was cancelled in the interim + mq.peerWants.SentAt(e.Cid, now) + } + } + + for _, e := range bcstEntries[:sentBcstEntries] { + if e.Cid.Defined() { // Check if want was cancelled in the interim + mq.bcstWants.SentAt(e.Cid, now) + } + } + } + + return mq.msg, onSent } func (mq *MessageQueue) initializeSender() (bsnet.MessageSender, error) { diff --git a/bitswap/internal/messagequeue/messagequeue_test.go b/bitswap/internal/messagequeue/messagequeue_test.go index 344da41a5..32a7242c2 100644 --- a/bitswap/internal/messagequeue/messagequeue_test.go +++ b/bitswap/internal/messagequeue/messagequeue_test.go @@ -44,8 +44,9 @@ func (fms *fakeMessageNetwork) Ping(context.Context, peer.ID) ping.Result { } type fakeDontHaveTimeoutMgr struct { - lk sync.Mutex - ks []cid.Cid + lk sync.Mutex + ks []cid.Cid + latencyUpds []time.Duration } func (fp *fakeDontHaveTimeoutMgr) Start() {} @@ -73,6 +74,18 @@ func (fp *fakeDontHaveTimeoutMgr) CancelPending(ks []cid.Cid) { } fp.ks = s.Keys() } +func (fp *fakeDontHaveTimeoutMgr) UpdateMessageLatency(elapsed time.Duration) { + fp.lk.Lock() + defer fp.lk.Unlock() + + fp.latencyUpds = append(fp.latencyUpds, elapsed) +} +func (fp *fakeDontHaveTimeoutMgr) latencyUpdates() []time.Duration { + fp.lk.Lock() + defer fp.lk.Unlock() + + return fp.latencyUpds +} func (fp *fakeDontHaveTimeoutMgr) pendingCount() int { fp.lk.Lock() defer 
fp.lk.Unlock() @@ -587,6 +600,46 @@ func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { } } +func TestResponseReceived(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, false) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + dhtm := &fakeDontHaveTimeoutMgr{} + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, dhtm) + messageQueue.Startup() + + cids := testutil.GenerateCids(10) + + // Add some wants and wait 10ms + messageQueue.AddWants(cids[:5], nil) + collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + // Add some wants and wait another 10ms + messageQueue.AddWants(cids[5:8], nil) + collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + // Receive a response for some of the wants from both groups + messageQueue.ResponseReceived(time.Now(), []cid.Cid{cids[0], cids[6], cids[9]}) + + // Wait a short time for processing + time.Sleep(10 * time.Millisecond) + + // Check that message queue informs DHTM of received responses + upds := dhtm.latencyUpdates() + if len(upds) != 1 { + t.Fatal("expected one latency update") + } + // Elapsed time should be between when the first want was sent and the + // response received (about 20ms) + if upds[0] < 15*time.Millisecond || upds[0] > 25*time.Millisecond { + t.Fatal("expected latency to be time since oldest message sent") + } +} + func filterWantTypes(wantlist []bsmsg.Entry) ([]cid.Cid, []cid.Cid, []cid.Cid) { var wbs []cid.Cid var whs []cid.Cid diff --git a/bitswap/internal/peermanager/peermanager.go b/bitswap/internal/peermanager/peermanager.go index 522823263..aa40727b2 100644 --- a/bitswap/internal/peermanager/peermanager.go +++ b/bitswap/internal/peermanager/peermanager.go @@ -3,6 +3,7 @@ package peermanager import ( "context" "sync" + "time" logging 
"github.com/ipfs/go-log" "github.com/ipfs/go-metrics-interface" @@ -18,6 +19,7 @@ type PeerQueue interface { AddBroadcastWantHaves([]cid.Cid) AddWants([]cid.Cid, []cid.Cid) AddCancels([]cid.Cid) + ResponseReceived(at time.Time, ks []cid.Cid) Startup() Shutdown() } @@ -116,6 +118,19 @@ func (pm *PeerManager) Disconnected(p peer.ID) { pm.pwm.removePeer(p) } +// ResponseReceived is called when a message is received from the network. +// ks is the set of blocks, HAVEs and DONT_HAVEs in the message +// Note that this is just used to calculate latency. +func (pm *PeerManager) ResponseReceived(p peer.ID, at time.Time, ks []cid.Cid) { + pm.pqLk.Lock() + pq, ok := pm.peerQueues[p] + pm.pqLk.Unlock() + + if ok { + pq.ResponseReceived(at, ks) + } +} + // BroadcastWantHaves broadcasts want-haves to all peers (used by the session // to discover seeds). // For each peer it filters out want-haves that have previously been sent to diff --git a/bitswap/internal/peermanager/peermanager_test.go b/bitswap/internal/peermanager/peermanager_test.go index 469aa4d19..d5d348fe6 100644 --- a/bitswap/internal/peermanager/peermanager_test.go +++ b/bitswap/internal/peermanager/peermanager_test.go @@ -35,6 +35,8 @@ func (fp *mockPeerQueue) AddWants(wbs []cid.Cid, whs []cid.Cid) { func (fp *mockPeerQueue) AddCancels(cs []cid.Cid) { fp.msgs <- msg{fp.p, nil, nil, cs} } +func (fp *mockPeerQueue) ResponseReceived(at time.Time, ks []cid.Cid) { +} type peerWants struct { wantHaves []cid.Cid From 0f81d00d9eeea3b893822e3d760c73cda4dd7ed5 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 30 Apr 2020 11:39:05 -0400 Subject: [PATCH 0937/1035] fix: simplify latency timing This commit was moved from ipfs/go-bitswap@5c215f4179b976a42adc3838172fe8651929bc10 --- bitswap/bitswap.go | 10 +++---- bitswap/internal/messagequeue/messagequeue.go | 26 ++++++++----------- .../messagequeue/messagequeue_test.go | 2 +- bitswap/internal/peermanager/peermanager.go | 7 +++-- .../internal/peermanager/peermanager_test.go 
| 2 +- 5 files changed, 20 insertions(+), 27 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 36b95cfd5..bfcd125f9 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -303,14 +303,14 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks // HasBlock announces the existence of a block to this bitswap service. The // service will potentially notify its peers. func (bs *Bitswap) HasBlock(blk blocks.Block) error { - return bs.receiveBlocksFrom(context.Background(), time.Time{}, "", []blocks.Block{blk}, nil, nil) + return bs.receiveBlocksFrom(context.Background(), "", []blocks.Block{blk}, nil, nil) } // TODO: Some of this stuff really only needs to be done when adding a block // from the user, not when receiving it from the network. // In case you run `git blame` on this comment, I'll save you some time: ask // @whyrusleeping, I don't know the answers you seek. -func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, at time.Time, from peer.ID, blks []blocks.Block, haves []cid.Cid, dontHaves []cid.Cid) error { +func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []blocks.Block, haves []cid.Cid, dontHaves []cid.Cid) error { select { case <-bs.process.Closing(): return errors.New("bitswap is closed") @@ -355,7 +355,7 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, at time.Time, from pee combined = append(combined, allKs...) combined = append(combined, haves...) combined = append(combined, dontHaves...) - bs.pm.ResponseReceived(from, at, combined) + bs.pm.ResponseReceived(from, combined) } // Send all block keys (including duplicates) to any sessions that want them. @@ -396,8 +396,6 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, at time.Time, from pee // ReceiveMessage is called by the network interface when a new message is // received. 
func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { - now := time.Now() - bs.counterLk.Lock() bs.counters.messagesRecvd++ bs.counterLk.Unlock() @@ -421,7 +419,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg dontHaves := incoming.DontHaves() if len(iblocks) > 0 || len(haves) > 0 || len(dontHaves) > 0 { // Process blocks - err := bs.receiveBlocksFrom(ctx, now, p, iblocks, haves, dontHaves) + err := bs.receiveBlocksFrom(ctx, p, iblocks, haves, dontHaves) if err != nil { log.Warnf("ReceiveMessage recvBlockFrom error: %s", err) return diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 9db2a8628..07c18a77e 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -63,8 +63,11 @@ type MessageQueue struct { maxMessageSize int sendErrorBackoff time.Duration + // Signals that there are outgoing wants / cancels ready to be processed outgoingWork chan time.Time - responses chan *Response + + // Channel of CIDs of blocks / HAVEs / DONT_HAVEs received from the peer + responses chan []cid.Cid // Take lock whenever any of these variables are modified wllock sync.Mutex @@ -181,14 +184,6 @@ type DontHaveTimeoutManager interface { UpdateMessageLatency(time.Duration) } -// Response from the peer -type Response struct { - // The time at which the response was received - at time.Time - // The blocks / HAVEs / DONT_HAVEs in the response - ks []cid.Cid -} - // New creates a new MessageQueue. 
func New(ctx context.Context, p peer.ID, network MessageNetwork, onDontHaveTimeout OnDontHaveTimeout) *MessageQueue { onTimeout := func(ks []cid.Cid) { @@ -215,7 +210,7 @@ func newMessageQueue(ctx context.Context, p peer.ID, network MessageNetwork, peerWants: newRecallWantList(), cancels: cid.NewSet(), outgoingWork: make(chan time.Time, 1), - responses: make(chan *Response, 8), + responses: make(chan []cid.Cid, 8), rebroadcastInterval: defaultRebroadcastInterval, sendErrorBackoff: sendErrorBackoff, priority: maxPriority, @@ -320,7 +315,7 @@ func (mq *MessageQueue) AddCancels(cancelKs []cid.Cid) { // ResponseReceived is called when a message is received from the network. // ks is the set of blocks, HAVEs and DONT_HAVEs in the message // Note that this is just used to calculate latency. -func (mq *MessageQueue) ResponseReceived(at time.Time, ks []cid.Cid) { +func (mq *MessageQueue) ResponseReceived(ks []cid.Cid) { if len(ks) == 0 { return } @@ -328,7 +323,7 @@ func (mq *MessageQueue) ResponseReceived(at time.Time, ks []cid.Cid) { // These messages are just used to approximate latency, so if we get so // many responses that they get backed up, just ignore the overflow. select { - case mq.responses <- &Response{at, ks}: + case mq.responses <- ks: default: } } @@ -541,8 +536,9 @@ func (mq *MessageQueue) simulateDontHaveWithTimeout(wantlist []bsmsg.Entry) { mq.dhTimeoutMgr.AddPending(wants) } -// handleResponse is called when a response is received from the peer -func (mq *MessageQueue) handleResponse(res *Response) { +// handleResponse is called when a response is received from the peer, +// with the CIDs of received blocks / HAVEs / DONT_HAVEs +func (mq *MessageQueue) handleResponse(ks []cid.Cid) { now := time.Now() earliest := time.Time{} @@ -552,7 +548,7 @@ func (mq *MessageQueue) handleResponse(res *Response) { // sent to the peer. // Find the earliest request so as to calculate the longest latency as // we want to be conservative when setting the timeout. 
- for _, c := range res.ks { + for _, c := range ks { if at, ok := mq.bcstWants.sentAt[c]; ok && (earliest.IsZero() || at.Before(earliest)) { earliest = at } diff --git a/bitswap/internal/messagequeue/messagequeue_test.go b/bitswap/internal/messagequeue/messagequeue_test.go index 32a7242c2..1ef0d2a5f 100644 --- a/bitswap/internal/messagequeue/messagequeue_test.go +++ b/bitswap/internal/messagequeue/messagequeue_test.go @@ -623,7 +623,7 @@ func TestResponseReceived(t *testing.T) { collectMessages(ctx, t, messagesSent, 10*time.Millisecond) // Receive a response for some of the wants from both groups - messageQueue.ResponseReceived(time.Now(), []cid.Cid{cids[0], cids[6], cids[9]}) + messageQueue.ResponseReceived([]cid.Cid{cids[0], cids[6], cids[9]}) // Wait a short time for processing time.Sleep(10 * time.Millisecond) diff --git a/bitswap/internal/peermanager/peermanager.go b/bitswap/internal/peermanager/peermanager.go index aa40727b2..04b015bfd 100644 --- a/bitswap/internal/peermanager/peermanager.go +++ b/bitswap/internal/peermanager/peermanager.go @@ -3,7 +3,6 @@ package peermanager import ( "context" "sync" - "time" logging "github.com/ipfs/go-log" "github.com/ipfs/go-metrics-interface" @@ -19,7 +18,7 @@ type PeerQueue interface { AddBroadcastWantHaves([]cid.Cid) AddWants([]cid.Cid, []cid.Cid) AddCancels([]cid.Cid) - ResponseReceived(at time.Time, ks []cid.Cid) + ResponseReceived(ks []cid.Cid) Startup() Shutdown() } @@ -121,13 +120,13 @@ func (pm *PeerManager) Disconnected(p peer.ID) { // ResponseReceived is called when a message is received from the network. // ks is the set of blocks, HAVEs and DONT_HAVEs in the message // Note that this is just used to calculate latency. 
-func (pm *PeerManager) ResponseReceived(p peer.ID, at time.Time, ks []cid.Cid) { +func (pm *PeerManager) ResponseReceived(p peer.ID, ks []cid.Cid) { pm.pqLk.Lock() pq, ok := pm.peerQueues[p] pm.pqLk.Unlock() if ok { - pq.ResponseReceived(at, ks) + pq.ResponseReceived(ks) } } diff --git a/bitswap/internal/peermanager/peermanager_test.go b/bitswap/internal/peermanager/peermanager_test.go index d5d348fe6..560868466 100644 --- a/bitswap/internal/peermanager/peermanager_test.go +++ b/bitswap/internal/peermanager/peermanager_test.go @@ -35,7 +35,7 @@ func (fp *mockPeerQueue) AddWants(wbs []cid.Cid, whs []cid.Cid) { func (fp *mockPeerQueue) AddCancels(cs []cid.Cid) { fp.msgs <- msg{fp.p, nil, nil, cs} } -func (fp *mockPeerQueue) ResponseReceived(at time.Time, ks []cid.Cid) { +func (fp *mockPeerQueue) ResponseReceived(ks []cid.Cid) { } type peerWants struct { From 9fa9dfe0cab7c260cdb12eb04c7c86c8c56ab862 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 30 Apr 2020 13:33:12 -0400 Subject: [PATCH 0938/1035] fix: only record latency for first response per want This commit was moved from ipfs/go-bitswap@af8cba85b3cd30d0b7f63bc575d4e14a9331178b --- bitswap/internal/messagequeue/messagequeue.go | 24 ++++++++-- .../messagequeue/messagequeue_test.go | 44 +++++++++++++++++++ 2 files changed, 64 insertions(+), 4 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 07c18a77e..fd55fbee3 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -147,6 +147,13 @@ func (r *recallWantlist) SentAt(c cid.Cid, at time.Time) { } } +// ClearSentAt clears out the record of the time a want was sent. +// We clear the sent at time when we receive a response for a key so that +// subsequent responses for the key don't appear to be even further delayed. 
+func (r *recallWantlist) ClearSentAt(c cid.Cid) { + delete(r.sentAt, c) +} + type peerConn struct { p peer.ID network MessageNetwork @@ -549,11 +556,20 @@ func (mq *MessageQueue) handleResponse(ks []cid.Cid) { // Find the earliest request so as to calculate the longest latency as // we want to be conservative when setting the timeout. for _, c := range ks { - if at, ok := mq.bcstWants.sentAt[c]; ok && (earliest.IsZero() || at.Before(earliest)) { - earliest = at + if at, ok := mq.bcstWants.sentAt[c]; ok { + if earliest.IsZero() || at.Before(earliest) { + earliest = at + } + mq.bcstWants.ClearSentAt(c) } - if at, ok := mq.peerWants.sentAt[c]; ok && (earliest.IsZero() || at.Before(earliest)) { - earliest = at + if at, ok := mq.peerWants.sentAt[c]; ok { + if earliest.IsZero() || at.Before(earliest) { + earliest = at + } + // Clear out the sent time for the CID because we only want to + // record the latency between the request and the first response + // for that CID (not subsequent responses) + mq.peerWants.ClearSentAt(c) } } diff --git a/bitswap/internal/messagequeue/messagequeue_test.go b/bitswap/internal/messagequeue/messagequeue_test.go index 1ef0d2a5f..f0f32e0a7 100644 --- a/bitswap/internal/messagequeue/messagequeue_test.go +++ b/bitswap/internal/messagequeue/messagequeue_test.go @@ -640,6 +640,50 @@ func TestResponseReceived(t *testing.T) { } } +func TestResponseReceivedAppliesForFirstResponseOnly(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, false) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + dhtm := &fakeDontHaveTimeoutMgr{} + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, dhtm) + messageQueue.Startup() + + cids := testutil.GenerateCids(2) + + // Add some wants and wait 10ms + messageQueue.AddWants(cids, nil) + 
collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + // Receive a response for the wants + messageQueue.ResponseReceived(cids) + + // Wait another 10ms + time.Sleep(10 * time.Millisecond) + + // Message queue should inform DHTM of first response + upds := dhtm.latencyUpdates() + if len(upds) != 1 { + t.Fatal("expected one latency update") + } + + // Receive a second response for the same wants + messageQueue.ResponseReceived(cids) + + // Wait for the response to be processed by the message queue + time.Sleep(10 * time.Millisecond) + + // Message queue should not inform DHTM of second response because the + // CIDs are a subset of the first response + upds = dhtm.latencyUpdates() + if len(upds) != 1 { + t.Fatal("expected one latency update") + } +} + func filterWantTypes(wantlist []bsmsg.Entry) ([]cid.Cid, []cid.Cid, []cid.Cid) { var wbs []cid.Cid var whs []cid.Cid From 41ccd67316009b6277295d63f408929124b1d5dc Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 30 Apr 2020 14:11:30 -0400 Subject: [PATCH 0939/1035] fix: discard outliers in latency calculation This commit was moved from ipfs/go-bitswap@a7c7865ad0bde1fd35394705612dfa12d9d62d21 --- bitswap/internal/messagequeue/messagequeue.go | 53 ++++++++++++++----- .../messagequeue/messagequeue_test.go | 52 ++++++++++++++++-- 2 files changed, 87 insertions(+), 18 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index fd55fbee3..a3e21790d 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -41,6 +41,9 @@ const ( // when we debounce for more than sendMessageMaxDelay, we'll send the // message immediately. 
sendMessageMaxDelay = 20 * time.Millisecond + // The maximum amount of time in which to accept a response as being valid + // for latency calculation (as opposed to discarding it as an outlier) + maxValidLatency = 30 * time.Second ) // MessageNetwork is any network that can connect peers and generate a message @@ -55,14 +58,24 @@ type MessageNetwork interface { // MessageQueue implements queue of want messages to send to peers. type MessageQueue struct { - ctx context.Context - shutdown func() - p peer.ID - network MessageNetwork - dhTimeoutMgr DontHaveTimeoutManager - maxMessageSize int + ctx context.Context + shutdown func() + p peer.ID + network MessageNetwork + dhTimeoutMgr DontHaveTimeoutManager + + // The maximum size of a message in bytes. Any overflow is put into the + // next message + maxMessageSize int + + // The amount of time to wait when there's an error sending to a peer + // before retrying sendErrorBackoff time.Duration + // The maximum amount of time in which to accept a response as being valid + // for latency calculation + maxValidLatency time.Duration + // Signals that there are outgoing wants / cancels ready to be processed outgoingWork chan time.Time @@ -198,12 +211,18 @@ func New(ctx context.Context, p peer.ID, network MessageNetwork, onDontHaveTimeo onDontHaveTimeout(p, ks) } dhTimeoutMgr := newDontHaveTimeoutMgr(newPeerConnection(p, network), onTimeout) - return newMessageQueue(ctx, p, network, maxMessageSize, sendErrorBackoff, dhTimeoutMgr) + return newMessageQueue(ctx, p, network, maxMessageSize, sendErrorBackoff, maxValidLatency, dhTimeoutMgr) } // This constructor is used by the tests -func newMessageQueue(ctx context.Context, p peer.ID, network MessageNetwork, - maxMsgSize int, sendErrorBackoff time.Duration, dhTimeoutMgr DontHaveTimeoutManager) *MessageQueue { +func newMessageQueue( + ctx context.Context, + p peer.ID, + network MessageNetwork, + maxMsgSize int, + sendErrorBackoff time.Duration, + maxValidLatency time.Duration, + 
dhTimeoutMgr DontHaveTimeoutManager) *MessageQueue { ctx, cancel := context.WithCancel(ctx) return &MessageQueue{ @@ -220,6 +239,7 @@ func newMessageQueue(ctx context.Context, p peer.ID, network MessageNetwork, responses: make(chan []cid.Cid, 8), rebroadcastInterval: defaultRebroadcastInterval, sendErrorBackoff: sendErrorBackoff, + maxValidLatency: maxValidLatency, priority: maxPriority, // For performance reasons we just clear out the fields of the message // after using it, instead of creating a new one every time. @@ -553,17 +573,24 @@ func (mq *MessageQueue) handleResponse(ks []cid.Cid) { // Check if the keys in the response correspond to any request that was // sent to the peer. - // Find the earliest request so as to calculate the longest latency as - // we want to be conservative when setting the timeout. + // + // - Find the earliest request so as to calculate the longest latency as + // we want to be conservative when setting the timeout + // - Ignore latencies that are very long, as these are likely to be outliers + // caused when + // - we send a want to peer A + // - peer A does not have the block + // - peer A later receives the block from peer B + // - peer A sends us HAVE / block for _, c := range ks { if at, ok := mq.bcstWants.sentAt[c]; ok { - if earliest.IsZero() || at.Before(earliest) { + if (earliest.IsZero() || at.Before(earliest)) && now.Sub(at) < mq.maxValidLatency { earliest = at } mq.bcstWants.ClearSentAt(c) } if at, ok := mq.peerWants.sentAt[c]; ok { - if earliest.IsZero() || at.Before(earliest) { + if (earliest.IsZero() || at.Before(earliest)) && now.Sub(at) < mq.maxValidLatency { earliest = at } // Clear out the sent time for the CID because we only want to diff --git a/bitswap/internal/messagequeue/messagequeue_test.go b/bitswap/internal/messagequeue/messagequeue_test.go index f0f32e0a7..4af3000ad 100644 --- a/bitswap/internal/messagequeue/messagequeue_test.go +++ b/bitswap/internal/messagequeue/messagequeue_test.go @@ -498,7 +498,7 @@ 
func TestSendingLargeMessages(t *testing.T) { wantBlocks := testutil.GenerateCids(10) entrySize := 44 maxMsgSize := entrySize * 3 // 3 wants - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMsgSize, sendErrorBackoff, dhtm) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMsgSize, sendErrorBackoff, maxValidLatency, dhtm) messageQueue.Startup() messageQueue.AddWants(wantBlocks, []cid.Cid{}) @@ -578,7 +578,7 @@ func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { peerID := testutil.GeneratePeers(1)[0] dhtm := &fakeDontHaveTimeoutMgr{} - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, dhtm) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm) messageQueue.Startup() wbs := testutil.GenerateCids(10) @@ -609,7 +609,7 @@ func TestResponseReceived(t *testing.T) { peerID := testutil.GeneratePeers(1)[0] dhtm := &fakeDontHaveTimeoutMgr{} - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, dhtm) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm) messageQueue.Startup() cids := testutil.GenerateCids(10) @@ -649,7 +649,7 @@ func TestResponseReceivedAppliesForFirstResponseOnly(t *testing.T) { peerID := testutil.GeneratePeers(1)[0] dhtm := &fakeDontHaveTimeoutMgr{} - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, dhtm) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm) messageQueue.Startup() cids := testutil.GenerateCids(2) @@ -684,6 +684,48 @@ func TestResponseReceivedAppliesForFirstResponseOnly(t *testing.T) { } } +func TestResponseReceivedDiscardsOutliers(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, 
false) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + maxValLatency := 30 * time.Millisecond + dhtm := &fakeDontHaveTimeoutMgr{} + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValLatency, dhtm) + messageQueue.Startup() + + cids := testutil.GenerateCids(4) + + // Add some wants and wait 20ms + messageQueue.AddWants(cids[:2], nil) + collectMessages(ctx, t, messagesSent, 20*time.Millisecond) + + // Add some more wants and wait long enough that the first wants will be + // outside the maximum valid latency, but the second wants will be inside + messageQueue.AddWants(cids[2:], nil) + collectMessages(ctx, t, messagesSent, maxValLatency-10*time.Millisecond) + + // Receive a response for the wants + messageQueue.ResponseReceived(cids) + + // Wait for the response to be processed by the message queue + time.Sleep(10 * time.Millisecond) + + // Check that the latency calculation excludes the first wants + // (because they're older than max valid latency) + upds := dhtm.latencyUpdates() + if len(upds) != 1 { + t.Fatal("expected one latency update") + } + // Elapsed time should not include outliers + if upds[0] > maxValLatency { + t.Fatal("expected latency calculation to discard outliers") + } +} + func filterWantTypes(wantlist []bsmsg.Entry) ([]cid.Cid, []cid.Cid, []cid.Cid) { var wbs []cid.Cid var whs []cid.Cid @@ -712,7 +754,7 @@ func BenchmarkMessageQueue(b *testing.B) { dhtm := &fakeDontHaveTimeoutMgr{} peerID := testutil.GeneratePeers(1)[0] - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, dhtm) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm) messageQueue.Startup() go func() { From 5a00b202b81bdf7960a985b4278a9d55abb8e97c Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 1 May 2020 11:04:05 -0400 Subject: [PATCH 0940/1035] docs: MessageQueue docs This commit was 
moved from ipfs/go-bitswap@f005819cabe8b88188366962a25925024d872b51 --- bitswap/internal/messagequeue/messagequeue.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index a3e21790d..24e80974b 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -161,8 +161,8 @@ func (r *recallWantlist) SentAt(c cid.Cid, at time.Time) { } // ClearSentAt clears out the record of the time a want was sent. -// We clear the sent at time when we receive a response for a key so that -// subsequent responses for the key don't appear to be even further delayed. +// We clear the sent at time when we receive a response for a key as we +// only need the first response for latency measurement. func (r *recallWantlist) ClearSentAt(c cid.Cid) { delete(r.sentAt, c) } @@ -201,6 +201,7 @@ type DontHaveTimeoutManager interface { AddPending([]cid.Cid) // CancelPending removes the wants CancelPending([]cid.Cid) + // UpdateMessageLatency informs the manager of a new latency measurement UpdateMessageLatency(time.Duration) } From 5f93e56d5848bd186b2784d75d47c76bc062743b Mon Sep 17 00:00:00 2001 From: dirkmc Date: Fri, 1 May 2020 11:11:04 -0400 Subject: [PATCH 0941/1035] fix: use one less go-routine per session (#377) * fix: use one less go-routine per session * fix: send cancel when GetBlocks() is cancelled (#383) * fix: send cancel when GetBlocks() is cancelled * fix: in SessionManager shutdown nil out sessions * fix: sessionWantSender perf * make sessionWantSender.SignalAvailability() non-blocking * Refactor SessionInterestManager (#384) * refactor: customize SessionInterestManager * refactor: SessionInterestManager perf This commit was moved from ipfs/go-bitswap@a2dd024c5de3330db889b8ef44050f01a8683353 --- bitswap/bitswap.go | 9 +- .../blockpresencemanager.go | 10 + bitswap/internal/session/session.go | 47 ++-- 
bitswap/internal/session/session_test.go | 109 ++++++-- bitswap/internal/session/sessionwantsender.go | 58 +++- .../session/sessionwantsender_test.go | 75 ++++- .../sessioninterestmanager.go | 134 +++++++-- .../sessioninterestmanager_test.go | 40 ++- .../internal/sessionmanager/sessionmanager.go | 91 ++++-- .../sessionmanager/sessionmanager_test.go | 67 ++++- .../sessionwantlist/sessionwantlist.go | 137 ---------- .../sessionwantlist/sessionwantlist_test.go | 258 ------------------ 12 files changed, 538 insertions(+), 497 deletions(-) delete mode 100644 bitswap/internal/sessionwantlist/sessionwantlist.go delete mode 100644 bitswap/internal/sessionwantlist/sessionwantlist_test.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index db0ca0986..0cd6b4976 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -139,7 +139,11 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, pm := bspm.New(ctx, peerQueueFactory, network.Self()) pqm := bspqm.New(ctx, network) - sessionFactory := func(sessctx context.Context, id uint64, spm bssession.SessionPeerManager, + sessionFactory := func( + sessctx context.Context, + sessmgr bssession.SessionManager, + id uint64, + spm bssession.SessionPeerManager, sim *bssim.SessionInterestManager, pm bssession.PeerManager, bpm *bsbpm.BlockPresenceManager, @@ -147,7 +151,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, provSearchDelay time.Duration, rebroadcastDelay delay.D, self peer.ID) bssm.Session { - return bssession.New(ctx, sessctx, id, spm, pqm, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) + return bssession.New(sessctx, sessmgr, id, spm, pqm, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) } sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.SessionPeerManager { return bsspm.New(id, network.ConnectionManager()) @@ -193,6 +197,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, // do it over here to avoid closing before all 
setup is done. go func() { <-px.Closing() // process closes first + sm.Shutdown() cancelFunc() notif.Shutdown() }() diff --git a/bitswap/internal/blockpresencemanager/blockpresencemanager.go b/bitswap/internal/blockpresencemanager/blockpresencemanager.go index 87821f2f8..1d3acb0e2 100644 --- a/bitswap/internal/blockpresencemanager/blockpresencemanager.go +++ b/bitswap/internal/blockpresencemanager/blockpresencemanager.go @@ -109,3 +109,13 @@ func (bpm *BlockPresenceManager) RemoveKeys(ks []cid.Cid) { delete(bpm.presence, c) } } + +// HasKey indicates whether the BlockPresenceManager is tracking the given key +// (used by the tests) +func (bpm *BlockPresenceManager) HasKey(c cid.Cid) bool { + bpm.Lock() + defer bpm.Unlock() + + _, ok := bpm.presence[c] + return ok +} diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index 11c8b0924..7a0d23b36 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -43,6 +43,14 @@ type PeerManager interface { SendCancels(context.Context, []cid.Cid) } +// SessionManager manages all the sessions +type SessionManager interface { + // Remove a session (called when the session shuts down) + RemoveSession(sesid uint64) + // Cancel wants (called when a call to GetBlocks() is cancelled) + CancelSessionWants(sid uint64, wants []cid.Cid) +} + // SessionPeerManager keeps track of peers in the session type SessionPeerManager interface { // PeersDiscovered indicates if any peers have been discovered yet @@ -91,10 +99,10 @@ type op struct { // info to, and who to request blocks from. 
type Session struct { // dependencies - bsctx context.Context // context for bitswap - ctx context.Context // context for session + ctx context.Context + shutdown func() + sm SessionManager pm PeerManager - bpm *bsbpm.BlockPresenceManager sprm SessionPeerManager providerFinder ProviderFinder sim *bssim.SessionInterestManager @@ -126,8 +134,8 @@ type Session struct { // New creates a new bitswap session whose lifetime is bounded by the // given context. func New( - bsctx context.Context, // context for bitswap - ctx context.Context, // context for this session + ctx context.Context, + sm SessionManager, id uint64, sprm SessionPeerManager, providerFinder ProviderFinder, @@ -138,13 +146,15 @@ func New( initialSearchDelay time.Duration, periodicSearchDelay delay.D, self peer.ID) *Session { + + ctx, cancel := context.WithCancel(ctx) s := &Session{ sw: newSessionWants(broadcastLiveWantsLimit), tickDelayReqs: make(chan time.Duration), - bsctx: bsctx, ctx: ctx, + shutdown: cancel, + sm: sm, pm: pm, - bpm: bpm, sprm: sprm, providerFinder: providerFinder, sim: sim, @@ -158,7 +168,7 @@ func New( periodicSearchDelay: periodicSearchDelay, self: self, } - s.sws = newSessionWantSender(id, pm, sprm, bpm, s.onWantsSent, s.onPeersExhausted) + s.sws = newSessionWantSender(id, pm, sprm, sm, bpm, s.onWantsSent, s.onPeersExhausted) go s.run(ctx) @@ -169,6 +179,10 @@ func (s *Session) ID() uint64 { return s.id } +func (s *Session) Shutdown() { + s.shutdown() +} + // ReceiveFrom receives incoming blocks from the given peer. 
func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { // The SessionManager tells each Session about all keys that it may be @@ -295,6 +309,7 @@ func (s *Session) run(ctx context.Context) { case opCancel: // Wants were cancelled s.sw.CancelPending(oper.keys) + s.sws.Cancel(oper.keys) case opWantsSent: // Wants were sent to a peer s.sw.WantsSent(oper.keys) @@ -389,19 +404,9 @@ func (s *Session) handleShutdown() { // Shut down the sessionWantSender (blocks until sessionWantSender stops // sending) s.sws.Shutdown() - - // Remove session's interest in the given blocks. - cancelKs := s.sim.RemoveSessionInterest(s.id) - - // Free up block presence tracking for keys that no session is interested - // in anymore - s.bpm.RemoveKeys(cancelKs) - - // Send CANCEL to all peers for blocks that no session is interested in - // anymore. - // Note: use bitswap context because session context has already been - // cancelled. - s.pm.SendCancels(s.bsctx, cancelKs) + // Signal to the SessionManager that the session has been shutdown + // and can be cleaned up + s.sm.RemoveSession(s.id) } // handleReceive is called when the session receives blocks from a peer diff --git a/bitswap/internal/session/session_test.go b/bitswap/internal/session/session_test.go index 79010db1f..028ee46e2 100644 --- a/bitswap/internal/session/session_test.go +++ b/bitswap/internal/session/session_test.go @@ -18,6 +18,40 @@ import ( peer "github.com/libp2p/go-libp2p-core/peer" ) +type mockSessionMgr struct { + lk sync.Mutex + removeSession bool + cancels []cid.Cid +} + +func newMockSessionMgr() *mockSessionMgr { + return &mockSessionMgr{} +} + +func (msm *mockSessionMgr) removeSessionCalled() bool { + msm.lk.Lock() + defer msm.lk.Unlock() + return msm.removeSession +} + +func (msm *mockSessionMgr) cancelled() []cid.Cid { + msm.lk.Lock() + defer msm.lk.Unlock() + return msm.cancels +} + +func (msm *mockSessionMgr) RemoveSession(sesid uint64) { + msm.lk.Lock() + defer 
msm.lk.Unlock() + msm.removeSession = true +} + +func (msm *mockSessionMgr) CancelSessionWants(sid uint64, wants []cid.Cid) { + msm.lk.Lock() + defer msm.lk.Unlock() + msm.cancels = append(msm.cancels, wants...) +} + func newFakeSessionPeerManager() *bsspm.SessionPeerManager { return bsspm.New(1, newFakePeerTagger()) } @@ -61,8 +95,6 @@ type wantReq struct { type fakePeerManager struct { wantReqs chan wantReq - lk sync.Mutex - cancels []cid.Cid } func newFakePeerManager() *fakePeerManager { @@ -82,16 +114,7 @@ func (pm *fakePeerManager) BroadcastWantHaves(ctx context.Context, cids []cid.Ci case <-ctx.Done(): } } -func (pm *fakePeerManager) SendCancels(ctx context.Context, cancels []cid.Cid) { - pm.lk.Lock() - defer pm.lk.Unlock() - pm.cancels = append(pm.cancels, cancels...) -} -func (pm *fakePeerManager) allCancels() []cid.Cid { - pm.lk.Lock() - defer pm.lk.Unlock() - return append([]cid.Cid{}, pm.cancels...) -} +func (pm *fakePeerManager) SendCancels(ctx context.Context, cancels []cid.Cid) {} func TestSessionGetBlocks(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) @@ -103,7 +126,8 @@ func TestSessionGetBlocks(t *testing.T) { notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, ctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + sm := newMockSessionMgr() + session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) var cids []cid.Cid @@ -181,9 +205,9 @@ func TestSessionGetBlocks(t *testing.T) { time.Sleep(10 * time.Millisecond) - // Verify wants were cancelled - if len(fpm.allCancels()) != len(blks) { - t.Fatal("expected cancels to be sent for all wants") + // Verify session was removed + if !sm.removeSessionCalled() { + t.Fatal("expected session to be removed") } } @@ -198,7 
+222,8 @@ func TestSessionFindMorePeers(t *testing.T) { notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, ctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + sm := newMockSessionMgr() + session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") session.SetBaseTickDelay(200 * time.Microsecond) blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) @@ -272,7 +297,8 @@ func TestSessionOnPeersExhausted(t *testing.T) { notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, ctx, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + sm := newMockSessionMgr() + session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(broadcastLiveWantsLimit + 5) var cids []cid.Cid @@ -316,7 +342,8 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, ctx, id, fspm, fpf, sim, fpm, bpm, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond), "") + sm := newMockSessionMgr() + session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(4) var cids []cid.Cid @@ -428,10 +455,11 @@ func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() + sm := newMockSessionMgr() // Create a new session with its own context sessctx, sesscancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - session := New(context.Background(), sessctx, id, fspm, fpf, 
sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session := New(sessctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") timerCtx, timerCancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer timerCancel() @@ -459,10 +487,44 @@ func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { case <-timerCtx.Done(): t.Fatal("expected channel to be closed before timeout") } + + time.Sleep(10 * time.Millisecond) + + // Expect RemoveSession to be called + if !sm.removeSessionCalled() { + t.Fatal("expected onShutdown to be called") + } +} + +func TestSessionOnShutdownCalled(t *testing.T) { + fpm := newFakePeerManager() + fspm := newFakeSessionPeerManager() + fpf := newFakeProviderFinder() + sim := bssim.New() + bpm := bsbpm.New() + notif := notifications.New() + defer notif.Shutdown() + id := testutil.GenerateSessionID() + sm := newMockSessionMgr() + + // Create a new session with its own context + sessctx, sesscancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer sesscancel() + session := New(sessctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + + // Shutdown the session + session.Shutdown() + + time.Sleep(10 * time.Millisecond) + + // Expect RemoveSession to be called + if !sm.removeSessionCalled() { + t.Fatal("expected onShutdown to be called") + } } -func TestSessionReceiveMessageAfterShutdown(t *testing.T) { - ctx, cancelCtx := context.WithTimeout(context.Background(), 10*time.Millisecond) +func TestSessionReceiveMessageAfterCtxCancel(t *testing.T) { + ctx, cancelCtx := context.WithTimeout(context.Background(), 20*time.Millisecond) fpm := newFakePeerManager() fspm := newFakeSessionPeerManager() fpf := newFakeProviderFinder() @@ -472,7 +534,8 @@ func TestSessionReceiveMessageAfterShutdown(t *testing.T) { notif := notifications.New() defer notif.Shutdown() id := testutil.GenerateSessionID() - session := New(ctx, ctx, id, 
fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + sm := newMockSessionMgr() + session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() blks := blockGenerator.Blocks(2) cids := []cid.Cid{blks[0].Cid(), blks[1].Cid()} diff --git a/bitswap/internal/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go index 8ccba8f80..094d9096b 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -30,6 +30,12 @@ const ( BPHave ) +// SessionWantsCanceller provides a method to cancel wants +type SessionWantsCanceller interface { + // Cancel wants for this session + CancelSessionWants(sid uint64, wants []cid.Cid) +} + // update encapsulates a message received by the session type update struct { // Which peer sent the update @@ -53,6 +59,8 @@ type peerAvailability struct { type change struct { // new wants requested add []cid.Cid + // wants cancelled + cancel []cid.Cid // new message received by session (blocks / HAVEs / DONT_HAVEs) update update // peer has connected / disconnected @@ -94,6 +102,8 @@ type sessionWantSender struct { pm PeerManager // Keeps track of peers in the session spm SessionPeerManager + // Cancels wants + canceller SessionWantsCanceller // Keeps track of which peer has / doesn't have a block bpm *bsbpm.BlockPresenceManager // Called when wants are sent @@ -102,7 +112,7 @@ type sessionWantSender struct { onPeersExhausted onPeersExhaustedFn } -func newSessionWantSender(sid uint64, pm PeerManager, spm SessionPeerManager, +func newSessionWantSender(sid uint64, pm PeerManager, spm SessionPeerManager, canceller SessionWantsCanceller, bpm *bsbpm.BlockPresenceManager, onSend onSendFn, onPeersExhausted onPeersExhaustedFn) sessionWantSender { ctx, cancel := context.WithCancel(context.Background()) @@ -119,6 +129,7 @@ func newSessionWantSender(sid uint64, pm PeerManager, 
spm SessionPeerManager, pm: pm, spm: spm, + canceller: canceller, bpm: bpm, onSend: onSend, onPeersExhausted: onPeersExhausted, @@ -139,6 +150,14 @@ func (sws *sessionWantSender) Add(ks []cid.Cid) { sws.addChange(change{add: ks}) } +// Cancel is called when a request is cancelled +func (sws *sessionWantSender) Cancel(ks []cid.Cid) { + if len(ks) == 0 { + return + } + sws.addChange(change{cancel: ks}) +} + // Update is called when the session receives a message with incoming blocks // or HAVE / DONT_HAVE func (sws *sessionWantSender) Update(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { @@ -156,7 +175,9 @@ func (sws *sessionWantSender) Update(from peer.ID, ks []cid.Cid, haves []cid.Cid // connected / disconnected func (sws *sessionWantSender) SignalAvailability(p peer.ID, isAvailable bool) { availability := peerAvailability{p, isAvailable} - sws.addChange(change{availability: availability}) + // Add the change in a non-blocking manner to avoid the possibility of a + // deadlock + sws.addChangeNonBlocking(change{availability: availability}) } // Run is the main loop for processing incoming changes @@ -193,6 +214,22 @@ func (sws *sessionWantSender) addChange(c change) { } } +// addChangeNonBlocking adds a new change to the queue, using a go-routine +// if the change blocks, so as to avoid potential deadlocks +func (sws *sessionWantSender) addChangeNonBlocking(c change) { + select { + case sws.changes <- c: + default: + // changes channel is full, so add change in a go routine instead + go func() { + select { + case sws.changes <- c: + case <-sws.ctx.Done(): + } + }() + } +} + // collectChanges collects all the changes that have occurred since the last // invocation of onChange func (sws *sessionWantSender) collectChanges(changes []change) []change { @@ -215,6 +252,7 @@ func (sws *sessionWantSender) onChange(changes []change) { // Apply each change availability := make(map[peer.ID]bool, len(changes)) + cancels := make([]cid.Cid, 0) var updates 
[]update for _, chng := range changes { // Initialize info for new wants @@ -222,6 +260,12 @@ func (sws *sessionWantSender) onChange(changes []change) { sws.trackWant(c) } + // Remove cancelled wants + for _, c := range chng.cancel { + sws.untrackWant(c) + cancels = append(cancels, c) + } + // Consolidate updates and changes to availability if chng.update.from != "" { // If the update includes blocks or haves, treat it as signaling that @@ -247,6 +291,11 @@ func (sws *sessionWantSender) onChange(changes []change) { // don't have the want sws.checkForExhaustedWants(dontHaves, newlyUnavailable) + // If there are any cancels, send them + if len(cancels) > 0 { + sws.canceller.CancelSessionWants(sws.sessionID, cancels) + } + // If there are some connected peers, send any pending wants if sws.spm.HasPeers() { sws.sendNextWants(newlyAvailable) @@ -306,6 +355,11 @@ func (sws *sessionWantSender) trackWant(c cid.Cid) { } } +// untrackWant removes an entry from the map of CID -> want info +func (sws *sessionWantSender) untrackWant(c cid.Cid) { + delete(sws.wants, c) +} + // processUpdates processes incoming blocks and HAVE / DONT_HAVEs. // It returns all DONT_HAVEs. 
func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { diff --git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go index 3593009a3..6c3059c1f 100644 --- a/bitswap/internal/session/sessionwantsender_test.go +++ b/bitswap/internal/session/sessionwantsender_test.go @@ -136,10 +136,12 @@ func TestSendWants(t *testing.T) { sid := uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() go spm.Run() @@ -174,10 +176,12 @@ func TestSendsWantBlockToOnePeerOnly(t *testing.T) { sid := uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() go spm.Run() @@ -232,10 +236,12 @@ func TestReceiveBlock(t *testing.T) { sid := uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() go spm.Run() @@ -284,6 +290,40 @@ func TestReceiveBlock(t *testing.T) { } } +func TestCancelWants(t *testing.T) { + cids := testutil.GenerateCids(4) + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() 
+ bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // add cid0, cid1, cid2 + blkCids := cids[0:3] + spm.Add(blkCids) + + time.Sleep(5 * time.Millisecond) + + // cancel cid0, cid2 + cancelCids := []cid.Cid{cids[0], cids[2]} + spm.Cancel(cancelCids) + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Should have sent cancels for cid0, cid2 + sent := swc.cancelled() + if !testutil.MatchKeysIgnoreOrder(sent, cancelCids) { + t.Fatal("Wrong keys") + } +} + func TestPeerUnavailable(t *testing.T) { cids := testutil.GenerateCids(2) peers := testutil.GeneratePeers(2) @@ -292,10 +332,12 @@ func TestPeerUnavailable(t *testing.T) { sid := uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() go spm.Run() @@ -357,11 +399,12 @@ func TestPeersExhausted(t *testing.T) { sid := uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} ep := exhaustedPeers{} - spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, ep.onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, ep.onPeersExhausted) go spm.Run() @@ -433,11 +476,12 @@ func TestPeersExhaustedLastWaitingPeerUnavailable(t *testing.T) { sid := uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} ep := exhaustedPeers{} - spm := 
newSessionWantSender(sid, pm, fpm, bpm, onSend, ep.onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, ep.onPeersExhausted) go spm.Run() @@ -481,11 +525,12 @@ func TestPeersExhaustedAllPeersUnavailable(t *testing.T) { sid := uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} ep := exhaustedPeers{} - spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, ep.onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, ep.onPeersExhausted) go spm.Run() @@ -520,10 +565,12 @@ func TestConsecutiveDontHaveLimit(t *testing.T) { sid := uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() go spm.Run() @@ -576,10 +623,12 @@ func TestConsecutiveDontHaveLimitInterrupted(t *testing.T) { sid := uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() go spm.Run() @@ -631,10 +680,12 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { sid := uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(sid, 
pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() go spm.Run() @@ -715,10 +766,12 @@ func TestConsecutiveDontHaveDontRemoveIfHasWantedBlock(t *testing.T) { sid := uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() bpm := bsbpm.New() onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} onPeersExhausted := func([]cid.Cid) {} - spm := newSessionWantSender(sid, pm, fpm, bpm, onSend, onPeersExhausted) + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() go spm.Run() diff --git a/bitswap/internal/sessioninterestmanager/sessioninterestmanager.go b/bitswap/internal/sessioninterestmanager/sessioninterestmanager.go index 6e345b55e..0ab32ed1b 100644 --- a/bitswap/internal/sessioninterestmanager/sessioninterestmanager.go +++ b/bitswap/internal/sessioninterestmanager/sessioninterestmanager.go @@ -3,7 +3,6 @@ package sessioninterestmanager import ( "sync" - bsswl "github.com/ipfs/go-bitswap/internal/sessionwantlist" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" @@ -11,16 +10,22 @@ import ( // SessionInterestManager records the CIDs that each session is interested in. type SessionInterestManager struct { - lk sync.RWMutex - interested *bsswl.SessionWantlist - wanted *bsswl.SessionWantlist + lk sync.RWMutex + wants map[cid.Cid]map[uint64]bool } // New initializes a new SessionInterestManager. func New() *SessionInterestManager { return &SessionInterestManager{ - interested: bsswl.NewSessionWantlist(), - wanted: bsswl.NewSessionWantlist(), + // Map of cids -> sessions -> bool + // + // The boolean indicates whether the session still wants the block + // or is just interested in receiving messages about it. + // + // Note that once the block is received the session no longer wants + // the block, but still wants to receive messages from peers who have + // the block as they may have other blocks the session is interested in. 
+ wants: make(map[cid.Cid]map[uint64]bool), } } @@ -30,25 +35,85 @@ func (sim *SessionInterestManager) RecordSessionInterest(ses uint64, ks []cid.Ci sim.lk.Lock() defer sim.lk.Unlock() - sim.interested.Add(ks, ses) - sim.wanted.Add(ks, ses) + // For each key + for _, c := range ks { + // Record that the session wants the blocks + if want, ok := sim.wants[c]; ok { + want[ses] = true + } else { + sim.wants[c] = map[uint64]bool{ses: true} + } + } } // When the session shuts down it calls RemoveSessionInterest(). -func (sim *SessionInterestManager) RemoveSessionInterest(ses uint64) []cid.Cid { +// Returns the keys that no session is interested in any more. +func (sim *SessionInterestManager) RemoveSession(ses uint64) []cid.Cid { sim.lk.Lock() defer sim.lk.Unlock() - sim.wanted.RemoveSession(ses) - return sim.interested.RemoveSession(ses) + // The keys that no session is interested in + deletedKs := make([]cid.Cid, 0) + + // For each known key + for c := range sim.wants { + // Remove the session from the list of sessions that want the key + delete(sim.wants[c], ses) + + // If there are no more sessions that want the key + if len(sim.wants[c]) == 0 { + // Clean up the list memory + delete(sim.wants, c) + // Add the key to the list of keys that no session is interested in + deletedKs = append(deletedKs, c) + } + } + + return deletedKs } // When the session receives blocks, it calls RemoveSessionWants(). -func (sim *SessionInterestManager) RemoveSessionWants(ses uint64, wants []cid.Cid) { +func (sim *SessionInterestManager) RemoveSessionWants(ses uint64, ks []cid.Cid) { + sim.lk.Lock() + defer sim.lk.Unlock() + + // For each key + for _, c := range ks { + // If the session wanted the block + if wanted, ok := sim.wants[c][ses]; ok && wanted { + // Mark the block as unwanted + sim.wants[c][ses] = false + } + } +} + +// When a request is cancelled, the session calls RemoveSessionInterested(). +// Returns the keys that no session is interested in any more. 
+func (sim *SessionInterestManager) RemoveSessionInterested(ses uint64, ks []cid.Cid) []cid.Cid { sim.lk.Lock() defer sim.lk.Unlock() - sim.wanted.RemoveSessionKeys(ses, wants) + // The keys that no session is interested in + deletedKs := make([]cid.Cid, 0, len(ks)) + + // For each key + for _, c := range ks { + // If there is a list of sessions that want the key + if _, ok := sim.wants[c]; ok { + // Remove the session from the list of sessions that want the key + delete(sim.wants[c], ses) + + // If there are no more sessions that want the key + if len(sim.wants[c]) == 0 { + // Clean up the list memory + delete(sim.wants, c) + // Add the key to the list of keys that no session is interested in + deletedKs = append(deletedKs, c) + } + } + } + + return deletedKs } // The session calls FilterSessionInterested() to filter the sets of keys for @@ -57,9 +122,20 @@ func (sim *SessionInterestManager) FilterSessionInterested(ses uint64, ksets ... sim.lk.RLock() defer sim.lk.RUnlock() + // For each set of keys kres := make([][]cid.Cid, len(ksets)) for i, ks := range ksets { - kres[i] = sim.interested.SessionHas(ses, ks).Keys() + // The set of keys that at least one session is interested in + has := make([]cid.Cid, 0, len(ks)) + + // For each key in the list + for _, c := range ks { + // If there is a session that's interested, add the key to the set + if _, ok := sim.wants[c][ses]; ok { + has = append(has, c) + } + } + kres[i] = has } return kres } @@ -70,12 +146,19 @@ func (sim *SessionInterestManager) SplitWantedUnwanted(blks []blocks.Block) ([]b sim.lk.RLock() defer sim.lk.RUnlock() - // Get the wanted block keys - ks := make([]cid.Cid, len(blks)) + // Get the wanted block keys as a set + wantedKs := cid.NewSet() for _, b := range blks { - ks = append(ks, b.Cid()) + c := b.Cid() + // For each session that is interested in the key + for ses := range sim.wants[c] { + // If the session wants the key (rather than just being interested) + if wanted, ok := sim.wants[c][ses]; ok 
&& wanted { + // Add the key to the set + wantedKs.Add(c) + } + } } - wantedKs := sim.wanted.Has(ks) // Separate the blocks into wanted and unwanted wantedBlks := make([]blocks.Block, 0, len(blks)) @@ -101,5 +184,18 @@ func (sim *SessionInterestManager) InterestedSessions(blks []cid.Cid, haves []ci ks = append(ks, haves...) ks = append(ks, dontHaves...) - return sim.interested.SessionsFor(ks) + // Create a set of sessions that are interested in the keys + sesSet := make(map[uint64]struct{}) + for _, c := range ks { + for s := range sim.wants[c] { + sesSet[s] = struct{}{} + } + } + + // Convert the set into a list + ses := make([]uint64, 0, len(sesSet)) + for s := range sesSet { + ses = append(ses, s) + } + return ses } diff --git a/bitswap/internal/sessioninterestmanager/sessioninterestmanager_test.go b/bitswap/internal/sessioninterestmanager/sessioninterestmanager_test.go index ead920230..0bba66389 100644 --- a/bitswap/internal/sessioninterestmanager/sessioninterestmanager_test.go +++ b/bitswap/internal/sessioninterestmanager/sessioninterestmanager_test.go @@ -83,7 +83,7 @@ func TestInterestedSessions(t *testing.T) { } } -func TestRemoveSessionInterest(t *testing.T) { +func TestRemoveSession(t *testing.T) { sim := New() ses1 := uint64(1) @@ -92,7 +92,7 @@ func TestRemoveSessionInterest(t *testing.T) { cids2 := append(testutil.GenerateCids(1), cids1[1]) sim.RecordSessionInterest(ses1, cids1) sim.RecordSessionInterest(ses2, cids2) - sim.RemoveSessionInterest(ses1) + sim.RemoveSession(ses1) res := sim.FilterSessionInterested(ses1, cids1) if len(res) != 1 || len(res[0]) != 0 { @@ -111,6 +111,42 @@ func TestRemoveSessionInterest(t *testing.T) { } } +func TestRemoveSessionInterested(t *testing.T) { + sim := New() + + ses1 := uint64(1) + ses2 := uint64(2) + cids1 := testutil.GenerateCids(2) + cids2 := append(testutil.GenerateCids(1), cids1[1]) + sim.RecordSessionInterest(ses1, cids1) + sim.RecordSessionInterest(ses2, cids2) + + res := sim.RemoveSessionInterested(ses1, 
[]cid.Cid{cids1[0]}) + if len(res) != 1 { + t.Fatal("Expected no interested sessions left") + } + + interested := sim.FilterSessionInterested(ses1, cids1) + if len(interested) != 1 || len(interested[0]) != 1 { + t.Fatal("Expected ses1 still interested in one cid") + } + + res = sim.RemoveSessionInterested(ses1, cids1) + if len(res) != 0 { + t.Fatal("Expected ses2 to be interested in one cid") + } + + interested = sim.FilterSessionInterested(ses1, cids1) + if len(interested) != 1 || len(interested[0]) != 0 { + t.Fatal("Expected ses1 to have no remaining interest") + } + + interested = sim.FilterSessionInterested(ses2, cids1) + if len(interested) != 1 || len(interested[0]) != 1 { + t.Fatal("Expected ses2 to still be interested in one key") + } +} + func TestSplitWantedUnwanted(t *testing.T) { blks := testutil.GenerateBlocksOfSize(3, 1024) sim := New() diff --git a/bitswap/internal/sessionmanager/sessionmanager.go b/bitswap/internal/sessionmanager/sessionmanager.go index c69aa0417..42b209387 100644 --- a/bitswap/internal/sessionmanager/sessionmanager.go +++ b/bitswap/internal/sessionmanager/sessionmanager.go @@ -21,10 +21,22 @@ type Session interface { exchange.Fetcher ID() uint64 ReceiveFrom(peer.ID, []cid.Cid, []cid.Cid, []cid.Cid) + Shutdown() } // SessionFactory generates a new session for the SessionManager to track. 
-type SessionFactory func(ctx context.Context, id uint64, sprm bssession.SessionPeerManager, sim *bssim.SessionInterestManager, pm bssession.PeerManager, bpm *bsbpm.BlockPresenceManager, notif notifications.PubSub, provSearchDelay time.Duration, rebroadcastDelay delay.D, self peer.ID) Session +type SessionFactory func( + ctx context.Context, + sm bssession.SessionManager, + id uint64, + sprm bssession.SessionPeerManager, + sim *bssim.SessionInterestManager, + pm bssession.PeerManager, + bpm *bsbpm.BlockPresenceManager, + notif notifications.PubSub, + provSearchDelay time.Duration, + rebroadcastDelay delay.D, + self peer.ID) Session // PeerManagerFactory generates a new peer manager for a session. type PeerManagerFactory func(ctx context.Context, id uint64) bssession.SessionPeerManager @@ -54,6 +66,7 @@ type SessionManager struct { // New creates a new SessionManager. func New(ctx context.Context, sessionFactory SessionFactory, sessionInterestManager *bssim.SessionInterestManager, peerManagerFactory PeerManagerFactory, blockPresenceManager *bsbpm.BlockPresenceManager, peerManager bssession.PeerManager, notif notifications.PubSub, self peer.ID) *SessionManager { + return &SessionManager{ ctx: ctx, sessionFactory: sessionFactory, @@ -73,31 +86,53 @@ func (sm *SessionManager) NewSession(ctx context.Context, provSearchDelay time.Duration, rebroadcastDelay delay.D) exchange.Fetcher { id := sm.GetNextSessionID() - sessionctx, cancel := context.WithCancel(ctx) - pm := sm.peerManagerFactory(sessionctx, id) - session := sm.sessionFactory(sessionctx, id, pm, sm.sessionInterestManager, sm.peerManager, sm.blockPresenceManager, sm.notif, provSearchDelay, rebroadcastDelay, sm.self) + pm := sm.peerManagerFactory(ctx, id) + session := sm.sessionFactory(ctx, sm, id, pm, sm.sessionInterestManager, sm.peerManager, sm.blockPresenceManager, sm.notif, provSearchDelay, rebroadcastDelay, sm.self) + sm.sessLk.Lock() - sm.sessions[id] = session + if sm.sessions != nil { // check if 
SessionManager was shutdown + sm.sessions[id] = session + } sm.sessLk.Unlock() - go func() { - defer cancel() - select { - case <-sm.ctx.Done(): - sm.removeSession(id) - case <-ctx.Done(): - sm.removeSession(id) - } - }() return session } -func (sm *SessionManager) removeSession(sesid uint64) { +func (sm *SessionManager) Shutdown() { + sm.sessLk.Lock() + + sessions := make([]Session, 0, len(sm.sessions)) + for _, ses := range sm.sessions { + sessions = append(sessions, ses) + } + + // Ensure that if Shutdown() is called twice we only shut down + // the sessions once + sm.sessions = nil + + sm.sessLk.Unlock() + + for _, ses := range sessions { + ses.Shutdown() + } +} + +func (sm *SessionManager) RemoveSession(sesid uint64) { + // Remove session from SessionInterestManager - returns the keys that no + // session is interested in anymore. + cancelKs := sm.sessionInterestManager.RemoveSession(sesid) + + // Cancel keys that no session is interested in anymore + sm.cancelWants(cancelKs) + sm.sessLk.Lock() defer sm.sessLk.Unlock() - delete(sm.sessions, sesid) + // Clean up session + if sm.sessions != nil { // check if SessionManager was shutdown + delete(sm.sessions, sesid) + } } // GetNextSessionID returns the next sequential identifier for a session. 
@@ -117,6 +152,10 @@ func (sm *SessionManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid // Notify each session that is interested in the blocks / HAVEs / DONT_HAVEs for _, id := range sm.sessionInterestManager.InterestedSessions(blks, haves, dontHaves) { sm.sessLk.RLock() + if sm.sessions == nil { // check if SessionManager was shutdown + sm.sessLk.RUnlock() + return + } sess, ok := sm.sessions[id] sm.sessLk.RUnlock() @@ -128,3 +167,23 @@ func (sm *SessionManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid // Send CANCEL to all peers with want-have / want-block sm.peerManager.SendCancels(ctx, blks) } + +// CancelSessionWants is called when a session cancels wants because a call to +// GetBlocks() is cancelled +func (sm *SessionManager) CancelSessionWants(sesid uint64, wants []cid.Cid) { + // Remove session's interest in the given blocks - returns the keys that no + // session is interested in anymore. + cancelKs := sm.sessionInterestManager.RemoveSessionInterested(sesid, wants) + sm.cancelWants(cancelKs) +} + +func (sm *SessionManager) cancelWants(wants []cid.Cid) { + // Free up block presence tracking for keys that no session is interested + // in anymore + sm.blockPresenceManager.RemoveKeys(wants) + + // Send CANCEL to all peers for blocks that no session is interested in + // anymore. + // Note: use bitswap context because session context may already be Done. 
+ sm.peerManager.SendCancels(sm.ctx, wants) +} diff --git a/bitswap/internal/sessionmanager/sessionmanager_test.go b/bitswap/internal/sessionmanager/sessionmanager_test.go index 6fa118e7b..3be1f9b55 100644 --- a/bitswap/internal/sessionmanager/sessionmanager_test.go +++ b/bitswap/internal/sessionmanager/sessionmanager_test.go @@ -2,6 +2,7 @@ package sessionmanager import ( "context" + "sync" "testing" "time" @@ -12,6 +13,7 @@ import ( bspm "github.com/ipfs/go-bitswap/internal/peermanager" bssession "github.com/ipfs/go-bitswap/internal/session" bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" + "github.com/ipfs/go-bitswap/internal/testutil" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" @@ -24,6 +26,7 @@ type fakeSession struct { wantHaves []cid.Cid id uint64 pm *fakeSesPeerManager + sm bssession.SessionManager notif notifications.PubSub } @@ -41,6 +44,9 @@ func (fs *fakeSession) ReceiveFrom(p peer.ID, ks []cid.Cid, wantBlocks []cid.Cid fs.wantBlocks = append(fs.wantBlocks, wantBlocks...) fs.wantHaves = append(fs.wantHaves, wantHaves...) } +func (fs *fakeSession) Shutdown() { + fs.sm.RemoveSession(fs.id) +} type fakeSesPeerManager struct { } @@ -53,6 +59,7 @@ func (*fakeSesPeerManager) RemovePeer(peer.ID) bool { return false } func (*fakeSesPeerManager) HasPeers() bool { return false } type fakePeerManager struct { + lk sync.Mutex cancels []cid.Cid } @@ -61,10 +68,18 @@ func (*fakePeerManager) UnregisterSession(uint64) func (*fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {} func (*fakePeerManager) BroadcastWantHaves(context.Context, []cid.Cid) {} func (fpm *fakePeerManager) SendCancels(ctx context.Context, cancels []cid.Cid) { + fpm.lk.Lock() + defer fpm.lk.Unlock() fpm.cancels = append(fpm.cancels, cancels...) 
} +func (fpm *fakePeerManager) cancelled() []cid.Cid { + fpm.lk.Lock() + defer fpm.lk.Unlock() + return fpm.cancels +} func sessionFactory(ctx context.Context, + sm bssession.SessionManager, id uint64, sprm bssession.SessionPeerManager, sim *bssim.SessionInterestManager, @@ -74,11 +89,17 @@ func sessionFactory(ctx context.Context, provSearchDelay time.Duration, rebroadcastDelay delay.D, self peer.ID) Session { - return &fakeSession{ + fs := &fakeSession{ id: id, pm: sprm.(*fakeSesPeerManager), + sm: sm, notif: notif, } + go func() { + <-ctx.Done() + sm.RemoveSession(fs.id) + }() + return fs } func peerManagerFactory(ctx context.Context, id uint64) bssession.SessionPeerManager { @@ -127,12 +148,12 @@ func TestReceiveFrom(t *testing.T) { t.Fatal("should have received want-haves but didn't") } - if len(pm.cancels) != 1 { + if len(pm.cancelled()) != 1 { t.Fatal("should have sent cancel for received blocks") } } -func TestReceiveBlocksWhenManagerContextCancelled(t *testing.T) { +func TestReceiveBlocksWhenManagerShutdown(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -154,7 +175,7 @@ func TestReceiveBlocksWhenManagerContextCancelled(t *testing.T) { sim.RecordSessionInterest(secondSession.ID(), []cid.Cid{block.Cid()}) sim.RecordSessionInterest(thirdSession.ID(), []cid.Cid{block.Cid()}) - cancel() + sm.Shutdown() // wait for sessions to get removed time.Sleep(10 * time.Millisecond) @@ -168,8 +189,7 @@ func TestReceiveBlocksWhenManagerContextCancelled(t *testing.T) { } func TestReceiveBlocksWhenSessionContextCancelled(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() notif := notifications.New() defer notif.Shutdown() @@ -202,3 +222,38 @@ func TestReceiveBlocksWhenSessionContextCancelled(t *testing.T) { t.Fatal("received blocks for sessions that are canceled") } } + +func TestShutdown(t *testing.T) { + ctx := 
context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + notif := notifications.New() + defer notif.Shutdown() + sim := bssim.New() + bpm := bsbpm.New() + pm := &fakePeerManager{} + sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") + + p := peer.ID(123) + block := blocks.NewBlock([]byte("block")) + cids := []cid.Cid{block.Cid()} + firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + sim.RecordSessionInterest(firstSession.ID(), cids) + sm.ReceiveFrom(ctx, p, []cid.Cid{}, []cid.Cid{}, cids) + + if !bpm.HasKey(block.Cid()) { + t.Fatal("expected cid to be added to block presence manager") + } + + sm.Shutdown() + + // wait for cleanup + time.Sleep(10 * time.Millisecond) + + if bpm.HasKey(block.Cid()) { + t.Fatal("expected cid to be removed from block presence manager") + } + if !testutil.MatchKeysIgnoreOrder(pm.cancelled(), cids) { + t.Fatal("expected cancels to be sent") + } +} diff --git a/bitswap/internal/sessionwantlist/sessionwantlist.go b/bitswap/internal/sessionwantlist/sessionwantlist.go deleted file mode 100644 index 05c143367..000000000 --- a/bitswap/internal/sessionwantlist/sessionwantlist.go +++ /dev/null @@ -1,137 +0,0 @@ -package sessionwantlist - -import ( - "sync" - - cid "github.com/ipfs/go-cid" -) - -// The SessionWantList keeps track of which sessions want a CID -type SessionWantlist struct { - sync.RWMutex - wants map[cid.Cid]map[uint64]struct{} -} - -func NewSessionWantlist() *SessionWantlist { - return &SessionWantlist{ - wants: make(map[cid.Cid]map[uint64]struct{}), - } -} - -// The given session wants the keys -func (swl *SessionWantlist) Add(ks []cid.Cid, ses uint64) { - swl.Lock() - defer swl.Unlock() - - for _, c := range ks { - if _, ok := swl.wants[c]; !ok { - swl.wants[c] = make(map[uint64]struct{}) - } - swl.wants[c][ses] = struct{}{} - } -} - -// Remove the keys for all sessions. -// Called when blocks are received. 
-func (swl *SessionWantlist) RemoveKeys(ks []cid.Cid) { - swl.Lock() - defer swl.Unlock() - - for _, c := range ks { - delete(swl.wants, c) - } -} - -// Remove the session's wants, and return wants that are no longer wanted by -// any session. -func (swl *SessionWantlist) RemoveSession(ses uint64) []cid.Cid { - swl.Lock() - defer swl.Unlock() - - deletedKs := make([]cid.Cid, 0) - for c := range swl.wants { - delete(swl.wants[c], ses) - if len(swl.wants[c]) == 0 { - delete(swl.wants, c) - deletedKs = append(deletedKs, c) - } - } - - return deletedKs -} - -// Remove the session's wants -func (swl *SessionWantlist) RemoveSessionKeys(ses uint64, ks []cid.Cid) { - swl.Lock() - defer swl.Unlock() - - for _, c := range ks { - if _, ok := swl.wants[c]; ok { - delete(swl.wants[c], ses) - if len(swl.wants[c]) == 0 { - delete(swl.wants, c) - } - } - } -} - -// All keys wanted by all sessions -func (swl *SessionWantlist) Keys() []cid.Cid { - swl.RLock() - defer swl.RUnlock() - - ks := make([]cid.Cid, 0, len(swl.wants)) - for c := range swl.wants { - ks = append(ks, c) - } - return ks -} - -// All sessions that want the given keys -func (swl *SessionWantlist) SessionsFor(ks []cid.Cid) []uint64 { - swl.RLock() - defer swl.RUnlock() - - sesMap := make(map[uint64]struct{}) - for _, c := range ks { - for s := range swl.wants[c] { - sesMap[s] = struct{}{} - } - } - - ses := make([]uint64, 0, len(sesMap)) - for s := range sesMap { - ses = append(ses, s) - } - return ses -} - -// Filter for keys that at least one session wants -func (swl *SessionWantlist) Has(ks []cid.Cid) *cid.Set { - swl.RLock() - defer swl.RUnlock() - - has := cid.NewSet() - for _, c := range ks { - if _, ok := swl.wants[c]; ok { - has.Add(c) - } - } - return has -} - -// Filter for keys that the given session wants -func (swl *SessionWantlist) SessionHas(ses uint64, ks []cid.Cid) *cid.Set { - swl.RLock() - defer swl.RUnlock() - - has := cid.NewSet() - for _, c := range ks { - if sesMap, cok := swl.wants[c]; cok { 
- if _, sok := sesMap[ses]; sok { - has.Add(c) - } - } - } - return has -} diff --git a/bitswap/internal/sessionwantlist/sessionwantlist_test.go b/bitswap/internal/sessionwantlist/sessionwantlist_test.go deleted file mode 100644 index d57f93959..000000000 --- a/bitswap/internal/sessionwantlist/sessionwantlist_test.go +++ /dev/null @@ -1,258 +0,0 @@ -package sessionwantlist - -import ( - "os" - "testing" - - "github.com/ipfs/go-bitswap/internal/testutil" - - cid "github.com/ipfs/go-cid" -) - -var c0 cid.Cid -var c1 cid.Cid -var c2 cid.Cid - -const s0 = uint64(0) -const s1 = uint64(1) - -func setup() { - cids := testutil.GenerateCids(3) - c0 = cids[0] - c1 = cids[1] - c2 = cids[2] -} - -func TestMain(m *testing.M) { - setup() - os.Exit(m.Run()) -} - -func TestEmpty(t *testing.T) { - swl := NewSessionWantlist() - - if len(swl.Keys()) != 0 { - t.Fatal("Expected Keys() to be empty") - } - if len(swl.SessionsFor([]cid.Cid{c0})) != 0 { - t.Fatal("Expected SessionsFor() to be empty") - } -} - -func TestSimpleAdd(t *testing.T) { - swl := NewSessionWantlist() - - // s0: c0 - swl.Add([]cid.Cid{c0}, s0) - if len(swl.Keys()) != 1 { - t.Fatal("Expected Keys() to have length 1") - } - if !swl.Keys()[0].Equals(c0) { - t.Fatal("Expected Keys() to be [cid0]") - } - if len(swl.SessionsFor([]cid.Cid{c0})) != 1 { - t.Fatal("Expected SessionsFor() to have length 1") - } - if swl.SessionsFor([]cid.Cid{c0})[0] != s0 { - t.Fatal("Expected SessionsFor() to be [s0]") - } - - // s0: c0, c1 - swl.Add([]cid.Cid{c1}, s0) - if len(swl.Keys()) != 2 { - t.Fatal("Expected Keys() to have length 2") - } - if !testutil.MatchKeysIgnoreOrder(swl.Keys(), []cid.Cid{c0, c1}) { - t.Fatal("Expected Keys() to contain [cid0, cid1]") - } - if len(swl.SessionsFor([]cid.Cid{c0})) != 1 { - t.Fatal("Expected SessionsFor() to have length 1") - } - if swl.SessionsFor([]cid.Cid{c0})[0] != s0 { - t.Fatal("Expected SessionsFor() to be [s0]") - } - - // s0: c0, c1 - // s1: c0 - swl.Add([]cid.Cid{c0}, s1) - if 
len(swl.Keys()) != 2 { - t.Fatal("Expected Keys() to have length 2") - } - if !testutil.MatchKeysIgnoreOrder(swl.Keys(), []cid.Cid{c0, c1}) { - t.Fatal("Expected Keys() to contain [cid0, cid1]") - } - if len(swl.SessionsFor([]cid.Cid{c0})) != 2 { - t.Fatal("Expected SessionsFor() to have length 2") - } -} - -func TestMultiKeyAdd(t *testing.T) { - swl := NewSessionWantlist() - - // s0: c0, c1 - swl.Add([]cid.Cid{c0, c1}, s0) - if len(swl.Keys()) != 2 { - t.Fatal("Expected Keys() to have length 2") - } - if !testutil.MatchKeysIgnoreOrder(swl.Keys(), []cid.Cid{c0, c1}) { - t.Fatal("Expected Keys() to contain [cid0, cid1]") - } - if len(swl.SessionsFor([]cid.Cid{c0})) != 1 { - t.Fatal("Expected SessionsFor() to have length 1") - } - if swl.SessionsFor([]cid.Cid{c0})[0] != s0 { - t.Fatal("Expected SessionsFor() to be [s0]") - } -} - -func TestSessionHas(t *testing.T) { - swl := NewSessionWantlist() - - if swl.Has([]cid.Cid{c0, c1}).Len() > 0 { - t.Fatal("Expected Has([c0, c1]) to be []") - } - if swl.SessionHas(s0, []cid.Cid{c0, c1}).Len() > 0 { - t.Fatal("Expected SessionHas(s0, [c0, c1]) to be []") - } - - // s0: c0 - swl.Add([]cid.Cid{c0}, s0) - if !matchSet(swl.Has([]cid.Cid{c0, c1}), []cid.Cid{c0}) { - t.Fatal("Expected Has([c0, c1]) to be [c0]") - } - if !matchSet(swl.SessionHas(s0, []cid.Cid{c0, c1}), []cid.Cid{c0}) { - t.Fatal("Expected SessionHas(s0, [c0, c1]) to be [c0]") - } - if swl.SessionHas(s1, []cid.Cid{c0, c1}).Len() > 0 { - t.Fatal("Expected SessionHas(s1, [c0, c1]) to be []") - } - - // s0: c0, c1 - swl.Add([]cid.Cid{c1}, s0) - if !matchSet(swl.Has([]cid.Cid{c0, c1}), []cid.Cid{c0, c1}) { - t.Fatal("Expected Has([c0, c1]) to be [c0, c1]") - } - if !matchSet(swl.SessionHas(s0, []cid.Cid{c0, c1}), []cid.Cid{c0, c1}) { - t.Fatal("Expected SessionHas(s0, [c0, c1]) to be [c0, c1]") - } - - // s0: c0, c1 - // s1: c0 - swl.Add([]cid.Cid{c0}, s1) - if len(swl.Keys()) != 2 { - t.Fatal("Expected Keys() to have length 2") - } - if !matchSet(swl.Has([]cid.Cid{c0, 
c1}), []cid.Cid{c0, c1}) { - t.Fatal("Expected Has([c0, c1]) to be [c0, c1]") - } - if !matchSet(swl.SessionHas(s0, []cid.Cid{c0, c1}), []cid.Cid{c0, c1}) { - t.Fatal("Expected SessionHas(s0, [c0, c1]) to be [c0, c1]") - } - if !matchSet(swl.SessionHas(s1, []cid.Cid{c0, c1}), []cid.Cid{c0}) { - t.Fatal("Expected SessionHas(s1, [c0, c1]) to be [c0]") - } -} - -func TestSimpleRemoveKeys(t *testing.T) { - swl := NewSessionWantlist() - - // s0: c0, c1 - // s1: c0 - swl.Add([]cid.Cid{c0, c1}, s0) - swl.Add([]cid.Cid{c0}, s1) - - // s0: c1 - swl.RemoveKeys([]cid.Cid{c0}) - if len(swl.Keys()) != 1 { - t.Fatal("Expected Keys() to have length 1") - } - if !swl.Keys()[0].Equals(c1) { - t.Fatal("Expected Keys() to be [cid1]") - } - if len(swl.SessionsFor([]cid.Cid{c0})) != 0 { - t.Fatal("Expected SessionsFor(c0) to be empty") - } - if len(swl.SessionsFor([]cid.Cid{c1})) != 1 { - t.Fatal("Expected SessionsFor(c1) to have length 1") - } - if swl.SessionsFor([]cid.Cid{c1})[0] != s0 { - t.Fatal("Expected SessionsFor(c1) to be [s0]") - } -} - -func TestMultiRemoveKeys(t *testing.T) { - swl := NewSessionWantlist() - - // s0: c0, c1 - // s1: c0 - swl.Add([]cid.Cid{c0, c1}, s0) - swl.Add([]cid.Cid{c0}, s1) - - // - swl.RemoveKeys([]cid.Cid{c0, c1}) - if len(swl.Keys()) != 0 { - t.Fatal("Expected Keys() to be empty") - } - if len(swl.SessionsFor([]cid.Cid{c0})) != 0 { - t.Fatal("Expected SessionsFor() to be empty") - } -} - -func TestRemoveSession(t *testing.T) { - swl := NewSessionWantlist() - - // s0: c0, c1 - // s1: c0 - swl.Add([]cid.Cid{c0, c1}, s0) - swl.Add([]cid.Cid{c0}, s1) - - // s1: c0 - swl.RemoveSession(s0) - if len(swl.Keys()) != 1 { - t.Fatal("Expected Keys() to have length 1") - } - if !swl.Keys()[0].Equals(c0) { - t.Fatal("Expected Keys() to be [cid0]") - } - if len(swl.SessionsFor([]cid.Cid{c1})) != 0 { - t.Fatal("Expected SessionsFor(c1) to be empty") - } - if len(swl.SessionsFor([]cid.Cid{c0})) != 1 { - t.Fatal("Expected SessionsFor(c0) to have length 1") - } - if 
swl.SessionsFor([]cid.Cid{c0})[0] != s1 { - t.Fatal("Expected SessionsFor(c0) to be [s1]") - } -} - -func TestRemoveSessionKeys(t *testing.T) { - swl := NewSessionWantlist() - - // s0: c0, c1, c2 - // s1: c0 - swl.Add([]cid.Cid{c0, c1, c2}, s0) - swl.Add([]cid.Cid{c0}, s1) - - // s0: c2 - // s1: c0 - swl.RemoveSessionKeys(s0, []cid.Cid{c0, c1}) - if !matchSet(swl.SessionHas(s0, []cid.Cid{c0, c1, c2}), []cid.Cid{c2}) { - t.Fatal("Expected SessionHas(s0, [c0, c1, c2]) to be [c2]") - } - if !matchSet(swl.SessionHas(s1, []cid.Cid{c0, c1, c2}), []cid.Cid{c0}) { - t.Fatal("Expected SessionHas(s1, [c0, c1, c2]) to be [c0]") - } -} - -func matchSet(ks1 *cid.Set, ks2 []cid.Cid) bool { - if ks1.Len() != len(ks2) { - return false - } - - for _, k := range ks2 { - if !ks1.Has(k) { - return false - } - } - return true -} From ce5ee2e2ec169c665429b414745eb3313449ab6a Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 1 May 2020 12:05:37 -0400 Subject: [PATCH 0942/1035] test: fix flaky test TestSessionBetweenPeers This commit was moved from ipfs/go-bitswap@373033e7540d67c455587e61826d5a1c524f291a --- bitswap/bitswap_with_sessions_test.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/bitswap_with_sessions_test.go index 9551938c9..f710879a1 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -9,10 +9,12 @@ import ( bitswap "github.com/ipfs/go-bitswap" bssession "github.com/ipfs/go-bitswap/internal/session" testinstance "github.com/ipfs/go-bitswap/testinstance" + tn "github.com/ipfs/go-bitswap/testnet" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" delay "github.com/ipfs/go-ipfs-delay" + mockrouting "github.com/ipfs/go-ipfs-routing/mock" tu "github.com/libp2p/go-libp2p-testing/etc" ) @@ -71,7 +73,7 @@ func TestSessionBetweenPeers(t *testing.T) { ctx, cancel := 
context.WithCancel(context.Background()) defer cancel() - vnet := getVirtualNetwork() + vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(time.Millisecond)) ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) defer ig.Close() bgen := blocksutil.NewBlockGenerator() @@ -112,6 +114,10 @@ func TestSessionBetweenPeers(t *testing.T) { t.Fatal(err) } } + + // Uninvolved nodes should receive + // - initial broadcast want-have of root block + // - CANCEL (when Peer A receives the root block from Peer B) for _, is := range inst[2:] { stat, err := is.Exchange.Stat() if err != nil { From 2ffeb67b313f5efa504f2870535a5d87bdf49817 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 7 May 2020 12:20:50 -0400 Subject: [PATCH 0943/1035] fix: return wants from engine.WantlistForPeer() This commit was moved from ipfs/go-bitswap@42e4a89639c009f68583c7e9ea6bd01dac835ea6 --- bitswap/internal/decision/engine.go | 7 ++--- bitswap/internal/decision/engine_test.go | 34 ++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 3 deletions(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 81ef9b9e5..49063bd5c 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -334,8 +334,8 @@ func (e *Engine) onPeerRemoved(p peer.ID) { e.peerTagger.UntagPeer(p, e.tagQueued) } -// WantlistForPeer returns the currently understood want list for a given peer -func (e *Engine) WantlistForPeer(p peer.ID) (out []wl.Entry) { +// WantlistForPeer returns the list of keys that the given peer has asked for +func (e *Engine) WantlistForPeer(p peer.ID) []wl.Entry { partner := e.findOrCreate(p) partner.lk.Lock() @@ -343,7 +343,8 @@ func (e *Engine) WantlistForPeer(p peer.ID) (out []wl.Entry) { partner.lk.Unlock() wl.SortEntries(entries) - return + + return entries } // LedgerForPeer returns aggregated data about blocks swapped and communication diff --git a/bitswap/internal/decision/engine_test.go 
b/bitswap/internal/decision/engine_test.go index bdfa93623..cf000d96e 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -981,6 +981,40 @@ func TestSendDontHave(t *testing.T) { } } +func TestWantlistForPeer(t *testing.T) { + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + partner := libp2ptest.RandPeerIDFatal(t) + otherPeer := libp2ptest.RandPeerIDFatal(t) + + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm, nil) + e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + + blks := testutil.GenerateBlocksOfSize(4, 8*1024) + msg := message.New(false) + msg.AddEntry(blks[0].Cid(), 2, pb.Message_Wantlist_Have, false) + msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, false) + e.MessageReceived(context.Background(), partner, msg) + + msg2 := message.New(false) + msg2.AddEntry(blks[2].Cid(), 1, pb.Message_Wantlist_Block, false) + msg2.AddEntry(blks[3].Cid(), 4, pb.Message_Wantlist_Block, false) + e.MessageReceived(context.Background(), partner, msg2) + + entries := e.WantlistForPeer(otherPeer) + if len(entries) != 0 { + t.Fatal("expected wantlist to contain no wants for other peer") + } + + entries = e.WantlistForPeer(partner) + if len(entries) != 4 { + t.Fatal("expected wantlist to contain all wants from parter") + } + if entries[0].Priority != 4 || entries[1].Priority != 3 || entries[2].Priority != 2 || entries[3].Priority != 1 { + t.Fatal("expected wantlist to be sorted") + } + +} + func TestTaggingPeers(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() From 3120b78babc2db0120ed50d61f201c50aa158f8b Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Tue, 19 May 2020 11:26:14 -0400 Subject: [PATCH 0944/1035] perf: improve peer manager performance This commit was moved from ipfs/go-bitswap@e4f2791e90e88e5d5fd768c968519ebb191a8b2a --- 
bitswap/internal/peermanager/peermanager.go | 22 +- .../internal/peermanager/peermanager_test.go | 59 ++++ .../internal/peermanager/peerwantmanager.go | 201 +++++++------- .../peermanager/peerwantmanager_test.go | 257 ++++++++++++------ 4 files changed, 334 insertions(+), 205 deletions(-) diff --git a/bitswap/internal/peermanager/peermanager.go b/bitswap/internal/peermanager/peermanager.go index 04b015bfd..0ce735846 100644 --- a/bitswap/internal/peermanager/peermanager.go +++ b/bitswap/internal/peermanager/peermanager.go @@ -90,9 +90,8 @@ func (pm *PeerManager) Connected(p peer.ID) { pq := pm.getOrCreate(p) // Inform the peer want manager that there's a new peer - wants := pm.pwm.addPeer(p) - // Broadcast any live want-haves to the newly connected peers - pq.AddBroadcastWantHaves(wants) + pm.pwm.addPeer(pq, p) + // Inform the sessions that the peer has connected pm.signalAvailability(p, true) } @@ -138,11 +137,7 @@ func (pm *PeerManager) BroadcastWantHaves(ctx context.Context, wantHaves []cid.C pm.pqLk.Lock() defer pm.pqLk.Unlock() - for p, ks := range pm.pwm.prepareBroadcastWantHaves(wantHaves) { - if pq, ok := pm.peerQueues[p]; ok { - pq.AddBroadcastWantHaves(ks) - } - } + pm.pwm.broadcastWantHaves(wantHaves) } // SendWants sends the given want-blocks and want-haves to the given peer. 
@@ -151,9 +146,8 @@ func (pm *PeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []ci pm.pqLk.Lock() defer pm.pqLk.Unlock() - if pq, ok := pm.peerQueues[p]; ok { - wblks, whvs := pm.pwm.prepareSendWants(p, wantBlocks, wantHaves) - pq.AddWants(wblks, whvs) + if _, ok := pm.peerQueues[p]; ok { + pm.pwm.sendWants(p, wantBlocks, wantHaves) } } @@ -164,11 +158,7 @@ func (pm *PeerManager) SendCancels(ctx context.Context, cancelKs []cid.Cid) { defer pm.pqLk.Unlock() // Send a CANCEL to each peer that has been sent a want-block or want-have - for p, ks := range pm.pwm.prepareSendCancels(cancelKs) { - if pq, ok := pm.peerQueues[p]; ok { - pq.AddCancels(ks) - } - } + pm.pwm.sendCancels(cancelKs) } // CurrentWants returns the list of pending wants (both want-haves and want-blocks). diff --git a/bitswap/internal/peermanager/peermanager_test.go b/bitswap/internal/peermanager/peermanager_test.go index 560868466..2a4c4c697 100644 --- a/bitswap/internal/peermanager/peermanager_test.go +++ b/bitswap/internal/peermanager/peermanager_test.go @@ -2,6 +2,7 @@ package peermanager import ( "context" + "math/rand" "testing" "time" @@ -318,3 +319,61 @@ func TestSessionRegistration(t *testing.T) { t.Fatal("Expected no signal callback (session unregistered)") } } + +type benchPeerQueue struct { +} + +func (*benchPeerQueue) Startup() {} +func (*benchPeerQueue) Shutdown() {} + +func (*benchPeerQueue) AddBroadcastWantHaves(whs []cid.Cid) {} +func (*benchPeerQueue) AddWants(wbs []cid.Cid, whs []cid.Cid) {} +func (*benchPeerQueue) AddCancels(cs []cid.Cid) {} +func (*benchPeerQueue) ResponseReceived(ks []cid.Cid) {} + +// Simplistic benchmark to allow us to stress test +func BenchmarkPeerManager(b *testing.B) { + b.StopTimer() + + ctx := context.Background() + + peerQueueFactory := func(ctx context.Context, p peer.ID) PeerQueue { + return &benchPeerQueue{} + } + + self := testutil.GeneratePeers(1)[0] + peers := testutil.GeneratePeers(500) + peerManager := New(ctx, peerQueueFactory, 
self) + + // Create a bunch of connections + connected := 0 + for i := 0; i < len(peers); i++ { + peerManager.Connected(peers[i]) + connected++ + } + + var wanted []cid.Cid + + b.StartTimer() + for n := 0; n < b.N; n++ { + // Pick a random peer + i := rand.Intn(connected) + + // Alternately add either a few wants or many broadcast wants + r := rand.Intn(8) + if r == 0 { + wants := testutil.GenerateCids(10) + peerManager.SendWants(ctx, peers[i], wants[:2], wants[2:]) + wanted = append(wanted, wants...) + } else if r == 1 { + wants := testutil.GenerateCids(30) + peerManager.BroadcastWantHaves(ctx, wants) + wanted = append(wanted, wants...) + } else { + limit := len(wanted) / 10 + cancel := wanted[:limit] + wanted = wanted[limit:] + peerManager.SendCancels(ctx, cancel) + } + } +} diff --git a/bitswap/internal/peermanager/peerwantmanager.go b/bitswap/internal/peermanager/peerwantmanager.go index 418a646c4..9b6198afa 100644 --- a/bitswap/internal/peermanager/peerwantmanager.go +++ b/bitswap/internal/peermanager/peerwantmanager.go @@ -37,6 +37,7 @@ type peerWantManager struct { type peerWant struct { wantBlocks *cid.Set wantHaves *cid.Set + peerQueue PeerQueue } // New creates a new peerWantManager with a Gauge that keeps track of the @@ -50,17 +51,24 @@ func newPeerWantManager(wantBlockGauge Gauge) *peerWantManager { } } -// addPeer adds a peer whose wants we need to keep track of. It returns the -// current list of broadcast wants that should be sent to the peer. -func (pwm *peerWantManager) addPeer(p peer.ID) []cid.Cid { - if _, ok := pwm.peerWants[p]; !ok { - pwm.peerWants[p] = &peerWant{ - wantBlocks: cid.NewSet(), - wantHaves: cid.NewSet(), - } - return pwm.broadcastWants.Keys() +// addPeer adds a peer whose wants we need to keep track of. It sends the +// current list of broadcast wants to the peer. 
+func (pwm *peerWantManager) addPeer(peerQueue PeerQueue, p peer.ID) { + if _, ok := pwm.peerWants[p]; ok { + return + } + + pwm.peerWants[p] = &peerWant{ + wantBlocks: cid.NewSet(), + wantHaves: cid.NewSet(), + peerQueue: peerQueue, + } + + // Broadcast any live want-haves to the newly connected peer + if pwm.broadcastWants.Len() > 0 { + wants := pwm.broadcastWants.Keys() + peerQueue.AddBroadcastWantHaves(wants) } - return nil } // RemovePeer removes a peer and its associated wants from tracking @@ -87,55 +95,53 @@ func (pwm *peerWantManager) removePeer(p peer.ID) { delete(pwm.peerWants, p) } -// PrepareBroadcastWantHaves filters the list of want-haves for each peer, -// returning a map of peers to the want-haves they have not yet been sent. -func (pwm *peerWantManager) prepareBroadcastWantHaves(wantHaves []cid.Cid) map[peer.ID][]cid.Cid { - res := make(map[peer.ID][]cid.Cid, len(pwm.peerWants)) +// broadcastWantHaves sends want-haves to any peers that have not yet been sent them. +func (pwm *peerWantManager) broadcastWantHaves(wantHaves []cid.Cid) { + unsent := make([]cid.Cid, 0, len(wantHaves)) for _, c := range wantHaves { if pwm.broadcastWants.Has(c) { // Already a broadcast want, skip it. continue } pwm.broadcastWants.Add(c) + unsent = append(unsent, c) + } - // Prepare broadcast. - wantedBy := pwm.wantPeers[c] - for p := range pwm.peerWants { + if len(unsent) == 0 { + return + } + + // Allocate a single buffer to filter broadcast wants for each peer + bcstWantsBuffer := make([]cid.Cid, 0, len(unsent)) + + // Send broadcast wants to each peer + for _, pws := range pwm.peerWants { + peerUnsent := bcstWantsBuffer[:0] + for _, c := range unsent { // If we've already sent a want to this peer, skip them. - // - // This is faster than checking the actual wantlists due - // to better locality. 
- if _, ok := wantedBy[p]; ok { - continue + if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { + peerUnsent = append(peerUnsent, c) } + } - cids, ok := res[p] - if !ok { - cids = make([]cid.Cid, 0, len(wantHaves)) - } - res[p] = append(cids, c) + if len(peerUnsent) > 0 { + pws.peerQueue.AddBroadcastWantHaves(peerUnsent) } } - - return res } -// PrepareSendWants filters the list of want-blocks and want-haves such that -// it only contains wants that have not already been sent to the peer. -func (pwm *peerWantManager) prepareSendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) ([]cid.Cid, []cid.Cid) { - resWantBlks := make([]cid.Cid, 0) - resWantHvs := make([]cid.Cid, 0) +// sendWants only sends the peer the want-blocks and want-haves that have not +// already been sent to it. +func (pwm *peerWantManager) sendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { + fltWantBlks := make([]cid.Cid, 0, len(wantBlocks)) + fltWantHvs := make([]cid.Cid, 0, len(wantHaves)) // Get the existing want-blocks and want-haves for the peer pws, ok := pwm.peerWants[p] - if !ok { - // In practice this should never happen: - // - PeerManager calls addPeer() as soon as the peer connects - // - PeerManager calls removePeer() as soon as the peer disconnects - // - All calls to PeerWantManager are locked - log.Errorf("prepareSendWants() called with peer %s but peer not found in peerWantManager", string(p)) - return resWantBlks, resWantHvs + // In practice this should never happen + log.Errorf("sendWants() called with peer %s but peer not found in peerWantManager", string(p)) + return } // Iterate over the requested want-blocks @@ -149,7 +155,7 @@ func (pwm *peerWantManager) prepareSendWants(p peer.ID, wantBlocks []cid.Cid, wa pwm.reverseIndexAdd(c, p) // Add the CID to the results - resWantBlks = append(resWantBlks, c) + fltWantBlks = append(fltWantBlks, c) // Make sure the CID is no longer recorded as a want-have pws.wantHaves.Remove(c) @@ -176,57 +182,45 @@ func (pwm 
*peerWantManager) prepareSendWants(p peer.ID, wantBlocks []cid.Cid, wa pwm.reverseIndexAdd(c, p) // Add the CID to the results - resWantHvs = append(resWantHvs, c) + fltWantHvs = append(fltWantHvs, c) } } - return resWantBlks, resWantHvs + // Send the want-blocks and want-haves to the peer + pws.peerQueue.AddWants(fltWantBlks, fltWantHvs) } -// PrepareSendCancels filters the list of cancels for each peer, -// returning a map of peers which only contains cancels for wants that have -// been sent to the peer. -func (pwm *peerWantManager) prepareSendCancels(cancelKs []cid.Cid) map[peer.ID][]cid.Cid { +// sendCancels sends a cancel to each peer to which a corresponding want was +// sent +func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { if len(cancelKs) == 0 { - return nil - } - - // Pre-allocate enough space for all peers that have the first CID. - // Chances are these peers are related. - expectedResSize := 0 - firstCancel := cancelKs[0] - if pwm.broadcastWants.Has(firstCancel) { - expectedResSize = len(pwm.peerWants) - } else { - expectedResSize = len(pwm.wantPeers[firstCancel]) + return } - res := make(map[peer.ID][]cid.Cid, expectedResSize) - // Keep the broadcast keys separate. This lets us batch-process them at - // the end. + // Handle broadcast wants up-front broadcastKs := make([]cid.Cid, 0, len(cancelKs)) - - // Iterate over all requested cancels for _, c := range cancelKs { - // Handle broadcast wants up-front. - isBroadcast := pwm.broadcastWants.Has(c) - if isBroadcast { + if pwm.broadcastWants.Has(c) { broadcastKs = append(broadcastKs, c) pwm.broadcastWants.Remove(c) } + } - // Even if this is a broadcast, we may have sent targeted wants. - // Deal with them. 
- for p := range pwm.wantPeers[c] { - pws, ok := pwm.peerWants[p] - if !ok { - // Should never happen but check just in case - log.Errorf("peerWantManager reverse index missing peer %s for key %s", p, c) + // Allocate a single buffer to filter the cancels to send to each peer + cancelsBuff := make([]cid.Cid, 0, len(cancelKs)) + + // Send cancels to a particular peer + send := func(p peer.ID, pws *peerWant) { + // Include broadcast cancels + peerCancels := append(cancelsBuff[:0], broadcastKs...) + for _, c := range cancelKs { + wantBlock := pws.wantBlocks.Has(c) + if !wantBlock && !pws.wantHaves.Has(c) { continue } // Update the want gauge. - if pws.wantBlocks.Has(c) { + if wantBlock { pwm.wantBlockGauge.Dec() } @@ -235,40 +229,49 @@ func (pwm *peerWantManager) prepareSendCancels(cancelKs []cid.Cid) map[peer.ID][ pws.wantHaves.Remove(c) // If it's a broadcast want, we've already added it to - // the broadcastKs list. - if isBroadcast { - continue + // the peer cancels. + if !pwm.broadcastWants.Has(c) { + peerCancels = append(peerCancels, c) } - - // Add the CID to the result for the peer. - cids, ok := res[p] - if !ok { - // Pre-allocate enough for all keys. - // Cancels are usually related. - cids = make([]cid.Cid, 0, len(cancelKs)) - } - res[p] = append(cids, c) } - // Finally, batch-remove the reverse-index. There's no need to - // clear this index peer-by-peer. - delete(pwm.wantPeers, c) + // Send cancels to the peer + if len(peerCancels) > 0 { + pws.peerQueue.AddCancels(peerCancels) + } } - // If we have any broadcasted CIDs, add them in. - // - // Doing this at the end can save us a bunch of work and allocations. if len(broadcastKs) > 0 { - for p := range pwm.peerWants { - if cids, ok := res[p]; ok { - res[p] = append(cids, broadcastKs...) 
- } else { - res[p] = broadcastKs + // If a broadcast want is being cancelled, send the cancel to all + // peers + for p, pws := range pwm.peerWants { + send(p, pws) + } + } else { + // Only send cancels to peers that received a corresponding want + cancelPeers := make(map[peer.ID]struct{}, len(pwm.wantPeers[cancelKs[0]])) + for _, c := range cancelKs { + for p := range pwm.wantPeers[c] { + cancelPeers[p] = struct{}{} + } + } + for p := range cancelPeers { + pws, ok := pwm.peerWants[p] + if !ok { + // Should never happen but check just in case + log.Errorf("sendCancels - peerWantManager index missing peer %s", p) + continue } + + send(p, pws) } } - return res + // Finally, batch-remove the reverse-index. There's no need to + // clear this index peer-by-peer. + for _, c := range cancelKs { + delete(pwm.wantPeers, c) + } } // Add the peer to the list of peers that have sent a want with the cid diff --git a/bitswap/internal/peermanager/peerwantmanager_test.go b/bitswap/internal/peermanager/peerwantmanager_test.go index 766033e8f..396ea0d82 100644 --- a/bitswap/internal/peermanager/peerwantmanager_test.go +++ b/bitswap/internal/peermanager/peerwantmanager_test.go @@ -4,8 +4,8 @@ import ( "testing" "github.com/ipfs/go-bitswap/internal/testutil" - cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" ) type gauge struct { @@ -19,6 +19,42 @@ func (g *gauge) Dec() { g.count-- } +type mockPQ struct { + bcst []cid.Cid + wbs []cid.Cid + whs []cid.Cid + cancels []cid.Cid +} + +func (mpq *mockPQ) clear() { + mpq.bcst = nil + mpq.wbs = nil + mpq.whs = nil + mpq.cancels = nil +} + +func (mpq *mockPQ) Startup() {} +func (mpq *mockPQ) Shutdown() {} + +func (mpq *mockPQ) AddBroadcastWantHaves(whs []cid.Cid) { + mpq.bcst = append(mpq.bcst, whs...) +} +func (mpq *mockPQ) AddWants(wbs []cid.Cid, whs []cid.Cid) { + mpq.wbs = append(mpq.wbs, wbs...) + mpq.whs = append(mpq.whs, whs...) 
+} +func (mpq *mockPQ) AddCancels(cs []cid.Cid) { + mpq.cancels = append(mpq.cancels, cs...) +} +func (mpq *mockPQ) ResponseReceived(ks []cid.Cid) { +} + +func clearSent(pqs map[peer.ID]PeerQueue) { + for _, pqi := range pqs { + pqi.(*mockPQ).clear() + } +} + func TestEmpty(t *testing.T) { pwm := newPeerWantManager(&gauge{}) @@ -30,7 +66,7 @@ func TestEmpty(t *testing.T) { } } -func TestPrepareBroadcastWantHaves(t *testing.T) { +func TestPWMBroadcastWantHaves(t *testing.T) { pwm := newPeerWantManager(&gauge{}) peers := testutil.GeneratePeers(3) @@ -38,74 +74,87 @@ func TestPrepareBroadcastWantHaves(t *testing.T) { cids2 := testutil.GenerateCids(2) cids3 := testutil.GenerateCids(2) - if blist := pwm.addPeer(peers[0]); len(blist) > 0 { - t.Errorf("expected no broadcast wants") - } - if blist := pwm.addPeer(peers[1]); len(blist) > 0 { - t.Errorf("expected no broadcast wants") + peerQueues := make(map[peer.ID]PeerQueue) + for _, p := range peers[:2] { + pq := &mockPQ{} + peerQueues[p] = pq + pwm.addPeer(pq, p) + if len(pq.bcst) > 0 { + t.Errorf("expected no broadcast wants") + } } // Broadcast 2 cids to 2 peers - bcst := pwm.prepareBroadcastWantHaves(cids) - if len(bcst) != 2 { - t.Fatal("Expected 2 peers") - } - for p := range bcst { - if !testutil.MatchKeysIgnoreOrder(bcst[p], cids) { + pwm.broadcastWantHaves(cids) + for _, pqi := range peerQueues { + pq := pqi.(*mockPQ) + if len(pq.bcst) != 2 { + t.Fatal("Expected 2 want-haves") + } + if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids) { t.Fatal("Expected all cids to be broadcast") } } // Broadcasting same cids should have no effect - bcst2 := pwm.prepareBroadcastWantHaves(cids) - if len(bcst2) != 0 { - t.Fatal("Expected 0 peers") + clearSent(peerQueues) + pwm.broadcastWantHaves(cids) + for _, pqi := range peerQueues { + pq := pqi.(*mockPQ) + if len(pq.bcst) != 0 { + t.Fatal("Expected 0 want-haves") + } } // Broadcast 2 other cids - bcst3 := pwm.prepareBroadcastWantHaves(cids2) - if len(bcst3) != 2 { - 
t.Fatal("Expected 2 peers") - } - for p := range bcst3 { - if !testutil.MatchKeysIgnoreOrder(bcst3[p], cids2) { + clearSent(peerQueues) + pwm.broadcastWantHaves(cids2) + for _, pqi := range peerQueues { + pq := pqi.(*mockPQ) + if len(pq.bcst) != 2 { + t.Fatal("Expected 2 want-haves") + } + if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids2) { t.Fatal("Expected all new cids to be broadcast") } } // Broadcast mix of old and new cids - bcst4 := pwm.prepareBroadcastWantHaves(append(cids, cids3...)) - if len(bcst4) != 2 { - t.Fatal("Expected 2 peers") - } - // Only new cids should be broadcast - for p := range bcst4 { - if !testutil.MatchKeysIgnoreOrder(bcst4[p], cids3) { + clearSent(peerQueues) + pwm.broadcastWantHaves(append(cids, cids3...)) + for _, pqi := range peerQueues { + pq := pqi.(*mockPQ) + if len(pq.bcst) != 2 { + t.Fatal("Expected 2 want-haves") + } + // Only new cids should be broadcast + if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids3) { t.Fatal("Expected all new cids to be broadcast") } } // Sending want-block for a cid should prevent broadcast to that peer + clearSent(peerQueues) cids4 := testutil.GenerateCids(4) wantBlocks := []cid.Cid{cids4[0], cids4[2]} - pwm.prepareSendWants(peers[0], wantBlocks, []cid.Cid{}) - - bcst5 := pwm.prepareBroadcastWantHaves(cids4) - if len(bcst4) != 2 { - t.Fatal("Expected 2 peers") - } - // Only cids that were not sent as want-block to peer should be broadcast - for p := range bcst5 { - if p == peers[0] { - if !testutil.MatchKeysIgnoreOrder(bcst5[p], []cid.Cid{cids4[1], cids4[3]}) { - t.Fatal("Expected unsent cids to be broadcast") - } - } - if p == peers[1] { - if !testutil.MatchKeysIgnoreOrder(bcst5[p], cids4) { - t.Fatal("Expected all cids to be broadcast") - } - } + p0 := peers[0] + p1 := peers[1] + pwm.sendWants(p0, wantBlocks, []cid.Cid{}) + + pwm.broadcastWantHaves(cids4) + pq0 := peerQueues[p0].(*mockPQ) + if len(pq0.bcst) != 2 { // only broadcast 2 / 4 want-haves + t.Fatal("Expected 2 want-haves") + } + if 
!testutil.MatchKeysIgnoreOrder(pq0.bcst, []cid.Cid{cids4[1], cids4[3]}) { + t.Fatalf("Expected unsent cids to be broadcast") + } + pq1 := peerQueues[p1].(*mockPQ) + if len(pq1.bcst) != 4 { // broadcast all 4 want-haves + t.Fatal("Expected 4 want-haves") + } + if !testutil.MatchKeysIgnoreOrder(pq1.bcst, cids4) { + t.Fatal("Expected all cids to be broadcast") } allCids := cids @@ -114,17 +163,22 @@ func TestPrepareBroadcastWantHaves(t *testing.T) { allCids = append(allCids, cids4...) // Add another peer - bcst6 := pwm.addPeer(peers[2]) - if !testutil.MatchKeysIgnoreOrder(bcst6, allCids) { + peer2 := peers[2] + pq2 := &mockPQ{} + peerQueues[peer2] = pq2 + pwm.addPeer(pq2, peer2) + if !testutil.MatchKeysIgnoreOrder(pq2.bcst, allCids) { t.Fatalf("Expected all cids to be broadcast.") } - if broadcast := pwm.prepareBroadcastWantHaves(allCids); len(broadcast) != 0 { + clearSent(peerQueues) + pwm.broadcastWantHaves(allCids) + if len(pq2.bcst) != 0 { t.Errorf("did not expect to have CIDs to broadcast") } } -func TestPrepareSendWants(t *testing.T) { +func TestPWMSendWants(t *testing.T) { pwm := newPeerWantManager(&gauge{}) peers := testutil.GeneratePeers(2) @@ -133,68 +187,78 @@ func TestPrepareSendWants(t *testing.T) { cids := testutil.GenerateCids(2) cids2 := testutil.GenerateCids(2) - pwm.addPeer(p0) - pwm.addPeer(p1) + peerQueues := make(map[peer.ID]PeerQueue) + for _, p := range peers[:2] { + pq := &mockPQ{} + peerQueues[p] = pq + pwm.addPeer(pq, p) + } + pq0 := peerQueues[p0].(*mockPQ) + pq1 := peerQueues[p1].(*mockPQ) // Send 2 want-blocks and 2 want-haves to p0 - wb, wh := pwm.prepareSendWants(p0, cids, cids2) - if !testutil.MatchKeysIgnoreOrder(wb, cids) { + clearSent(peerQueues) + pwm.sendWants(p0, cids, cids2) + if !testutil.MatchKeysIgnoreOrder(pq0.wbs, cids) { t.Fatal("Expected 2 want-blocks") } - if !testutil.MatchKeysIgnoreOrder(wh, cids2) { + if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids2) { t.Fatal("Expected 2 want-haves") } // Send to p0 // - 1 old 
want-block and 2 new want-blocks // - 1 old want-have and 2 new want-haves + clearSent(peerQueues) cids3 := testutil.GenerateCids(2) cids4 := testutil.GenerateCids(2) - wb2, wh2 := pwm.prepareSendWants(p0, append(cids3, cids[0]), append(cids4, cids2[0])) - if !testutil.MatchKeysIgnoreOrder(wb2, cids3) { + pwm.sendWants(p0, append(cids3, cids[0]), append(cids4, cids2[0])) + if !testutil.MatchKeysIgnoreOrder(pq0.wbs, cids3) { t.Fatal("Expected 2 want-blocks") } - if !testutil.MatchKeysIgnoreOrder(wh2, cids4) { + if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids4) { t.Fatal("Expected 2 want-haves") } // Send to p0 as want-blocks: 1 new want-block, 1 old want-have + clearSent(peerQueues) cids5 := testutil.GenerateCids(1) newWantBlockOldWantHave := append(cids5, cids2[0]) - wb3, wh3 := pwm.prepareSendWants(p0, newWantBlockOldWantHave, []cid.Cid{}) + pwm.sendWants(p0, newWantBlockOldWantHave, []cid.Cid{}) // If a want was sent as a want-have, it should be ok to now send it as a // want-block - if !testutil.MatchKeysIgnoreOrder(wb3, newWantBlockOldWantHave) { + if !testutil.MatchKeysIgnoreOrder(pq0.wbs, newWantBlockOldWantHave) { t.Fatal("Expected 2 want-blocks") } - if len(wh3) != 0 { + if len(pq0.whs) != 0 { t.Fatal("Expected 0 want-haves") } // Send to p0 as want-haves: 1 new want-have, 1 old want-block + clearSent(peerQueues) cids6 := testutil.GenerateCids(1) newWantHaveOldWantBlock := append(cids6, cids[0]) - wb4, wh4 := pwm.prepareSendWants(p0, []cid.Cid{}, newWantHaveOldWantBlock) + pwm.sendWants(p0, []cid.Cid{}, newWantHaveOldWantBlock) // If a want was previously sent as a want-block, it should not be // possible to now send it as a want-have - if !testutil.MatchKeysIgnoreOrder(wh4, cids6) { + if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids6) { t.Fatal("Expected 1 want-have") } - if len(wb4) != 0 { + if len(pq0.wbs) != 0 { t.Fatal("Expected 0 want-blocks") } // Send 2 want-blocks and 2 want-haves to p1 - wb5, wh5 := pwm.prepareSendWants(p1, cids, cids2) - if 
!testutil.MatchKeysIgnoreOrder(wb5, cids) { + pwm.sendWants(p1, cids, cids2) + if !testutil.MatchKeysIgnoreOrder(pq1.wbs, cids) { t.Fatal("Expected 2 want-blocks") } - if !testutil.MatchKeysIgnoreOrder(wh5, cids2) { + if !testutil.MatchKeysIgnoreOrder(pq1.whs, cids2) { t.Fatal("Expected 2 want-haves") } } -func TestPrepareSendCancels(t *testing.T) { +func TestPWMSendCancels(t *testing.T) { pwm := newPeerWantManager(&gauge{}) peers := testutil.GeneratePeers(2) @@ -207,14 +271,20 @@ func TestPrepareSendCancels(t *testing.T) { allwb := append(wb1, wb2...) allwh := append(wh1, wh2...) - pwm.addPeer(p0) - pwm.addPeer(p1) + peerQueues := make(map[peer.ID]PeerQueue) + for _, p := range peers[:2] { + pq := &mockPQ{} + peerQueues[p] = pq + pwm.addPeer(pq, p) + } + pq0 := peerQueues[p0].(*mockPQ) + pq1 := peerQueues[p1].(*mockPQ) // Send 2 want-blocks and 2 want-haves to p0 - pwm.prepareSendWants(p0, wb1, wh1) + pwm.sendWants(p0, wb1, wh1) // Send 3 want-blocks and 3 want-haves to p1 // (1 overlapping want-block / want-have with p0) - pwm.prepareSendWants(p1, append(wb2, wb1[1]), append(wh2, wh1[1])) + pwm.sendWants(p1, append(wb2, wb1[1]), append(wh2, wh1[1])) if !testutil.MatchKeysIgnoreOrder(pwm.getWantBlocks(), allwb) { t.Fatal("Expected 4 cids to be wanted") @@ -224,12 +294,13 @@ func TestPrepareSendCancels(t *testing.T) { } // Cancel 1 want-block and 1 want-have that were sent to p0 - res := pwm.prepareSendCancels([]cid.Cid{wb1[0], wh1[0]}) + clearSent(peerQueues) + pwm.sendCancels([]cid.Cid{wb1[0], wh1[0]}) // Should cancel the want-block and want-have - if len(res) != 1 { - t.Fatal("Expected 1 peer") + if len(pq1.cancels) != 0 { + t.Fatal("Expected no cancels sent to p1") } - if !testutil.MatchKeysIgnoreOrder(res[p0], []cid.Cid{wb1[0], wh1[0]}) { + if !testutil.MatchKeysIgnoreOrder(pq0.cancels, []cid.Cid{wb1[0], wh1[0]}) { t.Fatal("Expected 2 cids to be cancelled") } if !testutil.MatchKeysIgnoreOrder(pwm.getWantBlocks(), append(wb2, wb1[1])) { @@ -240,18 +311,21 @@ 
func TestPrepareSendCancels(t *testing.T) { } // Cancel everything + clearSent(peerQueues) allCids := append(allwb, allwh...) - res2 := pwm.prepareSendCancels(allCids) - // Should cancel the remaining want-blocks and want-haves - if len(res2) != 2 { - t.Fatal("Expected 2 peers", len(res2)) - } - if !testutil.MatchKeysIgnoreOrder(res2[p0], []cid.Cid{wb1[1], wh1[1]}) { + pwm.sendCancels(allCids) + // Should cancel the remaining want-blocks and want-haves for p0 + if !testutil.MatchKeysIgnoreOrder(pq0.cancels, []cid.Cid{wb1[1], wh1[1]}) { t.Fatal("Expected un-cancelled cids to be cancelled") } - remainingP2 := append(wb2, wh2...) - remainingP2 = append(remainingP2, wb1[1], wh1[1]) - if !testutil.MatchKeysIgnoreOrder(res2[p1], remainingP2) { + + // Should cancel the remaining want-blocks and want-haves for p1 + remainingP1 := append(wb2, wh2...) + remainingP1 = append(remainingP1, wb1[1], wh1[1]) + if len(pq1.cancels) != len(remainingP1) { + t.Fatal("mismatch", len(pq1.cancels), len(remainingP1)) + } + if !testutil.MatchKeysIgnoreOrder(pq1.cancels, remainingP1) { t.Fatal("Expected un-cancelled cids to be cancelled") } if len(pwm.getWantBlocks()) != 0 { @@ -271,10 +345,13 @@ func TestStats(t *testing.T) { cids := testutil.GenerateCids(2) cids2 := testutil.GenerateCids(2) - pwm.addPeer(p0) + peerQueues := make(map[peer.ID]PeerQueue) + pq := &mockPQ{} + peerQueues[p0] = pq + pwm.addPeer(pq, p0) // Send 2 want-blocks and 2 want-haves to p0 - pwm.prepareSendWants(p0, cids, cids2) + pwm.sendWants(p0, cids, cids2) if g.count != 2 { t.Fatal("Expected 2 want-blocks") @@ -282,7 +359,7 @@ func TestStats(t *testing.T) { // Send 1 old want-block and 2 new want-blocks to p0 cids3 := testutil.GenerateCids(2) - pwm.prepareSendWants(p0, append(cids3, cids[0]), []cid.Cid{}) + pwm.sendWants(p0, append(cids3, cids[0]), []cid.Cid{}) if g.count != 4 { t.Fatal("Expected 4 want-blocks") @@ -291,7 +368,7 @@ func TestStats(t *testing.T) { // Cancel 1 want-block that was sent to p0 // and 1 
want-block that was not sent cids4 := testutil.GenerateCids(1) - pwm.prepareSendCancels(append(cids4, cids[0])) + pwm.sendCancels(append(cids4, cids[0])) if g.count != 3 { t.Fatal("Expected 3 want-blocks", g.count) From ffbf58876ecb43787ba2c212bf446e096337b50f Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 20 May 2020 10:30:05 -0400 Subject: [PATCH 0945/1035] perf: improve cancel wants perf This commit was moved from ipfs/go-bitswap@6d9c17eba99fedb256155d8f71d0942bf2c72f7f --- .../internal/peermanager/peerwantmanager.go | 36 ++++++++++++------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/bitswap/internal/peermanager/peerwantmanager.go b/bitswap/internal/peermanager/peerwantmanager.go index 9b6198afa..421032d2c 100644 --- a/bitswap/internal/peermanager/peerwantmanager.go +++ b/bitswap/internal/peermanager/peerwantmanager.go @@ -197,23 +197,27 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { return } - // Handle broadcast wants up-front - broadcastKs := make([]cid.Cid, 0, len(cancelKs)) + // Create a buffer to use for filtering cancels per peer, with the + // broadcast wants at the front of the buffer (broadcast wants are sent to + // all peers) + i := 0 + cancelsBuff := make([]cid.Cid, len(cancelKs)) for _, c := range cancelKs { if pwm.broadcastWants.Has(c) { - broadcastKs = append(broadcastKs, c) - pwm.broadcastWants.Remove(c) + cancelsBuff[i] = c + i++ } } - - // Allocate a single buffer to filter the cancels to send to each peer - cancelsBuff := make([]cid.Cid, 0, len(cancelKs)) + broadcastKsCount := i // Send cancels to a particular peer send := func(p peer.ID, pws *peerWant) { - // Include broadcast cancels - peerCancels := append(cancelsBuff[:0], broadcastKs...) 
+ // Start the index into the buffer after the broadcast wants + i = broadcastKsCount + + // For each key to be cancelled for _, c := range cancelKs { + // Check if a want was sent for the key wantBlock := pws.wantBlocks.Has(c) if !wantBlock && !pws.wantHaves.Has(c) { continue @@ -231,17 +235,18 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { // If it's a broadcast want, we've already added it to // the peer cancels. if !pwm.broadcastWants.Has(c) { - peerCancels = append(peerCancels, c) + cancelsBuff[i] = c + i++ } } // Send cancels to the peer - if len(peerCancels) > 0 { - pws.peerQueue.AddCancels(peerCancels) + if i > 0 { + pws.peerQueue.AddCancels(cancelsBuff[:i]) } } - if len(broadcastKs) > 0 { + if broadcastKsCount > 0 { // If a broadcast want is being cancelled, send the cancel to all // peers for p, pws := range pwm.peerWants { @@ -267,6 +272,11 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { } } + // Remove cancelled broadcast wants + for _, c := range cancelsBuff[:broadcastKsCount] { + pwm.broadcastWants.Remove(c) + } + // Finally, batch-remove the reverse-index. There's no need to // clear this index peer-by-peer. for _, c := range cancelKs { From 6d118d27649528e564deaee2f5938e2819ed1187 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 20 May 2020 17:16:12 -0700 Subject: [PATCH 0946/1035] feat: simplify broadcast cancel logic (#399) Instead of tracking offsets, just create a "new" slice starting with the broadcast cancel slice. Under the covers, this will just use the same memory over and over. 
This commit was moved from ipfs/go-bitswap@60b07e9250acb5cf20fa71739d6fd9cdb36d357c --- .../internal/peermanager/peerwantmanager.go | 22 ++++++++----------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/bitswap/internal/peermanager/peerwantmanager.go b/bitswap/internal/peermanager/peerwantmanager.go index 421032d2c..16d191378 100644 --- a/bitswap/internal/peermanager/peerwantmanager.go +++ b/bitswap/internal/peermanager/peerwantmanager.go @@ -200,20 +200,17 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { // Create a buffer to use for filtering cancels per peer, with the // broadcast wants at the front of the buffer (broadcast wants are sent to // all peers) - i := 0 - cancelsBuff := make([]cid.Cid, len(cancelKs)) + broadcastCancels := make([]cid.Cid, 0, len(cancelKs)) for _, c := range cancelKs { if pwm.broadcastWants.Has(c) { - cancelsBuff[i] = c - i++ + broadcastCancels = append(broadcastCancels, c) } } - broadcastKsCount := i // Send cancels to a particular peer send := func(p peer.ID, pws *peerWant) { - // Start the index into the buffer after the broadcast wants - i = broadcastKsCount + // Start from the broadcast cancels + toCancel := broadcastCancels // For each key to be cancelled for _, c := range cancelKs { @@ -235,18 +232,17 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { // If it's a broadcast want, we've already added it to // the peer cancels. 
if !pwm.broadcastWants.Has(c) { - cancelsBuff[i] = c - i++ + toCancel = append(toCancel, c) } } // Send cancels to the peer - if i > 0 { - pws.peerQueue.AddCancels(cancelsBuff[:i]) + if len(toCancel) > 0 { + pws.peerQueue.AddCancels(toCancel) } } - if broadcastKsCount > 0 { + if len(broadcastCancels) > 0 { // If a broadcast want is being cancelled, send the cancel to all // peers for p, pws := range pwm.peerWants { @@ -273,7 +269,7 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { } // Remove cancelled broadcast wants - for _, c := range cancelsBuff[:broadcastKsCount] { + for _, c := range broadcastCancels { pwm.broadcastWants.Remove(c) } From d64c0a809d14dd4060c347e6d7261d92b4fe3984 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Tue, 2 Jun 2020 11:07:42 -0400 Subject: [PATCH 0947/1035] Total wants gauge (#402) * feat: total wants gauge * fix: in gauges count wants regardless of which peers they're sent to * fix: want block gauge calculation * refactor: simplify peermanagerwants This commit was moved from ipfs/go-bitswap@88373cd4d30a9e66256ce0fd9d5a7309703f3273 --- bitswap/internal/peermanager/peermanager.go | 3 +- .../internal/peermanager/peerwantmanager.go | 99 +++++++++++++++---- .../peermanager/peerwantmanager_test.go | 82 ++++++++++++--- 3 files changed, 149 insertions(+), 35 deletions(-) diff --git a/bitswap/internal/peermanager/peermanager.go b/bitswap/internal/peermanager/peermanager.go index 0ce735846..4c489dd8a 100644 --- a/bitswap/internal/peermanager/peermanager.go +++ b/bitswap/internal/peermanager/peermanager.go @@ -52,9 +52,10 @@ type PeerManager struct { // New creates a new PeerManager, given a context and a peerQueueFactory. 
func New(ctx context.Context, createPeerQueue PeerQueueFactory, self peer.ID) *PeerManager { wantGauge := metrics.NewCtx(ctx, "wantlist_total", "Number of items in wantlist.").Gauge() + wantBlockGauge := metrics.NewCtx(ctx, "want_blocks_total", "Number of want-blocks in wantlist.").Gauge() return &PeerManager{ peerQueues: make(map[peer.ID]PeerQueue), - pwm: newPeerWantManager(wantGauge), + pwm: newPeerWantManager(wantGauge, wantBlockGauge), createPeerQueue: createPeerQueue, ctx: ctx, self: self, diff --git a/bitswap/internal/peermanager/peerwantmanager.go b/bitswap/internal/peermanager/peerwantmanager.go index 16d191378..ee81649a7 100644 --- a/bitswap/internal/peermanager/peerwantmanager.go +++ b/bitswap/internal/peermanager/peerwantmanager.go @@ -30,6 +30,8 @@ type peerWantManager struct { // broadcastWants tracks all the current broadcast wants. broadcastWants *cid.Set + // Keeps track of the number of active want-haves & want-blocks + wantGauge Gauge // Keeps track of the number of active want-blocks wantBlockGauge Gauge } @@ -42,11 +44,12 @@ type peerWant struct { // New creates a new peerWantManager with a Gauge that keeps track of the // number of active want-blocks (ie sent but no response received) -func newPeerWantManager(wantBlockGauge Gauge) *peerWantManager { +func newPeerWantManager(wantGauge Gauge, wantBlockGauge Gauge) *peerWantManager { return &peerWantManager{ broadcastWants: cid.NewSet(), peerWants: make(map[peer.ID]*peerWant), wantPeers: make(map[cid.Cid]map[peer.ID]struct{}), + wantGauge: wantGauge, wantBlockGauge: wantBlockGauge, } } @@ -78,17 +81,30 @@ func (pwm *peerWantManager) removePeer(p peer.ID) { return } + // Clean up want-blocks _ = pws.wantBlocks.ForEach(func(c cid.Cid) error { - // Decrement the gauge by the number of pending want-blocks to the peer - pwm.wantBlockGauge.Dec() // Clean up want-blocks from the reverse index - pwm.reverseIndexRemove(c, p) + removedLastPeer := pwm.reverseIndexRemove(c, p) + + // Decrement the gauges by 
the number of pending want-blocks to the peer + if removedLastPeer { + pwm.wantBlockGauge.Dec() + if !pwm.broadcastWants.Has(c) { + pwm.wantGauge.Dec() + } + } return nil }) - // Clean up want-haves from the reverse index + // Clean up want-haves _ = pws.wantHaves.ForEach(func(c cid.Cid) error { - pwm.reverseIndexRemove(c, p) + // Clean up want-haves from the reverse index + removedLastPeer := pwm.reverseIndexRemove(c, p) + + // Decrement the gauge by the number of pending want-haves to the peer + if removedLastPeer && !pwm.broadcastWants.Has(c) { + pwm.wantGauge.Dec() + } return nil }) @@ -105,6 +121,11 @@ func (pwm *peerWantManager) broadcastWantHaves(wantHaves []cid.Cid) { } pwm.broadcastWants.Add(c) unsent = append(unsent, c) + + // Increment the total wants gauge + if _, ok := pwm.wantPeers[c]; !ok { + pwm.wantGauge.Inc() + } } if len(unsent) == 0 { @@ -151,17 +172,22 @@ func (pwm *peerWantManager) sendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves // Record that the CID was sent as a want-block pws.wantBlocks.Add(c) - // Update the reverse index - pwm.reverseIndexAdd(c, p) - // Add the CID to the results fltWantBlks = append(fltWantBlks, c) // Make sure the CID is no longer recorded as a want-have pws.wantHaves.Remove(c) - // Increment the count of want-blocks - pwm.wantBlockGauge.Inc() + // Update the reverse index + isNew := pwm.reverseIndexAdd(c, p) + + // Increment the want gauges + if isNew { + pwm.wantBlockGauge.Inc() + if !pwm.broadcastWants.Has(c) { + pwm.wantGauge.Inc() + } + } } } @@ -178,11 +204,16 @@ func (pwm *peerWantManager) sendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves // Record that the CID was sent as a want-have pws.wantHaves.Add(c) - // Update the reverse index - pwm.reverseIndexAdd(c, p) - // Add the CID to the results fltWantHvs = append(fltWantHvs, c) + + // Update the reverse index + isNew := pwm.reverseIndexAdd(c, p) + + // Increment the total wants gauge + if isNew && !pwm.broadcastWants.Has(c) { + pwm.wantGauge.Inc() + } } 
} @@ -207,6 +238,9 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { } } + cancelledWantBlocks := cid.NewSet() + cancelledWantHaves := cid.NewSet() + // Send cancels to a particular peer send := func(p peer.ID, pws *peerWant) { // Start from the broadcast cancels @@ -216,13 +250,15 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { for _, c := range cancelKs { // Check if a want was sent for the key wantBlock := pws.wantBlocks.Has(c) - if !wantBlock && !pws.wantHaves.Has(c) { - continue - } + wantHave := pws.wantHaves.Has(c) - // Update the want gauge. + // Update the want gauges if wantBlock { - pwm.wantBlockGauge.Dec() + cancelledWantBlocks.Add(c) + } else if wantHave { + cancelledWantHaves.Add(c) + } else { + continue } // Unconditionally remove from the want lists. @@ -271,33 +307,54 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { // Remove cancelled broadcast wants for _, c := range broadcastCancels { pwm.broadcastWants.Remove(c) + + // Decrement the total wants gauge for broadcast wants + if !cancelledWantHaves.Has(c) && !cancelledWantBlocks.Has(c) { + pwm.wantGauge.Dec() + } } + // Decrement the total wants gauge for peer wants + _ = cancelledWantHaves.ForEach(func(c cid.Cid) error { + pwm.wantGauge.Dec() + return nil + }) + _ = cancelledWantBlocks.ForEach(func(c cid.Cid) error { + pwm.wantGauge.Dec() + pwm.wantBlockGauge.Dec() + return nil + }) + // Finally, batch-remove the reverse-index. There's no need to // clear this index peer-by-peer. 
for _, c := range cancelKs { delete(pwm.wantPeers, c) } + } // Add the peer to the list of peers that have sent a want with the cid -func (pwm *peerWantManager) reverseIndexAdd(c cid.Cid, p peer.ID) { +func (pwm *peerWantManager) reverseIndexAdd(c cid.Cid, p peer.ID) bool { peers, ok := pwm.wantPeers[c] if !ok { peers = make(map[peer.ID]struct{}, 10) pwm.wantPeers[c] = peers } peers[p] = struct{}{} + return !ok } // Remove the peer from the list of peers that have sent a want with the cid -func (pwm *peerWantManager) reverseIndexRemove(c cid.Cid, p peer.ID) { +func (pwm *peerWantManager) reverseIndexRemove(c cid.Cid, p peer.ID) bool { if peers, ok := pwm.wantPeers[c]; ok { delete(peers, p) if len(peers) == 0 { delete(pwm.wantPeers, c) + return true } } + + return false } // GetWantBlocks returns the set of all want-blocks sent to all peers diff --git a/bitswap/internal/peermanager/peerwantmanager_test.go b/bitswap/internal/peermanager/peerwantmanager_test.go index 396ea0d82..60b7c8e72 100644 --- a/bitswap/internal/peermanager/peerwantmanager_test.go +++ b/bitswap/internal/peermanager/peerwantmanager_test.go @@ -56,7 +56,7 @@ func clearSent(pqs map[peer.ID]PeerQueue) { } func TestEmpty(t *testing.T) { - pwm := newPeerWantManager(&gauge{}) + pwm := newPeerWantManager(&gauge{}, &gauge{}) if len(pwm.getWantBlocks()) > 0 { t.Fatal("Expected GetWantBlocks() to have length 0") @@ -67,7 +67,7 @@ func TestEmpty(t *testing.T) { } func TestPWMBroadcastWantHaves(t *testing.T) { - pwm := newPeerWantManager(&gauge{}) + pwm := newPeerWantManager(&gauge{}, &gauge{}) peers := testutil.GeneratePeers(3) cids := testutil.GenerateCids(2) @@ -179,7 +179,7 @@ func TestPWMBroadcastWantHaves(t *testing.T) { } func TestPWMSendWants(t *testing.T) { - pwm := newPeerWantManager(&gauge{}) + pwm := newPeerWantManager(&gauge{}, &gauge{}) peers := testutil.GeneratePeers(2) p0 := peers[0] @@ -259,7 +259,7 @@ func TestPWMSendWants(t *testing.T) { } func TestPWMSendCancels(t *testing.T) { - pwm := 
newPeerWantManager(&gauge{}) + pwm := newPeerWantManager(&gauge{}, &gauge{}) peers := testutil.GeneratePeers(2) p0 := peers[0] @@ -338,10 +338,12 @@ func TestPWMSendCancels(t *testing.T) { func TestStats(t *testing.T) { g := &gauge{} - pwm := newPeerWantManager(g) + wbg := &gauge{} + pwm := newPeerWantManager(g, wbg) peers := testutil.GeneratePeers(2) p0 := peers[0] + p1 := peers[1] cids := testutil.GenerateCids(2) cids2 := testutil.GenerateCids(2) @@ -353,7 +355,10 @@ func TestStats(t *testing.T) { // Send 2 want-blocks and 2 want-haves to p0 pwm.sendWants(p0, cids, cids2) - if g.count != 2 { + if g.count != 4 { + t.Fatal("Expected 4 wants") + } + if wbg.count != 2 { t.Fatal("Expected 2 want-blocks") } @@ -361,22 +366,73 @@ func TestStats(t *testing.T) { cids3 := testutil.GenerateCids(2) pwm.sendWants(p0, append(cids3, cids[0]), []cid.Cid{}) - if g.count != 4 { + if g.count != 6 { + t.Fatal("Expected 6 wants") + } + if wbg.count != 4 { + t.Fatal("Expected 4 want-blocks") + } + + // Broadcast 1 old want-have and 2 new want-haves + cids4 := testutil.GenerateCids(2) + pwm.broadcastWantHaves(append(cids4, cids2[0])) + if g.count != 8 { + t.Fatal("Expected 8 wants") + } + if wbg.count != 4 { + t.Fatal("Expected 4 want-blocks") + } + + // Add a second peer + pwm.addPeer(pq, p1) + + if g.count != 8 { + t.Fatal("Expected 8 wants") + } + if wbg.count != 4 { t.Fatal("Expected 4 want-blocks") } // Cancel 1 want-block that was sent to p0 // and 1 want-block that was not sent - cids4 := testutil.GenerateCids(1) - pwm.sendCancels(append(cids4, cids[0])) + cids5 := testutil.GenerateCids(1) + pwm.sendCancels(append(cids5, cids[0])) - if g.count != 3 { - t.Fatal("Expected 3 want-blocks", g.count) + if g.count != 7 { + t.Fatal("Expected 7 wants") + } + if wbg.count != 3 { + t.Fatal("Expected 3 want-blocks") } + // Remove first peer pwm.removePeer(p0) - if g.count != 0 { - t.Fatal("Expected all want-blocks to be removed with peer", g.count) + // Should still have 3 broadcast wants + 
if g.count != 3 { + t.Fatal("Expected 3 wants") + } + if wbg.count != 0 { + t.Fatal("Expected all want-blocks to be removed") + } + + // Remove second peer + pwm.removePeer(p1) + + // Should still have 3 broadcast wants + if g.count != 3 { + t.Fatal("Expected 3 wants") + } + if wbg.count != 0 { + t.Fatal("Expected 0 want-blocks") + } + + // Cancel one remaining broadcast want-have + pwm.sendCancels(cids2[:1]) + if g.count != 2 { + t.Fatal("Expected 2 wants") + } + if wbg.count != 0 { + t.Fatal("Expected 0 want-blocks") } } From ace323aecc4a8c3d5899549371794f8e164c181f Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 3 Jun 2020 10:30:34 -0400 Subject: [PATCH 0948/1035] fix: ensure sessions register with PeerManager This commit was moved from ipfs/go-bitswap@103776ec96bb3d503110f7cb593fe2162e085c1c --- bitswap/internal/session/sessionwantsender.go | 6 ++- .../session/sessionwantsender_test.go | 50 +++++++++++++++++++ 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/bitswap/internal/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go index 094d9096b..036a7e910 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -271,7 +271,11 @@ func (sws *sessionWantSender) onChange(changes []change) { // If the update includes blocks or haves, treat it as signaling that // the peer is available if len(chng.update.ks) > 0 || len(chng.update.haves) > 0 { - availability[chng.update.from] = true + p := chng.update.from + availability[p] = true + + // Register with the PeerManager + sws.pm.RegisterSession(p, sws) } updates = append(updates, chng.update) diff --git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go index 6c3059c1f..a36eb432e 100644 --- a/bitswap/internal/session/sessionwantsender_test.go +++ b/bitswap/internal/session/sessionwantsender_test.go @@ -66,6 +66,16 @@ func (pm *mockPeerManager) RegisterSession(p 
peer.ID, sess bspm.Session) bool { return true } +func (pm *mockPeerManager) has(p peer.ID, sid uint64) bool { + pm.lk.Lock() + defer pm.lk.Unlock() + + if session, ok := pm.peerSessions[p]; ok { + return session.ID() == sid + } + return false +} + func (*mockPeerManager) UnregisterSession(uint64) {} func (*mockPeerManager) BroadcastWantHaves(context.Context, []cid.Cid) {} func (*mockPeerManager) SendCancels(context.Context, []cid.Cid) {} @@ -324,6 +334,46 @@ func TestCancelWants(t *testing.T) { } } +func TestRegisterSessionWithPeerManager(t *testing.T) { + cids := testutil.GenerateCids(2) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // peerA: HAVE cid0 + spm.Update(peerA, nil, cids[:1], nil) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Expect session to have been registered with PeerManager + if !pm.has(peerA, sid) { + t.Fatal("Expected HAVE to register session with PeerManager") + } + + // peerB: block cid1 + spm.Update(peerB, cids[1:], nil, nil) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Expect session to have been registered with PeerManager + if !pm.has(peerB, sid) { + t.Fatal("Expected HAVE to register session with PeerManager") + } +} + func TestPeerUnavailable(t *testing.T) { cids := testutil.GenerateCids(2) peers := testutil.GeneratePeers(2) From 362527ad9e3098acdc891b80e6689c123ca62a43 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 3 Jun 2020 16:00:15 -0400 Subject: [PATCH 0949/1035] feat: protect connection for session peers that are first to send block This commit was moved from 
ipfs/go-bitswap@ba0f59c33ca033cb497b0a5837ada652f84c9e31 --- bitswap/internal/session/session.go | 2 + bitswap/internal/session/session_test.go | 36 ++++++++-- bitswap/internal/session/sessionwantsender.go | 5 ++ .../session/sessionwantsender_test.go | 59 +++++++++++++++++ .../sessionmanager/sessionmanager_test.go | 13 ++-- .../sessionpeermanager/sessionpeermanager.go | 15 +++++ .../sessionpeermanager_test.go | 66 ++++++++++++++++++- 7 files changed, 182 insertions(+), 14 deletions(-) diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index 7a0d23b36..7b2953f95 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -65,6 +65,8 @@ type SessionPeerManager interface { Peers() []peer.ID // Whether there are any peers in the session HasPeers() bool + // Protect connection from being pruned by the connection manager + ProtectConnection(peer.ID) } // ProviderFinder is used to find providers for a given key diff --git a/bitswap/internal/session/session_test.go b/bitswap/internal/session/session_test.go index 028ee46e2..e553bb876 100644 --- a/bitswap/internal/session/session_test.go +++ b/bitswap/internal/session/session_test.go @@ -56,16 +56,42 @@ func newFakeSessionPeerManager() *bsspm.SessionPeerManager { return bsspm.New(1, newFakePeerTagger()) } -type fakePeerTagger struct { +func newFakePeerTagger() *fakePeerTagger { + return &fakePeerTagger{ + protectedPeers: make(map[peer.ID]map[string]struct{}), + } } -func newFakePeerTagger() *fakePeerTagger { - return &fakePeerTagger{} +type fakePeerTagger struct { + lk sync.Mutex + protectedPeers map[peer.ID]map[string]struct{} } -func (fpt *fakePeerTagger) TagPeer(p peer.ID, tag string, val int) { +func (fpt *fakePeerTagger) TagPeer(p peer.ID, tag string, val int) {} +func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) {} + +func (fpt *fakePeerTagger) Protect(p peer.ID, tag string) { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + tags, ok := 
fpt.protectedPeers[p] + if !ok { + tags = make(map[string]struct{}) + fpt.protectedPeers[p] = tags + } + tags[tag] = struct{}{} } -func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) { + +func (fpt *fakePeerTagger) Unprotect(p peer.ID, tag string) bool { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + if tags, ok := fpt.protectedPeers[p]; ok { + delete(tags, tag) + return len(tags) > 0 + } + + return false } type fakeProviderFinder struct { diff --git a/bitswap/internal/session/sessionwantsender.go b/bitswap/internal/session/sessionwantsender.go index 036a7e910..95439a9bf 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/internal/session/sessionwantsender.go @@ -379,6 +379,11 @@ func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { // Inform the peer tracker that this peer was the first to send // us the block sws.peerRspTrkr.receivedBlockFrom(upd.from) + + // Protect the connection to this peer so that we can ensure + // that the connection doesn't get pruned by the connection + // manager + sws.spm.ProtectConnection(upd.from) } delete(sws.peerConsecutiveDontHaves, upd.from) } diff --git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go index a36eb432e..de73c564e 100644 --- a/bitswap/internal/session/sessionwantsender_test.go +++ b/bitswap/internal/session/sessionwantsender_test.go @@ -2,12 +2,14 @@ package session import ( "context" + "fmt" "sync" "testing" "time" bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" bspm "github.com/ipfs/go-bitswap/internal/peermanager" + bsspm "github.com/ipfs/go-bitswap/internal/sessionpeermanager" "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" @@ -374,6 +376,63 @@ func TestRegisterSessionWithPeerManager(t *testing.T) { } } +func TestProtectConnFirstPeerToSendWantedBlock(t *testing.T) { + cids := testutil.GenerateCids(2) + peers := 
testutil.GeneratePeers(3) + peerA := peers[0] + peerB := peers[1] + peerC := peers[2] + sid := uint64(1) + sidStr := fmt.Sprintf("%d", sid) + pm := newMockPeerManager() + fpt := newFakePeerTagger() + fpm := bsspm.New(1, fpt) + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // add cid0 + spm.Add(cids[:1]) + + // peerA: block cid0 + spm.Update(peerA, cids[:1], nil, nil) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Expect peer A to be protected as it was first to send the block + if _, ok := fpt.protectedPeers[peerA][sidStr]; !ok { + t.Fatal("Expected first peer to send block to have protected connection") + } + + // peerB: block cid0 + spm.Update(peerB, cids[:1], nil, nil) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Expect peer B not to be protected as it was not first to send the block + if _, ok := fpt.protectedPeers[peerB][sidStr]; ok { + t.Fatal("Expected peer not to be protected") + } + + // peerC: block cid1 + spm.Update(peerC, cids[1:], nil, nil) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Expect peer C not to be protected as we didn't want the block it sent + if _, ok := fpt.protectedPeers[peerC][sidStr]; ok { + t.Fatal("Expected peer not to be protected") + } +} + func TestPeerUnavailable(t *testing.T) { cids := testutil.GenerateCids(2) peers := testutil.GeneratePeers(2) diff --git a/bitswap/internal/sessionmanager/sessionmanager_test.go b/bitswap/internal/sessionmanager/sessionmanager_test.go index 3be1f9b55..fb8445f1e 100644 --- a/bitswap/internal/sessionmanager/sessionmanager_test.go +++ b/bitswap/internal/sessionmanager/sessionmanager_test.go @@ -51,12 +51,13 @@ func (fs *fakeSession) Shutdown() { type fakeSesPeerManager struct { 
} -func (*fakeSesPeerManager) Peers() []peer.ID { return nil } -func (*fakeSesPeerManager) PeersDiscovered() bool { return false } -func (*fakeSesPeerManager) Shutdown() {} -func (*fakeSesPeerManager) AddPeer(peer.ID) bool { return false } -func (*fakeSesPeerManager) RemovePeer(peer.ID) bool { return false } -func (*fakeSesPeerManager) HasPeers() bool { return false } +func (*fakeSesPeerManager) Peers() []peer.ID { return nil } +func (*fakeSesPeerManager) PeersDiscovered() bool { return false } +func (*fakeSesPeerManager) Shutdown() {} +func (*fakeSesPeerManager) AddPeer(peer.ID) bool { return false } +func (*fakeSesPeerManager) RemovePeer(peer.ID) bool { return false } +func (*fakeSesPeerManager) HasPeers() bool { return false } +func (*fakeSesPeerManager) ProtectConnection(peer.ID) {} type fakePeerManager struct { lk sync.Mutex diff --git a/bitswap/internal/sessionpeermanager/sessionpeermanager.go b/bitswap/internal/sessionpeermanager/sessionpeermanager.go index 499aa830b..1ad144d26 100644 --- a/bitswap/internal/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/internal/sessionpeermanager/sessionpeermanager.go @@ -21,6 +21,8 @@ const ( type PeerTagger interface { TagPeer(peer.ID, string, int) UntagPeer(p peer.ID, tag string) + Protect(peer.ID, string) + Unprotect(peer.ID, string) bool } // SessionPeerManager keeps track of peers for a session, and takes care of @@ -67,6 +69,18 @@ func (spm *SessionPeerManager) AddPeer(p peer.ID) bool { return true } +// Protect connection to this peer from being pruned by the connection manager +func (spm *SessionPeerManager) ProtectConnection(p peer.ID) { + spm.plk.Lock() + defer spm.plk.Unlock() + + if _, ok := spm.peers[p]; !ok { + return + } + + spm.tagger.Protect(p, fmt.Sprintf("%d", spm.id)) +} + // RemovePeer removes the peer from the SessionPeerManager. // Returns true if the peer was removed, false if it did not exist. 
func (spm *SessionPeerManager) RemovePeer(p peer.ID) bool { @@ -79,6 +93,7 @@ func (spm *SessionPeerManager) RemovePeer(p peer.ID) bool { delete(spm.peers, p) spm.tagger.UntagPeer(p, spm.tag) + spm.tagger.Unprotect(p, fmt.Sprintf("%d", spm.id)) log.Debugw("Bitswap: removed peer from session", "session", spm.id, "peer", p, "peerCount", len(spm.peers)) return true diff --git a/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go b/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go index e3c1c4ab4..ba3a3427d 100644 --- a/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go @@ -1,6 +1,7 @@ package sessionpeermanager import ( + "fmt" "sync" "testing" @@ -9,9 +10,16 @@ import ( ) type fakePeerTagger struct { - lk sync.Mutex - taggedPeers []peer.ID - wait sync.WaitGroup + lk sync.Mutex + taggedPeers []peer.ID + protectedPeers map[peer.ID]map[string]struct{} + wait sync.WaitGroup +} + +func newFakePeerTagger() *fakePeerTagger { + return &fakePeerTagger{ + protectedPeers: make(map[peer.ID]map[string]struct{}), + } } func (fpt *fakePeerTagger) TagPeer(p peer.ID, tag string, n int) { @@ -36,6 +44,30 @@ func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) { } } +func (fpt *fakePeerTagger) Protect(p peer.ID, tag string) { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + tags, ok := fpt.protectedPeers[p] + if !ok { + tags = make(map[string]struct{}) + fpt.protectedPeers[p] = tags + } + tags[tag] = struct{}{} +} + +func (fpt *fakePeerTagger) Unprotect(p peer.ID, tag string) bool { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + if tags, ok := fpt.protectedPeers[p]; ok { + delete(tags, tag) + return len(tags) > 0 + } + + return false +} + func TestAddPeers(t *testing.T) { peers := testutil.GeneratePeers(2) spm := New(1, &fakePeerTagger{}) @@ -208,6 +240,34 @@ func TestPeerTagging(t *testing.T) { } } +func TestProtectConnection(t *testing.T) { + peers := testutil.GeneratePeers(1) + 
peerA := peers[0] + fpt := newFakePeerTagger() + sid := 1 + sidstr := fmt.Sprintf("%d", sid) + spm := New(1, fpt) + + // Should not protect connection if peer hasn't been added yet + spm.ProtectConnection(peerA) + if _, ok := fpt.protectedPeers[peerA][sidstr]; ok { + t.Fatal("Expected peer not to be protected") + } + + // Once peer is added, should be able to protect connection + spm.AddPeer(peerA) + spm.ProtectConnection(peerA) + if _, ok := fpt.protectedPeers[peerA][sidstr]; !ok { + t.Fatal("Expected peer to be protected") + } + + // Removing peer should unprotect connection + spm.RemovePeer(peerA) + if _, ok := fpt.protectedPeers[peerA][sidstr]; ok { + t.Fatal("Expected peer to be unprotected") + } +} + func TestShutdown(t *testing.T) { peers := testutil.GeneratePeers(2) fpt := &fakePeerTagger{} From 64fd392a6ad42adce1fab45850ea980fb436e43a Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 3 Jun 2020 16:10:34 -0400 Subject: [PATCH 0950/1035] fix: ensure conns are unprotected on shutdown This commit was moved from ipfs/go-bitswap@c7e7afca3f78a56d19088cb5023f0b5e0379daed --- .../sessionpeermanager/sessionpeermanager.go | 9 +++++++-- .../sessionpeermanager_test.go | 17 +++++++++++++++-- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/bitswap/internal/sessionpeermanager/sessionpeermanager.go b/bitswap/internal/sessionpeermanager/sessionpeermanager.go index 1ad144d26..e5442d5c4 100644 --- a/bitswap/internal/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/internal/sessionpeermanager/sessionpeermanager.go @@ -78,7 +78,7 @@ func (spm *SessionPeerManager) ProtectConnection(p peer.ID) { return } - spm.tagger.Protect(p, fmt.Sprintf("%d", spm.id)) + spm.tagger.Protect(p, spm.protectedTag()) } // RemovePeer removes the peer from the SessionPeerManager. 
@@ -93,7 +93,7 @@ func (spm *SessionPeerManager) RemovePeer(p peer.ID) bool { delete(spm.peers, p) spm.tagger.UntagPeer(p, spm.tag) - spm.tagger.Unprotect(p, fmt.Sprintf("%d", spm.id)) + spm.tagger.Unprotect(p, spm.protectedTag()) log.Debugw("Bitswap: removed peer from session", "session", spm.id, "peer", p, "peerCount", len(spm.peers)) return true @@ -145,5 +145,10 @@ func (spm *SessionPeerManager) Shutdown() { // connections to those peers for p := range spm.peers { spm.tagger.UntagPeer(p, spm.tag) + spm.tagger.Unprotect(p, spm.protectedTag()) } } + +func (spm *SessionPeerManager) protectedTag() string { + return fmt.Sprintf("%d", spm.id) +} diff --git a/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go b/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go index ba3a3427d..7bb36b342 100644 --- a/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go @@ -62,6 +62,9 @@ func (fpt *fakePeerTagger) Unprotect(p peer.ID, tag string) bool { if tags, ok := fpt.protectedPeers[p]; ok { delete(tags, tag) + if len(tags) == 0 { + delete(fpt.protectedPeers, p) + } return len(tags) > 0 } @@ -270,8 +273,10 @@ func TestProtectConnection(t *testing.T) { func TestShutdown(t *testing.T) { peers := testutil.GeneratePeers(2) - fpt := &fakePeerTagger{} - spm := New(1, fpt) + fpt := newFakePeerTagger() + sid := uint64(1) + sidstr := fmt.Sprintf("%d", sid) + spm := New(sid, fpt) spm.AddPeer(peers[0]) spm.AddPeer(peers[1]) @@ -279,9 +284,17 @@ func TestShutdown(t *testing.T) { t.Fatal("Expected to have tagged two peers") } + spm.ProtectConnection(peers[0]) + if _, ok := fpt.protectedPeers[peers[0]][sidstr]; !ok { + t.Fatal("Expected peer to be protected") + } + spm.Shutdown() if len(fpt.taggedPeers) != 0 { t.Fatal("Expected to have untagged all peers") } + if len(fpt.protectedPeers) != 0 { + t.Fatal("Expected to have unprotected all peers") + } } From 
2a401dcb8a0813a9eba49951d83e70e3eb0dce84 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 3 Jun 2020 16:26:34 -0400 Subject: [PATCH 0951/1035] fix: race in tests This commit was moved from ipfs/go-bitswap@a38d8a9cce10c8b5d0a086632702225ad74f5198 --- bitswap/internal/session/session_test.go | 8 ++++++++ bitswap/internal/session/sessionwantsender_test.go | 6 +++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/bitswap/internal/session/session_test.go b/bitswap/internal/session/session_test.go index e553bb876..b6aa5b5ee 100644 --- a/bitswap/internal/session/session_test.go +++ b/bitswap/internal/session/session_test.go @@ -94,6 +94,14 @@ func (fpt *fakePeerTagger) Unprotect(p peer.ID, tag string) bool { return false } +func (fpt *fakePeerTagger) isProtected(p peer.ID, tag string) bool { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + _, ok := fpt.protectedPeers[p][tag] + return ok +} + type fakeProviderFinder struct { findMorePeersRequested chan cid.Cid } diff --git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go index de73c564e..08c465bf7 100644 --- a/bitswap/internal/session/sessionwantsender_test.go +++ b/bitswap/internal/session/sessionwantsender_test.go @@ -406,7 +406,7 @@ func TestProtectConnFirstPeerToSendWantedBlock(t *testing.T) { time.Sleep(10 * time.Millisecond) // Expect peer A to be protected as it was first to send the block - if _, ok := fpt.protectedPeers[peerA][sidStr]; !ok { + if !fpt.isProtected(peerA, sidStr) { t.Fatal("Expected first peer to send block to have protected connection") } @@ -417,7 +417,7 @@ func TestProtectConnFirstPeerToSendWantedBlock(t *testing.T) { time.Sleep(10 * time.Millisecond) // Expect peer B not to be protected as it was not first to send the block - if _, ok := fpt.protectedPeers[peerB][sidStr]; ok { + if fpt.isProtected(peerB, sidStr) { t.Fatal("Expected peer not to be protected") } @@ -428,7 +428,7 @@ func 
TestProtectConnFirstPeerToSendWantedBlock(t *testing.T) { time.Sleep(10 * time.Millisecond) // Expect peer C not to be protected as we didn't want the block it sent - if _, ok := fpt.protectedPeers[peerC][sidStr]; ok { + if fpt.isProtected(peerC, sidStr) { t.Fatal("Expected peer not to be protected") } } From 46e24892acde94acea1132832c0160b7bce33289 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 3 Jun 2020 16:39:31 -0400 Subject: [PATCH 0952/1035] fix: ensure unique tag for session connection protection This commit was moved from ipfs/go-bitswap@b38f4513604915f3080d1207a79c56e4be4cf3b6 --- bitswap/internal/session/session_test.go | 5 ++--- .../session/sessionwantsender_test.go | 8 +++---- .../sessionpeermanager/sessionpeermanager.go | 10 +++------ .../sessionpeermanager_test.go | 22 ++++++++++--------- 4 files changed, 20 insertions(+), 25 deletions(-) diff --git a/bitswap/internal/session/session_test.go b/bitswap/internal/session/session_test.go index b6aa5b5ee..08bc9f88b 100644 --- a/bitswap/internal/session/session_test.go +++ b/bitswap/internal/session/session_test.go @@ -94,12 +94,11 @@ func (fpt *fakePeerTagger) Unprotect(p peer.ID, tag string) bool { return false } -func (fpt *fakePeerTagger) isProtected(p peer.ID, tag string) bool { +func (fpt *fakePeerTagger) isProtected(p peer.ID) bool { fpt.lk.Lock() defer fpt.lk.Unlock() - _, ok := fpt.protectedPeers[p][tag] - return ok + return len(fpt.protectedPeers[p]) > 0 } type fakeProviderFinder struct { diff --git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go index 08c465bf7..806112f55 100644 --- a/bitswap/internal/session/sessionwantsender_test.go +++ b/bitswap/internal/session/sessionwantsender_test.go @@ -2,7 +2,6 @@ package session import ( "context" - "fmt" "sync" "testing" "time" @@ -383,7 +382,6 @@ func TestProtectConnFirstPeerToSendWantedBlock(t *testing.T) { peerB := peers[1] peerC := peers[2] sid := uint64(1) - sidStr := 
fmt.Sprintf("%d", sid) pm := newMockPeerManager() fpt := newFakePeerTagger() fpm := bsspm.New(1, fpt) @@ -406,7 +404,7 @@ func TestProtectConnFirstPeerToSendWantedBlock(t *testing.T) { time.Sleep(10 * time.Millisecond) // Expect peer A to be protected as it was first to send the block - if !fpt.isProtected(peerA, sidStr) { + if !fpt.isProtected(peerA) { t.Fatal("Expected first peer to send block to have protected connection") } @@ -417,7 +415,7 @@ func TestProtectConnFirstPeerToSendWantedBlock(t *testing.T) { time.Sleep(10 * time.Millisecond) // Expect peer B not to be protected as it was not first to send the block - if fpt.isProtected(peerB, sidStr) { + if fpt.isProtected(peerB) { t.Fatal("Expected peer not to be protected") } @@ -428,7 +426,7 @@ func TestProtectConnFirstPeerToSendWantedBlock(t *testing.T) { time.Sleep(10 * time.Millisecond) // Expect peer C not to be protected as we didn't want the block it sent - if fpt.isProtected(peerC, sidStr) { + if fpt.isProtected(peerC) { t.Fatal("Expected peer not to be protected") } } diff --git a/bitswap/internal/sessionpeermanager/sessionpeermanager.go b/bitswap/internal/sessionpeermanager/sessionpeermanager.go index e5442d5c4..db46691b9 100644 --- a/bitswap/internal/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/internal/sessionpeermanager/sessionpeermanager.go @@ -78,7 +78,7 @@ func (spm *SessionPeerManager) ProtectConnection(p peer.ID) { return } - spm.tagger.Protect(p, spm.protectedTag()) + spm.tagger.Protect(p, spm.tag) } // RemovePeer removes the peer from the SessionPeerManager. 
@@ -93,7 +93,7 @@ func (spm *SessionPeerManager) RemovePeer(p peer.ID) bool { delete(spm.peers, p) spm.tagger.UntagPeer(p, spm.tag) - spm.tagger.Unprotect(p, spm.protectedTag()) + spm.tagger.Unprotect(p, spm.tag) log.Debugw("Bitswap: removed peer from session", "session", spm.id, "peer", p, "peerCount", len(spm.peers)) return true @@ -145,10 +145,6 @@ func (spm *SessionPeerManager) Shutdown() { // connections to those peers for p := range spm.peers { spm.tagger.UntagPeer(p, spm.tag) - spm.tagger.Unprotect(p, spm.protectedTag()) + spm.tagger.Unprotect(p, spm.tag) } } - -func (spm *SessionPeerManager) protectedTag() string { - return fmt.Sprintf("%d", spm.id) -} diff --git a/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go b/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go index 7bb36b342..746333c22 100644 --- a/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go @@ -1,7 +1,6 @@ package sessionpeermanager import ( - "fmt" "sync" "testing" @@ -71,6 +70,13 @@ func (fpt *fakePeerTagger) Unprotect(p peer.ID, tag string) bool { return false } +func (fpt *fakePeerTagger) isProtected(p peer.ID) bool { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + return len(fpt.protectedPeers[p]) > 0 +} + func TestAddPeers(t *testing.T) { peers := testutil.GeneratePeers(2) spm := New(1, &fakePeerTagger{}) @@ -247,26 +253,24 @@ func TestProtectConnection(t *testing.T) { peers := testutil.GeneratePeers(1) peerA := peers[0] fpt := newFakePeerTagger() - sid := 1 - sidstr := fmt.Sprintf("%d", sid) spm := New(1, fpt) // Should not protect connection if peer hasn't been added yet spm.ProtectConnection(peerA) - if _, ok := fpt.protectedPeers[peerA][sidstr]; ok { + if fpt.isProtected(peerA) { t.Fatal("Expected peer not to be protected") } // Once peer is added, should be able to protect connection spm.AddPeer(peerA) spm.ProtectConnection(peerA) - if _, ok := fpt.protectedPeers[peerA][sidstr]; 
!ok { + if !fpt.isProtected(peerA) { t.Fatal("Expected peer to be protected") } // Removing peer should unprotect connection spm.RemovePeer(peerA) - if _, ok := fpt.protectedPeers[peerA][sidstr]; ok { + if fpt.isProtected(peerA) { t.Fatal("Expected peer to be unprotected") } } @@ -274,9 +278,7 @@ func TestProtectConnection(t *testing.T) { func TestShutdown(t *testing.T) { peers := testutil.GeneratePeers(2) fpt := newFakePeerTagger() - sid := uint64(1) - sidstr := fmt.Sprintf("%d", sid) - spm := New(sid, fpt) + spm := New(1, fpt) spm.AddPeer(peers[0]) spm.AddPeer(peers[1]) @@ -285,7 +287,7 @@ func TestShutdown(t *testing.T) { } spm.ProtectConnection(peers[0]) - if _, ok := fpt.protectedPeers[peers[0]][sidstr]; !ok { + if !fpt.isProtected(peers[0]) { t.Fatal("Expected peer to be protected") } From 799b44d70bc97d694e9966d7befecc39bc46a875 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 8 Jun 2020 11:25:15 -0700 Subject: [PATCH 0953/1035] fix: only track useful received data in the ledger (#411) Quick alternative to #407 to fix the main issue. 
This commit was moved from ipfs/go-bitswap@a7afff5443a1b67a26ade6ecd378d8730dabf55c --- bitswap/internal/decision/engine.go | 23 ++++++++++++++++------- bitswap/internal/decision/engine_test.go | 1 + 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 49063bd5c..b62074053 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -557,13 +557,6 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap l.lk.Lock() defer l.lk.Unlock() - // Record how many bytes were received in the ledger - blks := m.Blocks() - for _, block := range blks { - log.Debugw("Bitswap engine <- block", "local", e.self, "from", p, "cid", block.Cid(), "size", len(block.RawData())) - l.ReceivedBytes(len(block.RawData())) - } - // If the peer sent a full wantlist, replace the ledger's wantlist if m.Full() { l.wantList = wl.New() @@ -664,11 +657,26 @@ func (e *Engine) splitWantsCancels(es []bsmsg.Entry) ([]bsmsg.Entry, []bsmsg.Ent // ReceiveFrom is called when new blocks are received and added to the block // store, meaning there may be peers who want those blocks, so we should send // the blocks to them. +// +// This function also updates the receive side of the ledger. 
func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block, haves []cid.Cid) { if len(blks) == 0 { return } + if from != "" { + l := e.findOrCreate(from) + l.lk.Lock() + + // Record how many bytes were received in the ledger + for _, blk := range blks { + log.Debugw("Bitswap engine <- block", "local", e.self, "from", from, "cid", blk.Cid(), "size", len(blk.RawData())) + l.ReceivedBytes(len(blk.RawData())) + } + + l.lk.Unlock() + } + // Get the size of each block blockSizes := make(map[cid.Cid]int, len(blks)) for _, blk := range blks { @@ -678,6 +686,7 @@ func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block, haves []cid.Cid) // Check each peer to see if it wants one of the blocks we received work := false e.lock.RLock() + for _, l := range e.ledgerMap { l.lk.RLock() diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index cf000d96e..3cb765973 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -123,6 +123,7 @@ func TestConsistentAccounting(t *testing.T) { sender.Engine.MessageSent(receiver.Peer, m) receiver.Engine.MessageReceived(ctx, sender.Peer, m) + receiver.Engine.ReceiveFrom(sender.Peer, m.Blocks(), nil) } // Ensure sender records the change From 0fed64cc3352e16df197f166e555b196b5c7948c Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 9 Jun 2020 18:51:35 -0700 Subject: [PATCH 0954/1035] fix: avoid taking accessing the peerQueues without taking the lock Or, really, just avoid accessing it. We don't need it. This caused a concurrent map access panic under load. 
This commit was moved from ipfs/go-bitswap@b0cea10d1a51ec211f5beeda875a6436422732ed --- bitswap/internal/peermanager/peermanager.go | 5 +---- bitswap/internal/session/session.go | 2 +- bitswap/internal/session/session_test.go | 4 +--- bitswap/internal/session/sessionwantsender_test.go | 3 +-- bitswap/internal/sessionmanager/sessionmanager_test.go | 2 +- 5 files changed, 5 insertions(+), 11 deletions(-) diff --git a/bitswap/internal/peermanager/peermanager.go b/bitswap/internal/peermanager/peermanager.go index 4c489dd8a..00857627c 100644 --- a/bitswap/internal/peermanager/peermanager.go +++ b/bitswap/internal/peermanager/peermanager.go @@ -198,7 +198,7 @@ func (pm *PeerManager) getOrCreate(p peer.ID) PeerQueue { // RegisterSession tells the PeerManager that the given session is interested // in events about the given peer. -func (pm *PeerManager) RegisterSession(p peer.ID, s Session) bool { +func (pm *PeerManager) RegisterSession(p peer.ID, s Session) { pm.psLk.Lock() defer pm.psLk.Unlock() @@ -210,9 +210,6 @@ func (pm *PeerManager) RegisterSession(p peer.ID, s Session) bool { pm.peerSessions[p] = make(map[uint64]struct{}) } pm.peerSessions[p][s.ID()] = struct{}{} - - _, ok := pm.peerQueues[p] - return ok } // UnregisterSession tells the PeerManager that the given session is no longer diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index 7b2953f95..f2a4d2e46 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -30,7 +30,7 @@ const ( type PeerManager interface { // RegisterSession tells the PeerManager that the session is interested // in a peer's connection state - RegisterSession(peer.ID, bspm.Session) bool + RegisterSession(peer.ID, bspm.Session) // UnregisterSession tells the PeerManager that the session is no longer // interested in a peer's connection state UnregisterSession(uint64) diff --git a/bitswap/internal/session/session_test.go b/bitswap/internal/session/session_test.go 
index 08bc9f88b..b63a20d9d 100644 --- a/bitswap/internal/session/session_test.go +++ b/bitswap/internal/session/session_test.go @@ -136,9 +136,7 @@ func newFakePeerManager() *fakePeerManager { } } -func (pm *fakePeerManager) RegisterSession(peer.ID, bspm.Session) bool { - return true -} +func (pm *fakePeerManager) RegisterSession(peer.ID, bspm.Session) {} func (pm *fakePeerManager) UnregisterSession(uint64) {} func (pm *fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {} func (pm *fakePeerManager) BroadcastWantHaves(ctx context.Context, cids []cid.Cid) { diff --git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/internal/session/sessionwantsender_test.go index 806112f55..4b39a893f 100644 --- a/bitswap/internal/session/sessionwantsender_test.go +++ b/bitswap/internal/session/sessionwantsender_test.go @@ -59,12 +59,11 @@ func newMockPeerManager() *mockPeerManager { } } -func (pm *mockPeerManager) RegisterSession(p peer.ID, sess bspm.Session) bool { +func (pm *mockPeerManager) RegisterSession(p peer.ID, sess bspm.Session) { pm.lk.Lock() defer pm.lk.Unlock() pm.peerSessions[p] = sess - return true } func (pm *mockPeerManager) has(p peer.ID, sid uint64) bool { diff --git a/bitswap/internal/sessionmanager/sessionmanager_test.go b/bitswap/internal/sessionmanager/sessionmanager_test.go index fb8445f1e..db88855f5 100644 --- a/bitswap/internal/sessionmanager/sessionmanager_test.go +++ b/bitswap/internal/sessionmanager/sessionmanager_test.go @@ -64,7 +64,7 @@ type fakePeerManager struct { cancels []cid.Cid } -func (*fakePeerManager) RegisterSession(peer.ID, bspm.Session) bool { return true } +func (*fakePeerManager) RegisterSession(peer.ID, bspm.Session) {} func (*fakePeerManager) UnregisterSession(uint64) {} func (*fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {} func (*fakePeerManager) BroadcastWantHaves(context.Context, []cid.Cid) {} From c63616fbd1471ec50532d59069488d7e3be0bd1b Mon Sep 17 00:00:00 2001 
From: Dirk McCormick Date: Wed, 10 Jun 2020 15:55:34 -0400 Subject: [PATCH 0955/1035] fix: want gauge calculation This commit was moved from ipfs/go-bitswap@47129f71fb800cdfb3fef3985f3d792630018547 --- .../internal/peermanager/peerwantmanager.go | 151 ++++++++++-------- .../peermanager/peerwantmanager_test.go | 78 +++++++++ 2 files changed, 166 insertions(+), 63 deletions(-) diff --git a/bitswap/internal/peermanager/peerwantmanager.go b/bitswap/internal/peermanager/peerwantmanager.go index ee81649a7..21934b815 100644 --- a/bitswap/internal/peermanager/peerwantmanager.go +++ b/bitswap/internal/peermanager/peerwantmanager.go @@ -84,25 +84,28 @@ func (pwm *peerWantManager) removePeer(p peer.ID) { // Clean up want-blocks _ = pws.wantBlocks.ForEach(func(c cid.Cid) error { // Clean up want-blocks from the reverse index - removedLastPeer := pwm.reverseIndexRemove(c, p) + pwm.reverseIndexRemove(c, p) // Decrement the gauges by the number of pending want-blocks to the peer - if removedLastPeer { + peersWantingBlock, peersWantingHave := pwm.peersWanting(c) + if peersWantingBlock == 0 { pwm.wantBlockGauge.Dec() - if !pwm.broadcastWants.Has(c) { + if peersWantingHave == 0 && !pwm.broadcastWants.Has(c) { pwm.wantGauge.Dec() } } + return nil }) // Clean up want-haves _ = pws.wantHaves.ForEach(func(c cid.Cid) error { // Clean up want-haves from the reverse index - removedLastPeer := pwm.reverseIndexRemove(c, p) + pwm.reverseIndexRemove(c, p) // Decrement the gauge by the number of pending want-haves to the peer - if removedLastPeer && !pwm.broadcastWants.Has(c) { + peersWantingBlock, peersWantingHave := pwm.peersWanting(c) + if peersWantingBlock == 0 && peersWantingHave == 0 && !pwm.broadcastWants.Has(c) { pwm.wantGauge.Dec() } return nil @@ -122,8 +125,9 @@ func (pwm *peerWantManager) broadcastWantHaves(wantHaves []cid.Cid) { pwm.broadcastWants.Add(c) unsent = append(unsent, c) - // Increment the total wants gauge + // If no peer has a pending want for the key if _, ok := 
pwm.wantPeers[c]; !ok { + // Increment the total wants gauge pwm.wantGauge.Inc() } } @@ -168,27 +172,30 @@ func (pwm *peerWantManager) sendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves // Iterate over the requested want-blocks for _, c := range wantBlocks { // If the want-block hasn't been sent to the peer - if !pws.wantBlocks.Has(c) { - // Record that the CID was sent as a want-block - pws.wantBlocks.Add(c) - - // Add the CID to the results - fltWantBlks = append(fltWantBlks, c) - - // Make sure the CID is no longer recorded as a want-have - pws.wantHaves.Remove(c) + if pws.wantBlocks.Has(c) { + continue + } - // Update the reverse index - isNew := pwm.reverseIndexAdd(c, p) - - // Increment the want gauges - if isNew { - pwm.wantBlockGauge.Inc() - if !pwm.broadcastWants.Has(c) { - pwm.wantGauge.Inc() - } + // Increment the want gauges + peersWantingBlock, peersWantingHave := pwm.peersWanting(c) + if peersWantingBlock == 0 { + pwm.wantBlockGauge.Inc() + if peersWantingHave == 0 && !pwm.broadcastWants.Has(c) { + pwm.wantGauge.Inc() } } + + // Make sure the CID is no longer recorded as a want-have + pws.wantHaves.Remove(c) + + // Record that the CID was sent as a want-block + pws.wantBlocks.Add(c) + + // Add the CID to the results + fltWantBlks = append(fltWantBlks, c) + + // Update the reverse index + pwm.reverseIndexAdd(c, p) } // Iterate over the requested want-haves @@ -201,6 +208,12 @@ func (pwm *peerWantManager) sendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves // If the CID has not been sent as a want-block or want-have if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { + // Increment the total wants gauge + peersWantingBlock, peersWantingHave := pwm.peersWanting(c) + if peersWantingHave == 0 && !pwm.broadcastWants.Has(c) && peersWantingBlock == 0 { + pwm.wantGauge.Inc() + } + // Record that the CID was sent as a want-have pws.wantHaves.Add(c) @@ -208,12 +221,7 @@ func (pwm *peerWantManager) sendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves 
fltWantHvs = append(fltWantHvs, c) // Update the reverse index - isNew := pwm.reverseIndexAdd(c, p) - - // Increment the total wants gauge - if isNew && !pwm.broadcastWants.Has(c) { - pwm.wantGauge.Inc() - } + pwm.reverseIndexAdd(c, p) } } @@ -228,6 +236,14 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { return } + // Record how many peers have a pending want-block and want-have for each + // key to be cancelled + peersWantingBefore := make(map[cid.Cid][]int, len(cancelKs)) + for _, c := range cancelKs { + blks, haves := pwm.peersWanting(c) + peersWantingBefore[c] = []int{blks, haves} + } + // Create a buffer to use for filtering cancels per peer, with the // broadcast wants at the front of the buffer (broadcast wants are sent to // all peers) @@ -238,9 +254,6 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { } } - cancelledWantBlocks := cid.NewSet() - cancelledWantHaves := cid.NewSet() - // Send cancels to a particular peer send := func(p peer.ID, pws *peerWant) { // Start from the broadcast cancels @@ -249,15 +262,7 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { // For each key to be cancelled for _, c := range cancelKs { // Check if a want was sent for the key - wantBlock := pws.wantBlocks.Has(c) - wantHave := pws.wantHaves.Has(c) - - // Update the want gauges - if wantBlock { - cancelledWantBlocks.Add(c) - } else if wantHave { - cancelledWantHaves.Add(c) - } else { + if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { continue } @@ -304,33 +309,56 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { } } - // Remove cancelled broadcast wants - for _, c := range broadcastCancels { - pwm.broadcastWants.Remove(c) + // Decrement the wants gauges + for _, c := range cancelKs { + before := peersWantingBefore[c] + peersWantingBlockBefore := before[0] + peersWantingHaveBefore := before[1] - // Decrement the total wants gauge for broadcast wants - if !cancelledWantHaves.Has(c) && !cancelledWantBlocks.Has(c) { 
+ // If there were any peers that had a pending want-block for the key + if peersWantingBlockBefore > 0 { + // Decrement the want-block gauge + pwm.wantBlockGauge.Dec() + } + + // If there was a peer that had a pending want or it was a broadcast want + if peersWantingBlockBefore > 0 || peersWantingHaveBefore > 0 || pwm.broadcastWants.Has(c) { + // Decrement the total wants gauge pwm.wantGauge.Dec() } } - // Decrement the total wants gauge for peer wants - _ = cancelledWantHaves.ForEach(func(c cid.Cid) error { - pwm.wantGauge.Dec() - return nil - }) - _ = cancelledWantBlocks.ForEach(func(c cid.Cid) error { - pwm.wantGauge.Dec() - pwm.wantBlockGauge.Dec() - return nil - }) + // Remove cancelled broadcast wants + for _, c := range broadcastCancels { + pwm.broadcastWants.Remove(c) + } - // Finally, batch-remove the reverse-index. There's no need to - // clear this index peer-by-peer. + // Batch-remove the reverse-index. There's no need to clear this index + // peer-by-peer. for _, c := range cancelKs { delete(pwm.wantPeers, c) } +} + +// peersWanting counts how many peers have a pending want-block and want-have +// for the given CID +func (pwm *peerWantManager) peersWanting(c cid.Cid) (int, int) { + blockCount := 0 + haveCount := 0 + for p := range pwm.wantPeers[c] { + pws, ok := pwm.peerWants[p] + if !ok { + continue + } + + if pws.wantBlocks.Has(c) { + blockCount++ + } else if pws.wantHaves.Has(c) { + haveCount++ + } + } + return blockCount, haveCount } // Add the peer to the list of peers that have sent a want with the cid @@ -345,16 +373,13 @@ func (pwm *peerWantManager) reverseIndexAdd(c cid.Cid, p peer.ID) bool { } // Remove the peer from the list of peers that have sent a want with the cid -func (pwm *peerWantManager) reverseIndexRemove(c cid.Cid, p peer.ID) bool { +func (pwm *peerWantManager) reverseIndexRemove(c cid.Cid, p peer.ID) { if peers, ok := pwm.wantPeers[c]; ok { delete(peers, p) if len(peers) == 0 { delete(pwm.wantPeers, c) - return true } } - - 
return false } // GetWantBlocks returns the set of all want-blocks sent to all peers diff --git a/bitswap/internal/peermanager/peerwantmanager_test.go b/bitswap/internal/peermanager/peerwantmanager_test.go index 60b7c8e72..5a00f27f4 100644 --- a/bitswap/internal/peermanager/peerwantmanager_test.go +++ b/bitswap/internal/peermanager/peerwantmanager_test.go @@ -436,3 +436,81 @@ func TestStats(t *testing.T) { t.Fatal("Expected 0 want-blocks") } } + +func TestStatsOverlappingWantBlockWantHave(t *testing.T) { + g := &gauge{} + wbg := &gauge{} + pwm := newPeerWantManager(g, wbg) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + + pwm.addPeer(&mockPQ{}, p0) + pwm.addPeer(&mockPQ{}, p1) + + // Send 2 want-blocks and 2 want-haves to p0 + pwm.sendWants(p0, cids, cids2) + + // Send opposite: + // 2 want-haves and 2 want-blocks to p1 + pwm.sendWants(p1, cids2, cids) + + if g.count != 4 { + t.Fatal("Expected 4 wants") + } + if wbg.count != 4 { + t.Fatal("Expected 4 want-blocks") + } + + // Cancel 1 of each group of cids + pwm.sendCancels([]cid.Cid{cids[0], cids2[0]}) + + if g.count != 2 { + t.Fatal("Expected 2 wants") + } + if wbg.count != 2 { + t.Fatal("Expected 2 want-blocks") + } +} + +func TestStatsRemovePeerOverlappingWantBlockWantHave(t *testing.T) { + g := &gauge{} + wbg := &gauge{} + pwm := newPeerWantManager(g, wbg) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + + pwm.addPeer(&mockPQ{}, p0) + pwm.addPeer(&mockPQ{}, p1) + + // Send 2 want-blocks and 2 want-haves to p0 + pwm.sendWants(p0, cids, cids2) + + // Send opposite: + // 2 want-haves and 2 want-blocks to p1 + pwm.sendWants(p1, cids2, cids) + + if g.count != 4 { + t.Fatal("Expected 4 wants") + } + if wbg.count != 4 { + t.Fatal("Expected 4 want-blocks") + } + + // Remove p0 + pwm.removePeer(p0) + + if g.count != 4 { + 
t.Fatal("Expected 4 wants") + } + if wbg.count != 2 { + t.Fatal("Expected 2 want-blocks") + } +} From 353e6933e05e5eaa004c797b2e82a5965f97f26e Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 10 Jun 2020 16:18:20 -0400 Subject: [PATCH 0956/1035] fix: PeerManager signalAvailabiity() race This commit was moved from ipfs/go-bitswap@980ca8d495635a4c3d7cd781de48bdc6134ac320 --- bitswap/internal/peermanager/peermanager.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bitswap/internal/peermanager/peermanager.go b/bitswap/internal/peermanager/peermanager.go index 00857627c..1d4538a7e 100644 --- a/bitswap/internal/peermanager/peermanager.go +++ b/bitswap/internal/peermanager/peermanager.go @@ -231,6 +231,9 @@ func (pm *PeerManager) UnregisterSession(ses uint64) { // signalAvailability is called when a peer's connectivity changes. // It informs interested sessions. func (pm *PeerManager) signalAvailability(p peer.ID, isConnected bool) { + pm.psLk.Lock() + defer pm.psLk.Unlock() + sesIds, ok := pm.peerSessions[p] if !ok { return From 902d40cf993eccac3af52418f724ba843fa68e23 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 10 Jun 2020 16:44:56 -0400 Subject: [PATCH 0957/1035] refactor: simplify PeerWantManager pending want counts This commit was moved from ipfs/go-bitswap@85f0e9faa69febafd290e87c6072878fb35c79d4 --- .../internal/peermanager/peerwantmanager.go | 62 +++++++++++-------- 1 file changed, 37 insertions(+), 25 deletions(-) diff --git a/bitswap/internal/peermanager/peerwantmanager.go b/bitswap/internal/peermanager/peerwantmanager.go index 21934b815..fc852d317 100644 --- a/bitswap/internal/peermanager/peerwantmanager.go +++ b/bitswap/internal/peermanager/peerwantmanager.go @@ -87,12 +87,12 @@ func (pwm *peerWantManager) removePeer(p peer.ID) { pwm.reverseIndexRemove(c, p) // Decrement the gauges by the number of pending want-blocks to the peer - peersWantingBlock, peersWantingHave := pwm.peersWanting(c) - if peersWantingBlock == 0 { + peerCounts 
:= pwm.wantPeerCounts(c) + if peerCounts.wantBlock == 0 { pwm.wantBlockGauge.Dec() - if peersWantingHave == 0 && !pwm.broadcastWants.Has(c) { - pwm.wantGauge.Dec() - } + } + if !peerCounts.wanted() { + pwm.wantGauge.Dec() } return nil @@ -104,8 +104,8 @@ func (pwm *peerWantManager) removePeer(p peer.ID) { pwm.reverseIndexRemove(c, p) // Decrement the gauge by the number of pending want-haves to the peer - peersWantingBlock, peersWantingHave := pwm.peersWanting(c) - if peersWantingBlock == 0 && peersWantingHave == 0 && !pwm.broadcastWants.Has(c) { + peerCounts := pwm.wantPeerCounts(c) + if !peerCounts.wanted() { pwm.wantGauge.Dec() } return nil @@ -177,12 +177,12 @@ func (pwm *peerWantManager) sendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves } // Increment the want gauges - peersWantingBlock, peersWantingHave := pwm.peersWanting(c) - if peersWantingBlock == 0 { + peerCounts := pwm.wantPeerCounts(c) + if peerCounts.wantBlock == 0 { pwm.wantBlockGauge.Inc() - if peersWantingHave == 0 && !pwm.broadcastWants.Has(c) { - pwm.wantGauge.Inc() - } + } + if !peerCounts.wanted() { + pwm.wantGauge.Inc() } // Make sure the CID is no longer recorded as a want-have @@ -209,8 +209,8 @@ func (pwm *peerWantManager) sendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves // If the CID has not been sent as a want-block or want-have if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { // Increment the total wants gauge - peersWantingBlock, peersWantingHave := pwm.peersWanting(c) - if peersWantingHave == 0 && !pwm.broadcastWants.Has(c) && peersWantingBlock == 0 { + peerCounts := pwm.wantPeerCounts(c) + if !peerCounts.wanted() { pwm.wantGauge.Inc() } @@ -238,10 +238,9 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { // Record how many peers have a pending want-block and want-have for each // key to be cancelled - peersWantingBefore := make(map[cid.Cid][]int, len(cancelKs)) + peerCounts := make(map[cid.Cid]wantPeerCnts, len(cancelKs)) for _, c := range cancelKs { - blks, 
haves := pwm.peersWanting(c) - peersWantingBefore[c] = []int{blks, haves} + peerCounts[c] = pwm.wantPeerCounts(c) } // Create a buffer to use for filtering cancels per peer, with the @@ -311,18 +310,16 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { // Decrement the wants gauges for _, c := range cancelKs { - before := peersWantingBefore[c] - peersWantingBlockBefore := before[0] - peersWantingHaveBefore := before[1] + peerCnts := peerCounts[c] // If there were any peers that had a pending want-block for the key - if peersWantingBlockBefore > 0 { + if peerCnts.wantBlock > 0 { // Decrement the want-block gauge pwm.wantBlockGauge.Dec() } // If there was a peer that had a pending want or it was a broadcast want - if peersWantingBlockBefore > 0 || peersWantingHaveBefore > 0 || pwm.broadcastWants.Has(c) { + if peerCnts.wanted() { // Decrement the total wants gauge pwm.wantGauge.Dec() } @@ -340,9 +337,24 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { } } -// peersWanting counts how many peers have a pending want-block and want-have +// wantPeerCnts stores the number of peers that have pending wants for a CID +type wantPeerCnts struct { + // number of peers that have a pending want-block for the CID + wantBlock int + // number of peers that have a pending want-have for the CID + wantHave int + // whether the CID is a broadcast want + isBroadcast bool +} + +// wanted returns true if any peer wants the CID or it's a broadcast want +func (pwm *wantPeerCnts) wanted() bool { + return pwm.wantBlock > 0 || pwm.wantHave > 0 || pwm.isBroadcast +} + +// wantPeerCounts counts how many peers have a pending want-block and want-have // for the given CID -func (pwm *peerWantManager) peersWanting(c cid.Cid) (int, int) { +func (pwm *peerWantManager) wantPeerCounts(c cid.Cid) wantPeerCnts { blockCount := 0 haveCount := 0 for p := range pwm.wantPeers[c] { @@ -358,7 +370,7 @@ func (pwm *peerWantManager) peersWanting(c cid.Cid) (int, int) { } } - return 
blockCount, haveCount + return wantPeerCnts{blockCount, haveCount, pwm.broadcastWants.Has(c)} } // Add the peer to the list of peers that have sent a want with the cid From c0688e38aae2d1493d69bbba8107041591162f4f Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 10 Jun 2020 16:51:02 -0400 Subject: [PATCH 0958/1035] fix: log error for unexpected reverse index mismatch This commit was moved from ipfs/go-bitswap@654e5b4df00b7544f6f5f94592c15668ec509112 --- bitswap/internal/peermanager/peerwantmanager.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bitswap/internal/peermanager/peerwantmanager.go b/bitswap/internal/peermanager/peerwantmanager.go index fc852d317..46a3ac348 100644 --- a/bitswap/internal/peermanager/peerwantmanager.go +++ b/bitswap/internal/peermanager/peerwantmanager.go @@ -360,6 +360,7 @@ func (pwm *peerWantManager) wantPeerCounts(c cid.Cid) wantPeerCnts { for p := range pwm.wantPeers[c] { pws, ok := pwm.peerWants[p] if !ok { + log.Errorf("reverse index has extra peer %s for key %s in peerWantManager", string(p), c) continue } From 9895f628ed83e25561246cff26d4931167500ec5 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 19 Aug 2020 10:31:52 -0700 Subject: [PATCH 0959/1035] fix: don't say we're sending a full wantlist unless we are (#429) I'm not sure why we set "full" to true here, but this could be the source of a whole bunch of bidirectional sync issues. That is, if two peers are syncing off each other, they could repeatedly "reset" each other's wantlist to "empty". 
This commit was moved from ipfs/go-bitswap@72d351cb3915079401fc3594baab3be50d736650 --- bitswap/internal/decision/engine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index b62074053..2a6dc60f6 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -421,7 +421,7 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { } // Create a new message - msg := bsmsg.New(true) + msg := bsmsg.New(false) log.Debugw("Bitswap process tasks", "local", e.self, "taskCount", len(nextTasks)) From a82ecc5546ad3fdcdfcb220e62db9f4448c4e9cb Mon Sep 17 00:00:00 2001 From: Paul Wolneykien Date: Thu, 3 Sep 2020 13:37:46 +0300 Subject: [PATCH 0960/1035] Added `WithScoreLedger` Bitswap option (#430) * Separate decision engine ledger on two parts: score and the wantlist This is the first step to make external decision logic (tagging peers with score values) possible. The wantlist still resides in the original `ledger` struct while sent/received byte accounting and scores are extracted to the new `scoreledger` struct managed by the original `scoreWorker()` logic. The accounting is integrated into the `Engine` via `ScoreLedger` interface making it possible to replace the original `scoreWorker()` with some other logic. The interface, however, doesn't allow a score logic to directly touch peer tags: the logic may decide about score values while tagging itself is still under control of Engine. Note: with this commit it's yet not possible to replace the original score logic because there is no public methods for that. * Added "WithScoreLedger" Bitswap option New `WithScoreLedger(decision.ScoreLedger)` option in the `bitswap` package is the way to connect a custom `ScoreLedger` implementation to the decision engine. The `Engine` now has the corresponding `UseScoreLedger(ScoreLedger)` method. 
The `ScoreLedger` and `ScorePeerFunc` types are exposed from the internal `decision` package to the public one. Because its options are processed by the `Bitswap` after construction of its parts but before starting of the engine, the default `scoreLedger` initialization is moved from `newEngine()` to `StartWorkers()`. New `TestWithScoreLedger` test is added. The test checks for start and stop of the testing score ledger implementation that is specified via `WithScoreLedger` option. * Combine score ledger start with initialization of the score function Having a separate `Init(ScoreFunc)` method seems redundant (thx @dirkmc for pointing about that). As a bonus, the two-step ledger starting process is now enclosed in the `startScoreLedger()` function. * Let's call Stop() to stop a ScoreLedger The `Close()` method was there to stop the ledger. Let call it `Stop()` now. * Get return of the blank Receipt out of conditional block Explicitly form it as the final resort. Co-authored-by: Paul Wolneykien This commit was moved from ipfs/go-bitswap@fd213932c1f68a9a7a28c5c855cd0d786c85bf76 --- bitswap/bitswap.go | 8 + bitswap/bitswap_test.go | 59 +++- bitswap/decision/decision.go | 8 +- bitswap/internal/decision/engine.go | 220 +++++--------- bitswap/internal/decision/engine_test.go | 16 +- bitswap/internal/decision/ledger.go | 65 +---- bitswap/internal/decision/scoreledger.go | 350 +++++++++++++++++++++++ 7 files changed, 499 insertions(+), 227 deletions(-) create mode 100644 bitswap/internal/decision/scoreledger.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 9afe5d275..8af786a80 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -11,6 +11,7 @@ import ( delay "github.com/ipfs/go-ipfs-delay" + deciface "github.com/ipfs/go-bitswap/decision" bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" decision "github.com/ipfs/go-bitswap/internal/decision" bsgetter "github.com/ipfs/go-bitswap/internal/getter" @@ -95,6 +96,13 @@ func 
SetSendDontHaves(send bool) Option { } } +// Configures the engine to use the given score decision logic. +func WithScoreLedger(scoreLedger deciface.ScoreLedger) Option { + return func(bs *Bitswap) { + bs.engine.UseScoreLedger(scoreLedger) + } +} + // New initializes a BitSwap instance that communicates over the provided // BitSwapNetwork. This function registers the returned instance as the network // delegate. Runs until context is cancelled or bitswap.Close is called. diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index ba89e038d..b95faa30d 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -9,11 +9,12 @@ import ( "time" bitswap "github.com/ipfs/go-bitswap" + deciface "github.com/ipfs/go-bitswap/decision" decision "github.com/ipfs/go-bitswap/internal/decision" bssession "github.com/ipfs/go-bitswap/internal/session" + "github.com/ipfs/go-bitswap/message" testinstance "github.com/ipfs/go-bitswap/testinstance" tn "github.com/ipfs/go-bitswap/testnet" - "github.com/ipfs/go-bitswap/message" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" detectrace "github.com/ipfs/go-detect-race" @@ -803,3 +804,59 @@ func TestBitswapLedgerTwoWay(t *testing.T) { } } } + +type testingScoreLedger struct { + scorePeer deciface.ScorePeerFunc + started chan struct{} + closed chan struct{} +} + +func newTestingScoreLedger() *testingScoreLedger { + return &testingScoreLedger{ + nil, + make(chan struct{}), + make(chan struct{}), + } +} + +func (tsl *testingScoreLedger) GetReceipt(p peer.ID) *deciface.Receipt { + return nil +} +func (tsl *testingScoreLedger) AddToSentBytes(p peer.ID, n int) {} +func (tsl *testingScoreLedger) AddToReceivedBytes(p peer.ID, n int) {} +func (tsl *testingScoreLedger) PeerConnected(p peer.ID) {} +func (tsl *testingScoreLedger) PeerDisconnected(p peer.ID) {} +func (tsl *testingScoreLedger) Start(scorePeer deciface.ScorePeerFunc) { + tsl.scorePeer = scorePeer + close(tsl.started) +} +func (tsl 
*testingScoreLedger) Stop() { + close(tsl.closed) +} + +// Tests start and stop of a custom decision logic +func TestWithScoreLedger(t *testing.T) { + tsl := newTestingScoreLedger() + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + bsOpts := []bitswap.Option{bitswap.WithScoreLedger(tsl)} + ig := testinstance.NewTestInstanceGenerator(net, nil, bsOpts) + defer ig.Close() + i := ig.Next() + defer i.Exchange.Close() + + select { + case <-tsl.started: + if tsl.scorePeer == nil { + t.Fatal("Expected the score function to be initialized") + } + case <-time.After(time.Second * 5): + t.Fatal("Expected the score ledger to be started within 5s") + } + + i.Exchange.Close() + select { + case <-tsl.closed: + case <-time.After(time.Second * 5): + t.Fatal("Expected the score ledger to be closed within 5s") + } +} diff --git a/bitswap/decision/decision.go b/bitswap/decision/decision.go index 8dd310f69..4afc463ec 100644 --- a/bitswap/decision/decision.go +++ b/bitswap/decision/decision.go @@ -2,5 +2,11 @@ package decision import intdec "github.com/ipfs/go-bitswap/internal/decision" -// Expose type externally +// Expose Receipt externally type Receipt = intdec.Receipt + +// Expose ScoreLedger externally +type ScoreLedger = intdec.ScoreLedger + +// Expose ScorePeerFunc externally +type ScorePeerFunc = intdec.ScorePeerFunc diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 2a6dc60f6..28584fb10 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -70,25 +70,6 @@ const ( // on their behalf. queuedTagWeight = 10 - // the alpha for the EWMA used to track short term usefulness - shortTermAlpha = 0.5 - - // the alpha for the EWMA used to track long term usefulness - longTermAlpha = 0.05 - - // how frequently the engine should sample usefulness. Peers that - // interact every shortTerm time period are considered "active". 
- shortTerm = 10 * time.Second - - // long term ratio defines what "long term" means in terms of the - // shortTerm duration. Peers that interact once every longTermRatio are - // considered useful over the long term. - longTermRatio = 10 - - // long/short term scores for tagging peers - longTermScore = 10 // this is a high tag but it grows _very_ slowly. - shortTermScore = 10 // this is a high tag but it'll go away quickly if we aren't using the peer. - // maxBlockSizeReplaceHasWithBlock is the maximum size of the block in // bytes up to which we will replace a want-have with a want-block maxBlockSizeReplaceHasWithBlock = 1024 @@ -119,6 +100,29 @@ type PeerTagger interface { UntagPeer(p peer.ID, tag string) } +// Assigns a specific score to a peer +type ScorePeerFunc func(peer.ID, int) + +// ScoreLedger is an external ledger dealing with peer scores. +type ScoreLedger interface { + // Returns aggregated data communication with a given peer. + GetReceipt(p peer.ID) *Receipt + // Increments the sent counter for the given peer. + AddToSentBytes(p peer.ID, n int) + // Increments the received counter for the given peer. + AddToReceivedBytes(p peer.ID, n int) + // PeerConnected should be called when a new peer connects, + // meaning the ledger should open accounting. + PeerConnected(p peer.ID) + // PeerDisconnected should be called when a peer disconnects to + // clean up the accounting. + PeerDisconnected(p peer.ID) + // Starts the ledger sampling process. + Start(scorePeer ScorePeerFunc) + // Stops the sampling process. + Stop() +} + // Engine manages sending requested blocks to peers. type Engine struct { // peerRequestQueue is a priority queue of requests received from peers. @@ -145,9 +149,12 @@ type Engine struct { lock sync.RWMutex // protects the fields immediatly below - // ledgerMap lists Ledgers by their Partner key. + // ledgerMap lists block-related Ledgers by their Partner key. 
ledgerMap map[peer.ID]*ledger + // an external ledger dealing with peer scores + scoreLedger ScoreLedger + ticker *time.Ticker taskWorkerLock sync.Mutex @@ -157,11 +164,6 @@ type Engine struct { // bytes up to which we will replace a want-have with a want-block maxBlockSizeReplaceHasWithBlock int - // how frequently the engine should sample peer usefulness - peerSampleInterval time.Duration - // used by the tests to detect when a sample is taken - sampleCh chan struct{} - sendDontHaves bool self peer.ID @@ -169,23 +171,22 @@ type Engine struct { // NewEngine creates a new block sending engine for the given block store func NewEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, self peer.ID) *Engine { - return newEngine(ctx, bs, peerTagger, self, maxBlockSizeReplaceHasWithBlock, shortTerm, nil) + return newEngine(ctx, bs, peerTagger, self, maxBlockSizeReplaceHasWithBlock, nil) } // This constructor is used by the tests func newEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, self peer.ID, - maxReplaceSize int, peerSampleInterval time.Duration, sampleCh chan struct{}) *Engine { + maxReplaceSize int, scoreLedger ScoreLedger) *Engine { e := &Engine{ ledgerMap: make(map[peer.ID]*ledger), + scoreLedger: scoreLedger, bsm: newBlockstoreManager(ctx, bs, blockstoreWorkerCount), peerTagger: peerTagger, outbox: make(chan (<-chan *Envelope), outboxChanBuffer), workSignal: make(chan struct{}, 1), ticker: time.NewTicker(time.Millisecond * 100), maxBlockSizeReplaceHasWithBlock: maxReplaceSize, - peerSampleInterval: peerSampleInterval, - sampleCh: sampleCh, taskWorkerCount: taskWorkerCount, sendDontHaves: true, self: self, @@ -210,11 +211,37 @@ func (e *Engine) SetSendDontHaves(send bool) { e.sendDontHaves = send } +// Sets the scoreLedger to the given implementation. Should be called +// before StartWorkers(). +func (e *Engine) UseScoreLedger(scoreLedger ScoreLedger) { + e.scoreLedger = scoreLedger +} + +// Starts the score ledger. 
Before start the function checks and, +// if it is unset, initializes the scoreLedger with the default +// implementation. +func (e *Engine) startScoreLedger(px process.Process) { + if e.scoreLedger == nil { + e.scoreLedger = NewDefaultScoreLedger() + } + e.scoreLedger.Start(func(p peer.ID, score int) { + if score == 0 { + e.peerTagger.UntagPeer(p, e.tagUseful) + } else { + e.peerTagger.TagPeer(p, e.tagUseful, score) + } + }) + px.Go(func(ppx process.Process) { + <-ppx.Closing() + e.scoreLedger.Stop() + }) +} + // Start up workers to handle requests from other nodes for the data on this node func (e *Engine) StartWorkers(ctx context.Context, px process.Process) { // Start up blockstore manager e.bsm.start(px) - px.Go(e.scoreWorker) + e.startScoreLedger(px) for i := 0; i < e.taskWorkerCount; i++ { px.Go(func(px process.Process) { @@ -223,109 +250,6 @@ func (e *Engine) StartWorkers(ctx context.Context, px process.Process) { } } -// scoreWorker keeps track of how "useful" our peers are, updating scores in the -// connection manager. -// -// It does this by tracking two scores: short-term usefulness and long-term -// usefulness. Short-term usefulness is sampled frequently and highly weights -// new observations. Long-term usefulness is sampled less frequently and highly -// weights on long-term trends. -// -// In practice, we do this by keeping two EWMAs. If we see an interaction -// within the sampling period, we record the score, otherwise, we record a 0. -// The short-term one has a high alpha and is sampled every shortTerm period. -// The long-term one has a low alpha and is sampled every -// longTermRatio*shortTerm period. -// -// To calculate the final score, we sum the short-term and long-term scores then -// adjust it ±25% based on our debt ratio. Peers that have historically been -// more useful to us than we are to them get the highest score. 
-func (e *Engine) scoreWorker(px process.Process) { - ticker := time.NewTicker(e.peerSampleInterval) - defer ticker.Stop() - - type update struct { - peer peer.ID - score int - } - var ( - lastShortUpdate, lastLongUpdate time.Time - updates []update - ) - - for i := 0; ; i = (i + 1) % longTermRatio { - var now time.Time - select { - case now = <-ticker.C: - case <-px.Closing(): - return - } - - // The long term update ticks every `longTermRatio` short - // intervals. - updateLong := i == 0 - - e.lock.Lock() - for _, ledger := range e.ledgerMap { - ledger.lk.Lock() - - // Update the short-term score. - if ledger.lastExchange.After(lastShortUpdate) { - ledger.shortScore = ewma(ledger.shortScore, shortTermScore, shortTermAlpha) - } else { - ledger.shortScore = ewma(ledger.shortScore, 0, shortTermAlpha) - } - - // Update the long-term score. - if updateLong { - if ledger.lastExchange.After(lastLongUpdate) { - ledger.longScore = ewma(ledger.longScore, longTermScore, longTermAlpha) - } else { - ledger.longScore = ewma(ledger.longScore, 0, longTermAlpha) - } - } - - // Calculate the new score. - // - // The accounting score adjustment prefers peers _we_ - // need over peers that need us. This doesn't help with - // leeching. - score := int((ledger.shortScore + ledger.longScore) * ((ledger.Accounting.Score())*.5 + .75)) - - // Avoid updating the connection manager unless there's a change. This can be expensive. - if ledger.score != score { - // put these in a list so we can perform the updates outside _global_ the lock. - updates = append(updates, update{ledger.Partner, score}) - ledger.score = score - } - ledger.lk.Unlock() - } - e.lock.Unlock() - - // record the times. 
- lastShortUpdate = now - if updateLong { - lastLongUpdate = now - } - - // apply the updates - for _, update := range updates { - if update.score == 0 { - e.peerTagger.UntagPeer(update.peer, e.tagUseful) - } else { - e.peerTagger.TagPeer(update.peer, e.tagUseful, update.score) - } - } - // Keep the memory. It's not much and it saves us from having to allocate. - updates = updates[:0] - - // Used by the tests - if e.sampleCh != nil { - e.sampleCh <- struct{}{} - } - } -} - func (e *Engine) onPeerAdded(p peer.ID) { e.peerTagger.TagPeer(p, e.tagQueued, queuedTagWeight) } @@ -347,21 +271,9 @@ func (e *Engine) WantlistForPeer(p peer.ID) []wl.Entry { return entries } -// LedgerForPeer returns aggregated data about blocks swapped and communication -// with a given peer. +// LedgerForPeer returns aggregated data communication with a given peer. func (e *Engine) LedgerForPeer(p peer.ID) *Receipt { - ledger := e.findOrCreate(p) - - ledger.lk.Lock() - defer ledger.lk.Unlock() - - return &Receipt{ - Peer: ledger.Partner.String(), - Value: ledger.Accounting.Value(), - Sent: ledger.Accounting.BytesSent, - Recv: ledger.Accounting.BytesRecv, - Exchanged: ledger.ExchangeCount(), - } + return e.scoreLedger.GetReceipt(p) } // Each taskWorker pulls items off the request queue up to the maximum size @@ -671,7 +583,7 @@ func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block, haves []cid.Cid) // Record how many bytes were received in the ledger for _, blk := range blks { log.Debugw("Bitswap engine <- block", "local", e.self, "from", from, "cid", blk.Cid(), "size", len(blk.RawData())) - l.ReceivedBytes(len(blk.RawData())) + e.scoreLedger.AddToReceivedBytes(l.Partner, len(blk.RawData())) } l.lk.Unlock() @@ -741,7 +653,7 @@ func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) { // Remove sent blocks from the want list for the peer for _, block := range m.Blocks() { - l.SentBytes(len(block.RawData())) + e.scoreLedger.AddToSentBytes(l.Partner, len(block.RawData())) 
l.wantList.RemoveType(block.Cid(), pb.Message_Wantlist_Block) } @@ -764,6 +676,8 @@ func (e *Engine) PeerConnected(p peer.ID) { if !ok { e.ledgerMap[p] = newLedger(p) } + + e.scoreLedger.PeerConnected(p) } // PeerDisconnected is called when a peer disconnects. @@ -772,6 +686,8 @@ func (e *Engine) PeerDisconnected(p peer.ID) { defer e.lock.Unlock() delete(e.ledgerMap, p) + + e.scoreLedger.PeerDisconnected(p) } // If the want is a want-have, and it's below a certain size, send the full @@ -782,13 +698,11 @@ func (e *Engine) sendAsBlock(wantType pb.Message_Wantlist_WantType, blockSize in } func (e *Engine) numBytesSentTo(p peer.ID) uint64 { - // NB not threadsafe - return e.findOrCreate(p).Accounting.BytesSent + return e.LedgerForPeer(p).Sent } func (e *Engine) numBytesReceivedFrom(p peer.ID) uint64 { - // NB not threadsafe - return e.findOrCreate(p).Accounting.BytesRecv + return e.LedgerForPeer(p).Recv } // ledger lazily instantiates a ledger diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index 3cb765973..3046dc0d1 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -97,7 +97,7 @@ func newTestEngine(ctx context.Context, idStr string) engineSet { func newTestEngineWithSampling(ctx context.Context, idStr string, peerSampleInterval time.Duration, sampleCh chan struct{}) engineSet { fpt := &fakePeerTagger{} bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - e := newEngine(ctx, bs, fpt, "localhost", 0, peerSampleInterval, sampleCh) + e := newEngine(ctx, bs, fpt, "localhost", 0, NewTestScoreLedger(peerSampleInterval, sampleCh)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) return engineSet{ Peer: peer.ID(idStr), @@ -185,7 +185,7 @@ func peerIsPartner(p peer.ID, e *Engine) bool { func TestOutboxClosedWhenEngineClosed(t *testing.T) { ctx := context.Background() t.SkipNow() // TODO implement *Engine.Close - e := newEngine(ctx, 
blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}, "localhost", 0, shortTerm, nil) + e := newEngine(ctx, blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) var wg sync.WaitGroup wg.Add(1) @@ -513,7 +513,7 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { testCases = onlyTestCases } - e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm, nil) + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) for i, testCase := range testCases { t.Logf("Test case %d:", i) @@ -669,7 +669,7 @@ func TestPartnerWantHaveWantBlockActive(t *testing.T) { testCases = onlyTestCases } - e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm, nil) + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) var next envChan @@ -854,7 +854,7 @@ func TestPartnerWantsThenCancels(t *testing.T) { ctx := context.Background() for i := 0; i < numRounds; i++ { expected := make([][]string, 0, len(testcases)) - e := newEngine(ctx, bs, &fakePeerTagger{}, "localhost", 0, shortTerm, nil) + e := newEngine(ctx, bs, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) for _, testcase := range testcases { set := testcase[0] @@ -879,7 +879,7 @@ func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm, 
nil) + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) @@ -923,7 +923,7 @@ func TestSendDontHave(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm, nil) + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) @@ -987,7 +987,7 @@ func TestWantlistForPeer(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, shortTerm, nil) + e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) diff --git a/bitswap/internal/decision/ledger.go b/bitswap/internal/decision/ledger.go index 87fedc458..a607ff4f4 100644 --- a/bitswap/internal/decision/ledger.go +++ b/bitswap/internal/decision/ledger.go @@ -2,7 +2,6 @@ package decision import ( "sync" - "time" pb "github.com/ipfs/go-bitswap/message/pb" wl "github.com/ipfs/go-bitswap/wantlist" @@ -18,75 +17,17 @@ func newLedger(p peer.ID) *ledger { } } -// ledger stores the data exchange relationship between two peers. -// NOT threadsafe +// Keeps the wantlist for the partner. NOT threadsafe! type ledger struct { // Partner is the remote Peer. Partner peer.ID - // Accounting tracks bytes sent and received. - Accounting debtRatio - - // lastExchange is the time of the last data exchange. 
- lastExchange time.Time - - // These scores keep track of how useful we think this peer is. Short - // tracks short-term usefulness and long tracks long-term usefulness. - shortScore, longScore float64 - // Score keeps track of the score used in the peer tagger. We track it - // here to avoid unnecessarily updating the tags in the connection manager. - score int - - // exchangeCount is the number of exchanges with this peer - exchangeCount uint64 - // wantList is a (bounded, small) set of keys that Partner desires. wantList *wl.Wantlist lk sync.RWMutex } -// Receipt is a summary of the ledger for a given peer -// collecting various pieces of aggregated data for external -// reporting purposes. -type Receipt struct { - Peer string - Value float64 - Sent uint64 - Recv uint64 - Exchanged uint64 -} - -type debtRatio struct { - BytesSent uint64 - BytesRecv uint64 -} - -// Value returns the debt ratio, sent:receive. -func (dr *debtRatio) Value() float64 { - return float64(dr.BytesSent) / float64(dr.BytesRecv+1) -} - -// Score returns the debt _score_ on a 0-1 scale. 
-func (dr *debtRatio) Score() float64 { - if dr.BytesRecv == 0 { - return 0 - } - return float64(dr.BytesRecv) / float64(dr.BytesRecv+dr.BytesSent) -} - -func (l *ledger) SentBytes(n int) { - l.exchangeCount++ - l.lastExchange = time.Now() - l.Accounting.BytesSent += uint64(n) -} - -func (l *ledger) ReceivedBytes(n int) { - l.exchangeCount++ - l.lastExchange = time.Now() - l.Accounting.BytesRecv += uint64(n) -} - func (l *ledger) Wants(k cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType) { log.Debugf("peer %s wants %s", l.Partner, k) l.wantList.Add(k, priority, wantType) @@ -99,7 +40,3 @@ func (l *ledger) CancelWant(k cid.Cid) bool { func (l *ledger) WantListContains(k cid.Cid) (wl.Entry, bool) { return l.wantList.Contains(k) } - -func (l *ledger) ExchangeCount() uint64 { - return l.exchangeCount -} diff --git a/bitswap/internal/decision/scoreledger.go b/bitswap/internal/decision/scoreledger.go new file mode 100644 index 000000000..5ffd6bb8a --- /dev/null +++ b/bitswap/internal/decision/scoreledger.go @@ -0,0 +1,350 @@ +package decision + +import ( + "sync" + "time" + + peer "github.com/libp2p/go-libp2p-core/peer" +) + +const ( + // the alpha for the EWMA used to track short term usefulness + shortTermAlpha = 0.5 + + // the alpha for the EWMA used to track long term usefulness + longTermAlpha = 0.05 + + // how frequently the engine should sample usefulness. Peers that + // interact every shortTerm time period are considered "active". + shortTerm = 10 * time.Second + + // long term ratio defines what "long term" means in terms of the + // shortTerm duration. Peers that interact once every longTermRatio are + // considered useful over the long term. + longTermRatio = 10 + + // long/short term scores for tagging peers + longTermScore = 10 // this is a high tag but it grows _very_ slowly. + shortTermScore = 10 // this is a high tag but it'll go away quickly if we aren't using the peer. +) + +// Stores the data exchange relationship between two peers. 
+type scoreledger struct { + // Partner is the remote Peer. + partner peer.ID + + // tracks bytes sent... + bytesSent uint64 + + // ...and received. + bytesRecv uint64 + + // lastExchange is the time of the last data exchange. + lastExchange time.Time + + // These scores keep track of how useful we think this peer is. Short + // tracks short-term usefulness and long tracks long-term usefulness. + shortScore, longScore float64 + + // Score keeps track of the score used in the peer tagger. We track it + // here to avoid unnecessarily updating the tags in the connection manager. + score int + + // exchangeCount is the number of exchanges with this peer + exchangeCount uint64 + + // the record lock + lock sync.RWMutex +} + +// Receipt is a summary of the ledger for a given peer +// collecting various pieces of aggregated data for external +// reporting purposes. +type Receipt struct { + Peer string + Value float64 + Sent uint64 + Recv uint64 + Exchanged uint64 +} + +// Increments the sent counter. +func (l *scoreledger) AddToSentBytes(n int) { + l.lock.Lock() + defer l.lock.Unlock() + l.exchangeCount++ + l.lastExchange = time.Now() + l.bytesSent += uint64(n) +} + +// Increments the received counter. +func (l *scoreledger) AddToReceivedBytes(n int) { + l.lock.Lock() + defer l.lock.Unlock() + l.exchangeCount++ + l.lastExchange = time.Now() + l.bytesRecv += uint64(n) +} + +// Returns the Receipt for this ledger record. +func (l *scoreledger) Receipt() *Receipt { + l.lock.RLock() + defer l.lock.RUnlock() + + return &Receipt{ + Peer: l.partner.String(), + Value: float64(l.bytesSent) / float64(l.bytesRecv+1), + Sent: l.bytesSent, + Recv: l.bytesRecv, + Exchanged: l.exchangeCount, + } +} + +// DefaultScoreLedger is used by Engine as the default ScoreLedger. 
+type DefaultScoreLedger struct { + // a sample counting ticker + ticker *time.Ticker + // the score func + scorePeer ScorePeerFunc + // is closed on Close + closing chan struct{} + // protects the fields immediatly below + lock sync.RWMutex + // ledgerMap lists score ledgers by their partner key. + ledgerMap map[peer.ID]*scoreledger + // how frequently the engine should sample peer usefulness + peerSampleInterval time.Duration + // used by the tests to detect when a sample is taken + sampleCh chan struct{} +} + +// scoreWorker keeps track of how "useful" our peers are, updating scores in the +// connection manager. +// +// It does this by tracking two scores: short-term usefulness and long-term +// usefulness. Short-term usefulness is sampled frequently and highly weights +// new observations. Long-term usefulness is sampled less frequently and highly +// weights on long-term trends. +// +// In practice, we do this by keeping two EWMAs. If we see an interaction +// within the sampling period, we record the score, otherwise, we record a 0. +// The short-term one has a high alpha and is sampled every shortTerm period. +// The long-term one has a low alpha and is sampled every +// longTermRatio*shortTerm period. +// +// To calculate the final score, we sum the short-term and long-term scores then +// adjust it ±25% based on our debt ratio. Peers that have historically been +// more useful to us than we are to them get the highest score. +func (dsl *DefaultScoreLedger) scoreWorker() { + ticker := time.NewTicker(dsl.peerSampleInterval) + defer ticker.Stop() + + type update struct { + peer peer.ID + score int + } + var ( + lastShortUpdate, lastLongUpdate time.Time + updates []update + ) + + for i := 0; ; i = (i + 1) % longTermRatio { + var now time.Time + select { + case now = <-ticker.C: + case <-dsl.closing: + return + } + + // The long term update ticks every `longTermRatio` short + // intervals. 
+ updateLong := i == 0 + + dsl.lock.Lock() + for _, l := range dsl.ledgerMap { + l.lock.Lock() + + // Update the short-term score. + if l.lastExchange.After(lastShortUpdate) { + l.shortScore = ewma(l.shortScore, shortTermScore, shortTermAlpha) + } else { + l.shortScore = ewma(l.shortScore, 0, shortTermAlpha) + } + + // Update the long-term score. + if updateLong { + if l.lastExchange.After(lastLongUpdate) { + l.longScore = ewma(l.longScore, longTermScore, longTermAlpha) + } else { + l.longScore = ewma(l.longScore, 0, longTermAlpha) + } + } + + // Calculate the new score. + // + // The accounting score adjustment prefers peers _we_ + // need over peers that need us. This doesn't help with + // leeching. + var lscore float64 + if l.bytesRecv == 0 { + lscore = 0 + } else { + lscore = float64(l.bytesRecv) / float64(l.bytesRecv+l.bytesSent) + } + score := int((l.shortScore + l.longScore) * (lscore*.5 + .75)) + + // Avoid updating the connection manager unless there's a change. This can be expensive. + if l.score != score { + // put these in a list so we can perform the updates outside _global_ the lock. + updates = append(updates, update{l.partner, score}) + l.score = score + } + l.lock.Unlock() + } + dsl.lock.Unlock() + + // record the times. + lastShortUpdate = now + if updateLong { + lastLongUpdate = now + } + + // apply the updates + for _, update := range updates { + dsl.scorePeer(update.peer, update.score) + } + // Keep the memory. It's not much and it saves us from having to allocate. + updates = updates[:0] + + // Used by the tests + if dsl.sampleCh != nil { + dsl.sampleCh <- struct{}{} + } + } +} + +// Returns the score ledger for the given peer or nil if that peer +// is not on the ledger. +func (dsl *DefaultScoreLedger) find(p peer.ID) *scoreledger { + // Take a read lock (as it's less expensive) to check if we have + // a ledger for the peer. 
+ dsl.lock.RLock() + l, ok := dsl.ledgerMap[p] + dsl.lock.RUnlock() + if ok { + return l + } + return nil +} + +// Returns a new scoreledger. +func newScoreLedger(p peer.ID) *scoreledger { + return &scoreledger{ + partner: p, + } +} + +// Lazily instantiates a ledger. +func (dsl *DefaultScoreLedger) findOrCreate(p peer.ID) *scoreledger { + l := dsl.find(p) + if l != nil { + return l + } + + // There's no ledger, so take a write lock, then check again and + // create the ledger if necessary. + dsl.lock.Lock() + defer dsl.lock.Unlock() + l, ok := dsl.ledgerMap[p] + if !ok { + l = newScoreLedger(p) + dsl.ledgerMap[p] = l + } + return l +} + +// GetReceipt returns aggregated data communication with a given peer. +func (dsl *DefaultScoreLedger) GetReceipt(p peer.ID) *Receipt { + l := dsl.find(p) + if l != nil { + return l.Receipt() + } + + // Return a blank receipt otherwise. + return &Receipt{ + Peer: p.String(), + Value: 0, + Sent: 0, + Recv: 0, + Exchanged: 0, + } +} + +// Starts the default ledger sampling process. +func (dsl *DefaultScoreLedger) Start(scorePeer ScorePeerFunc) { + dsl.init(scorePeer) + go dsl.scoreWorker() +} + +// Stops the sampling process. +func (dsl *DefaultScoreLedger) Stop() { + close(dsl.closing) +} + +// Initializes the score ledger. +func (dsl *DefaultScoreLedger) init(scorePeer ScorePeerFunc) { + dsl.lock.Lock() + defer dsl.lock.Unlock() + dsl.ledgerMap = make(map[peer.ID]*scoreledger) + dsl.scorePeer = scorePeer +} + +// Increments the sent counter for the given peer. +func (dsl *DefaultScoreLedger) AddToSentBytes(p peer.ID, n int) { + l := dsl.findOrCreate(p) + l.AddToSentBytes(n) +} + +// Increments the received counter for the given peer. +func (dsl *DefaultScoreLedger) AddToReceivedBytes(p peer.ID, n int) { + l := dsl.findOrCreate(p) + l.AddToReceivedBytes(n) +} + +// PeerConnected should be called when a new peer connects, meaning +// we should open accounting. 
+func (dsl *DefaultScoreLedger) PeerConnected(p peer.ID) { + dsl.lock.Lock() + defer dsl.lock.Unlock() + _, ok := dsl.ledgerMap[p] + if !ok { + dsl.ledgerMap[p] = newScoreLedger(p) + } +} + +// PeerDisconnected should be called when a peer disconnects to +// clean up the accounting. +func (dsl *DefaultScoreLedger) PeerDisconnected(p peer.ID) { + dsl.lock.Lock() + defer dsl.lock.Unlock() + delete(dsl.ledgerMap, p) +} + +// Creates a new instance of the default score ledger. +func NewDefaultScoreLedger() *DefaultScoreLedger { + return &DefaultScoreLedger{ + ledgerMap: make(map[peer.ID]*scoreledger), + ticker: time.NewTicker(time.Millisecond * 100), + closing: make(chan struct{}), + peerSampleInterval: shortTerm, + } +} + +// Creates a new instance of the default score ledger with testing +// parameters. +func NewTestScoreLedger(peerSampleInterval time.Duration, sampleCh chan struct{}) *DefaultScoreLedger { + dsl := NewDefaultScoreLedger() + dsl.peerSampleInterval = peerSampleInterval + dsl.sampleCh = sampleCh + return dsl +} From 3312463786615e2a84e56ee8bc1a00614b416fce Mon Sep 17 00:00:00 2001 From: dirkmc Date: Thu, 3 Sep 2020 14:28:46 +0200 Subject: [PATCH 0961/1035] refactor: remove extraneous ledger field init (#437) This commit was moved from ipfs/go-bitswap@00f4df8d04e2af6bf83103b21bbb92010b6a9478 --- bitswap/internal/decision/scoreledger.go | 1 - 1 file changed, 1 deletion(-) diff --git a/bitswap/internal/decision/scoreledger.go b/bitswap/internal/decision/scoreledger.go index 5ffd6bb8a..6f7c0f162 100644 --- a/bitswap/internal/decision/scoreledger.go +++ b/bitswap/internal/decision/scoreledger.go @@ -295,7 +295,6 @@ func (dsl *DefaultScoreLedger) Stop() { func (dsl *DefaultScoreLedger) init(scorePeer ScorePeerFunc) { dsl.lock.Lock() defer dsl.lock.Unlock() - dsl.ledgerMap = make(map[peer.ID]*scoreledger) dsl.scorePeer = scorePeer } From 2873fb09ebe8c212014119c40161af4eb25b43d4 Mon Sep 17 00:00:00 2001 From: Paul Wolneykien Date: Thu, 10 Sep 2020 15:05:51 
+0300 Subject: [PATCH 0962/1035] Fix: Increment stats.MessagesSent in msgToStream() function (#441) * Share common code between network/ipfs_impl_test.go tests Extract the code that is common in TestMessageResendAfterError, TestMessageSendTimeout and TestMessageSendNotSupportedResponse to a separate function. * Make prepareNetwork() return two hosts and two networks Let prepareNetwork() make simmetric setup with two `ErrHost`s with two `impl` networks to be sure we test `impl` instances on both ends. * Added TestNetworkCounters test to the "network" package The test shows we have a problem with `MessagesSent` counter. * Fix: Increment stats.MessagesSent in msgToStream() function Fixes the bug with incrementing `MessagesSent` counter only in `SendMessage()` method if `impl`. Now it works for `MessageSender` too. * Allow to specify a network event listener for tests Added `listener network.Notifiee` to the `receiver` structure. If a listener is specified then `prepareNetwork()` connects it to the mock network it builds before making any connections. * Wait for all network streams are closed in testNetworkCounters Wait for all network streams are closed instead of just using a timeout. The timeout of 5 s is still used as a deadline (it makes the test to fail). * Fix: Close the MessageSender in testNetworkCounters() The `MessageSender` needs to be closed if we want all streams in the network to be closed. 
* Fix: Close MessageSender in other tests too Co-authored-by: Paul Wolneykien This commit was moved from ipfs/go-bitswap@bcf85413390a677b6e59325a59ea5c31f5e0c6bd --- bitswap/network/ipfs_impl.go | 3 +- bitswap/network/ipfs_impl_test.go | 279 ++++++++++++++++++------------ 2 files changed, 171 insertions(+), 111 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 3636b048a..0254e64fe 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -265,6 +265,8 @@ func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg. return fmt.Errorf("unrecognized protocol on remote: %s", s.Protocol()) } + atomic.AddUint64(&bsnet.stats.MessagesSent, 1) + if err := s.SetWriteDeadline(time.Time{}); err != nil { log.Warnf("error resetting deadline: %s", err) } @@ -320,7 +322,6 @@ func (bsnet *impl) SendMessage( _ = s.Reset() return err } - atomic.AddUint64(&bsnet.stats.MessagesSent, 1) // TODO(https://github.com/libp2p/go-libp2p-net/issues/28): Avoid this goroutine. 
//nolint diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index 454bb4109..3ad047f61 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -31,6 +31,7 @@ type receiver struct { connectionEvent chan bool lastMessage bsmsg.BitSwapMessage lastSender peer.ID + listener network.Notifiee } func newReceiver() *receiver { @@ -254,36 +255,38 @@ func TestMessageSendAndReceive(t *testing.T) { } } -func TestMessageResendAfterError(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - +func prepareNetwork(t *testing.T, ctx context.Context, p1 tnet.Identity, r1 *receiver, p2 tnet.Identity, r2 *receiver) (*ErrHost, bsnet.BitSwapNetwork, *ErrHost, bsnet.BitSwapNetwork, bsmsg.BitSwapMessage) { // create network mn := mocknet.New(ctx) mr := mockrouting.NewServer() - streamNet, err := tn.StreamNet(ctx, mn, mr) - if err != nil { - t.Fatal("Unable to setup network") - } - p1 := tnet.RandIdentityOrFatal(t) - p2 := tnet.RandIdentityOrFatal(t) + // Host 1 h1, err := mn.AddPeer(p1.PrivateKey(), p1.Address()) if err != nil { t.Fatal(err) } - - // Create a special host that we can force to start returning errors - eh := &ErrHost{Host: h1} - routing := mr.ClientWithDatastore(context.TODO(), p1, ds.NewMapDatastore()) - bsnet1 := bsnet.NewFromIpfsHost(eh, routing) - - bsnet2 := streamNet.Adapter(p2) - r1 := newReceiver() - r2 := newReceiver() + eh1 := &ErrHost{Host: h1} + routing1 := mr.ClientWithDatastore(context.TODO(), p1, ds.NewMapDatastore()) + bsnet1 := bsnet.NewFromIpfsHost(eh1, routing1) bsnet1.SetDelegate(r1) + if r1.listener != nil { + eh1.Network().Notify(r1.listener) + } + + // Host 2 + h2, err := mn.AddPeer(p2.PrivateKey(), p2.Address()) + if err != nil { + t.Fatal(err) + } + eh2 := &ErrHost{Host: h2} + routing2 := mr.ClientWithDatastore(context.TODO(), p2, ds.NewMapDatastore()) + bsnet2 := bsnet.NewFromIpfsHost(eh2, routing2) bsnet2.SetDelegate(r2) + if 
r2.listener != nil { + eh2.Network().Notify(r2.listener) + } + // Networking err = mn.LinkAll() if err != nil { t.Fatal(err) @@ -307,6 +310,20 @@ func TestMessageResendAfterError(t *testing.T) { msg := bsmsg.New(false) msg.AddEntry(block1.Cid(), 1, pb.Message_Wantlist_Block, true) + return eh1, bsnet1, eh2, bsnet2, msg +} + +func TestMessageResendAfterError(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + p1 := tnet.RandIdentityOrFatal(t) + r1 := newReceiver() + p2 := tnet.RandIdentityOrFatal(t) + r2 := newReceiver() + + eh, bsnet1, _, _, msg := prepareNetwork(t, ctx, p1, r1, p2, r2) + testSendErrorBackoff := 100 * time.Millisecond ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{ MaxRetries: 3, @@ -316,6 +333,7 @@ func TestMessageResendAfterError(t *testing.T) { if err != nil { t.Fatal(err) } + defer ms.Close() // Return an error from the networking layer the next time we try to send // a message @@ -345,54 +363,12 @@ func TestMessageSendTimeout(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - // create network - mn := mocknet.New(ctx) - mr := mockrouting.NewServer() - streamNet, err := tn.StreamNet(ctx, mn, mr) - if err != nil { - t.Fatal("Unable to setup network") - } p1 := tnet.RandIdentityOrFatal(t) - p2 := tnet.RandIdentityOrFatal(t) - - h1, err := mn.AddPeer(p1.PrivateKey(), p1.Address()) - if err != nil { - t.Fatal(err) - } - - // Create a special host that we can force to start timing out - eh := &ErrHost{Host: h1} - routing := mr.ClientWithDatastore(context.TODO(), p1, ds.NewMapDatastore()) - bsnet1 := bsnet.NewFromIpfsHost(eh, routing) - - bsnet2 := streamNet.Adapter(p2) r1 := newReceiver() + p2 := tnet.RandIdentityOrFatal(t) r2 := newReceiver() - bsnet1.SetDelegate(r1) - bsnet2.SetDelegate(r2) - err = mn.LinkAll() - if err != nil { - t.Fatal(err) - } - err = bsnet1.ConnectTo(ctx, p2.ID()) - if err != nil { - 
t.Fatal(err) - } - isConnected := <-r1.connectionEvent - if !isConnected { - t.Fatal("Expected connect event") - } - - err = bsnet2.ConnectTo(ctx, p1.ID()) - if err != nil { - t.Fatal(err) - } - - blockGenerator := blocksutil.NewBlockGenerator() - block1 := blockGenerator.Next() - msg := bsmsg.New(false) - msg.AddEntry(block1.Cid(), 1, pb.Message_Wantlist_Block, true) + eh, bsnet1, _, _, msg := prepareNetwork(t, ctx, p1, r1, p2, r2) ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{ MaxRetries: 3, @@ -402,6 +378,7 @@ func TestMessageSendTimeout(t *testing.T) { if err != nil { t.Fatal(err) } + defer ms.Close() // Return a DeadlineExceeded error from the networking layer the next time we try to // send a message @@ -416,7 +393,7 @@ func TestMessageSendTimeout(t *testing.T) { select { case <-time.After(500 * time.Millisecond): t.Fatal("Did not receive disconnect event") - case isConnected = <-r1.connectionEvent: + case isConnected := <-r1.connectionEvent: if isConnected { t.Fatal("Expected disconnect event (got connect event)") } @@ -427,69 +404,28 @@ func TestMessageSendNotSupportedResponse(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - // create network - mn := mocknet.New(ctx) - mr := mockrouting.NewServer() - streamNet, err := tn.StreamNet(ctx, mn, mr) - if err != nil { - t.Fatal("Unable to setup network") - } p1 := tnet.RandIdentityOrFatal(t) - p2 := tnet.RandIdentityOrFatal(t) - - h1, err := mn.AddPeer(p1.PrivateKey(), p1.Address()) - if err != nil { - t.Fatal(err) - } - - // Create a special host that responds with ErrNotSupported - eh := &ErrHost{Host: h1} - routing := mr.ClientWithDatastore(context.TODO(), p1, ds.NewMapDatastore()) - bsnet1 := bsnet.NewFromIpfsHost(eh, routing) - - bsnet2 := streamNet.Adapter(p2) r1 := newReceiver() + p2 := tnet.RandIdentityOrFatal(t) r2 := newReceiver() - bsnet1.SetDelegate(r1) - bsnet2.SetDelegate(r2) - - err = mn.LinkAll() - if err != nil { - 
t.Fatal(err) - } - err = bsnet1.ConnectTo(ctx, p2.ID()) - if err != nil { - t.Fatal(err) - } - isConnected := <-r1.connectionEvent - if !isConnected { - t.Fatal("Expected connect event") - } - err = bsnet2.ConnectTo(ctx, p1.ID()) - if err != nil { - t.Fatal(err) - } - - blockGenerator := blocksutil.NewBlockGenerator() - block1 := blockGenerator.Next() - msg := bsmsg.New(false) - msg.AddEntry(block1.Cid(), 1, pb.Message_Wantlist_Block, true) + eh, bsnet1, _, _, _ := prepareNetwork(t, ctx, p1, r1, p2, r2) eh.setError(multistream.ErrNotSupported) - _, err = bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{ + ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{ MaxRetries: 3, SendTimeout: 100 * time.Millisecond, SendErrorBackoff: 100 * time.Millisecond, }) if err == nil { + ms.Close() t.Fatal("Expected ErrNotSupported") } select { case <-time.After(500 * time.Millisecond): t.Fatal("Did not receive disconnect event") - case isConnected = <-r1.connectionEvent: + case isConnected := <-r1.connectionEvent: if isConnected { t.Fatal("Expected disconnect event (got connect event)") } @@ -535,9 +471,132 @@ func TestSupportsHave(t *testing.T) { if err != nil { t.Fatal(err) } + defer senderCurrent.Close() if senderCurrent.SupportsHave() != tc.expSupportsHave { t.Fatal("Expected sender HAVE message support", tc.proto, tc.expSupportsHave) } } } + +func testNetworkCounters(t *testing.T, n1 int, n2 int) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + p1 := tnet.RandIdentityOrFatal(t) + r1 := newReceiver() + p2 := tnet.RandIdentityOrFatal(t) + r2 := newReceiver() + + var wg1, wg2 sync.WaitGroup + r1.listener = &network.NotifyBundle{ + OpenedStreamF: func(n network.Network, s network.Stream) { + wg1.Add(1) + }, + ClosedStreamF: func(n network.Network, s network.Stream) { + wg1.Done() + }, + } + r2.listener = &network.NotifyBundle{ + OpenedStreamF: func(n network.Network, s network.Stream) { + wg2.Add(1) + }, + 
ClosedStreamF: func(n network.Network, s network.Stream) { + wg2.Done() + }, + } + _, bsnet1, _, bsnet2, msg := prepareNetwork(t, ctx, p1, r1, p2, r2) + + for n := 0; n < n1; n++ { + ctx, cancel := context.WithTimeout(ctx, time.Second) + err := bsnet1.SendMessage(ctx, p2.ID(), msg) + if err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("p2 did not receive message sent") + case <-r2.messageReceived: + for j := 0; j < 2; j++ { + err := bsnet2.SendMessage(ctx, p1.ID(), msg) + if err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("p1 did not receive message sent") + case <-r1.messageReceived: + } + } + } + cancel() + } + + if n2 > 0 { + ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{}) + if err != nil { + t.Fatal(err) + } + defer ms.Close() + for n := 0; n < n2; n++ { + ctx, cancel := context.WithTimeout(ctx, time.Second) + err = ms.SendMsg(ctx, msg) + if err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("p2 did not receive message sent") + case <-r2.messageReceived: + for j := 0; j < 2; j++ { + err := bsnet2.SendMessage(ctx, p1.ID(), msg) + if err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("p1 did not receive message sent") + case <-r1.messageReceived: + } + } + } + cancel() + } + ms.Close() + } + + // Wait until all streams are closed and MessagesRecvd counters + // updated. 
+ ctxto, cancelto := context.WithTimeout(ctx, 5*time.Second) + defer cancelto() + ctxwait, cancelwait := context.WithCancel(ctx) + defer cancelwait() + go func() { + wg1.Wait() + wg2.Wait() + cancelwait() + }() + select { + case <-ctxto.Done(): + t.Fatal("network streams closing timed out") + case <-ctxwait.Done(): + } + + if bsnet1.Stats().MessagesSent != uint64(n1+n2) { + t.Fatal(fmt.Errorf("expected %d sent messages, got %d", n1+n2, bsnet1.Stats().MessagesSent)) + } + + if bsnet2.Stats().MessagesRecvd != uint64(n1+n2) { + t.Fatal(fmt.Errorf("expected %d received messages, got %d", n1+n2, bsnet2.Stats().MessagesRecvd)) + } + + if bsnet1.Stats().MessagesRecvd != 2*uint64(n1+n2) { + t.Fatal(fmt.Errorf("expected %d received reply messages, got %d", 2*(n1+n2), bsnet1.Stats().MessagesRecvd)) + } +} + +func TestNetworkCounters(t *testing.T) { + for n := 0; n < 11; n++ { + testNetworkCounters(t, 10-n, n) + } +} From d02d3d1bde0e0324922e95dfd7c4c8163d60e55b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Zdyba=C5=82?= Date: Thu, 24 Sep 2020 11:20:43 +0200 Subject: [PATCH 0963/1035] Add WireTap interface (#444) * Add WireTap interface WireTap interface can be used to access all messages send and received by Bitswap. This can be used to implement advanced statistics/analysis logic, which is beyond scope of Bitswap, but can be implemented as IPFS plugin. 
Some examples of potential applications: - per CID bandwidth tracker (see: https://gitcoin.co/issue/PinataCloud/apollo/2/100023631) - detailed per peer stats - intrusion detection system (IDS) implementation * Add test for WireTap This commit was moved from ipfs/go-bitswap@bc3df6bd01b7f3d9be9d44e9a83b0663abf0230c --- bitswap/bitswap.go | 7 ++ bitswap/bitswap_test.go | 144 +++++++++++++++++++++++++++++++++++++++- bitswap/wiretap.go | 27 ++++++++ bitswap/workers.go | 3 + 4 files changed, 180 insertions(+), 1 deletion(-) create mode 100644 bitswap/wiretap.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 8af786a80..e87157573 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -252,6 +252,9 @@ type Bitswap struct { allMetric metrics.Histogram sentHistogram metrics.Histogram + // External statistics interface + wiretap WireTap + // the SessionManager routes requests to interested sessions sm *bssm.SessionManager @@ -419,6 +422,10 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg // TODO: this is bad, and could be easily abused. 
// Should only track *useful* messages in ledger + if bs.wiretap != nil { + bs.wiretap.MessageReceived(p, incoming) + } + iblocks := incoming.Blocks() if len(iblocks) > 0 { diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index b95faa30d..2962394d1 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -13,6 +13,8 @@ import ( decision "github.com/ipfs/go-bitswap/internal/decision" bssession "github.com/ipfs/go-bitswap/internal/session" "github.com/ipfs/go-bitswap/message" + bsmsg "github.com/ipfs/go-bitswap/message" + pb "github.com/ipfs/go-bitswap/message/pb" testinstance "github.com/ipfs/go-bitswap/testinstance" tn "github.com/ipfs/go-bitswap/testnet" blocks "github.com/ipfs/go-block-format" @@ -468,7 +470,6 @@ func TestBasicBitswap(t *testing.T) { if err != nil { t.Fatal(err) } - st1, err := instances[1].Exchange.Stat() if err != nil { t.Fatal(err) @@ -860,3 +861,144 @@ func TestWithScoreLedger(t *testing.T) { t.Fatal("Expected the score ledger to be closed within 5s") } } + +type logItem struct { + dir byte + pid peer.ID + msg bsmsg.BitSwapMessage +} +type mockWireTap struct { + log []logItem +} + +func (m *mockWireTap) MessageReceived(p peer.ID, msg bsmsg.BitSwapMessage) { + m.log = append(m.log, logItem{'r', p, msg}) +} +func (m *mockWireTap) MessageSent(p peer.ID, msg bsmsg.BitSwapMessage) { + m.log = append(m.log, logItem{'s', p, msg}) +} + +func TestWireTap(t *testing.T) { + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + bg := blocksutil.NewBlockGenerator() + + instances := ig.Instances(3) + blocks := bg.Blocks(2) + + // Install WireTap + wiretap := new(mockWireTap) + bitswap.EnableWireTap(wiretap)(instances[0].Exchange) + + // First peer has block + err := instances[0].Exchange.HasBlock(blocks[0]) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer 
cancel() + + // Second peer broadcasts want for block CID + // (Received by first and third peers) + _, err = instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) + if err != nil { + t.Fatal(err) + } + + // When second peer receives block, it should send out a cancel, so third + // peer should no longer keep second peer's want + if err = tu.WaitFor(ctx, func() error { + if len(instances[2].Exchange.WantlistForPeer(instances[1].Peer)) != 0 { + return fmt.Errorf("should have no items in other peers wantlist") + } + if len(instances[1].Exchange.GetWantlist()) != 0 { + return fmt.Errorf("shouldnt have anything in wantlist") + } + return nil + }); err != nil { + t.Fatal(err) + } + + // After communication, 3 messages should be logged via WireTap + if l := len(wiretap.log); l != 3 { + t.Fatal("expected 3 items logged via WireTap, found", l) + } + + // Received: 'Have' + if wiretap.log[0].dir != 'r' { + t.Error("expected message to be received") + } + if wiretap.log[0].pid != instances[1].Peer { + t.Error("expected peer", instances[1].Peer, ", found", wiretap.log[0].pid) + } + if l := len(wiretap.log[0].msg.Wantlist()); l != 1 { + t.Fatal("expected 1 entry in Wantlist, found", l) + } + if wiretap.log[0].msg.Wantlist()[0].WantType != pb.Message_Wantlist_Have { + t.Error("expected WantType equal to 'Have', found 'Block'") + } + + // Sent: Block + if wiretap.log[1].dir != 's' { + t.Error("expected message to be sent") + } + if wiretap.log[1].pid != instances[1].Peer { + t.Error("expected peer", instances[1].Peer, ", found", wiretap.log[1].pid) + } + if l := len(wiretap.log[1].msg.Blocks()); l != 1 { + t.Fatal("expected 1 entry in Blocks, found", l) + } + if wiretap.log[1].msg.Blocks()[0].Cid() != blocks[0].Cid() { + t.Error("wrong block Cid") + } + + // Received: 'Cancel' + if wiretap.log[2].dir != 'r' { + t.Error("expected message to be received") + } + if wiretap.log[2].pid != instances[1].Peer { + t.Error("expected peer", instances[1].Peer, ", found", wiretap.log[2].pid) 
+ } + if l := len(wiretap.log[2].msg.Wantlist()); l != 1 { + t.Fatal("expected 1 entry in Wantlist, found", l) + } + if wiretap.log[2].msg.Wantlist()[0].WantType != pb.Message_Wantlist_Block { + t.Error("expected WantType equal to 'Block', found 'Have'") + } + if wiretap.log[2].msg.Wantlist()[0].Cancel != true { + t.Error("expected entry with Cancel set to 'true'") + } + + // After disabling WireTap, no new messages are logged + bitswap.DisableWireTap()(instances[0].Exchange) + + err = instances[0].Exchange.HasBlock(blocks[1]) + if err != nil { + t.Fatal(err) + } + _, err = instances[1].Exchange.GetBlock(ctx, blocks[1].Cid()) + if err != nil { + t.Fatal(err) + } + if err = tu.WaitFor(ctx, func() error { + if len(instances[1].Exchange.GetWantlist()) != 0 { + return fmt.Errorf("shouldnt have anything in wantlist") + } + return nil + }); err != nil { + t.Fatal(err) + } + + if l := len(wiretap.log); l != 3 { + t.Fatal("expected 3 items logged via WireTap, found", l) + } + + for _, inst := range instances { + err := inst.Exchange.Close() + if err != nil { + t.Fatal(err) + } + } +} diff --git a/bitswap/wiretap.go b/bitswap/wiretap.go new file mode 100644 index 000000000..55cb21d3e --- /dev/null +++ b/bitswap/wiretap.go @@ -0,0 +1,27 @@ +package bitswap + +import ( + bsmsg "github.com/ipfs/go-bitswap/message" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +// WireTap provides methods to access all messages sent and received by Bitswap. +// This interface can be used to implement various statistics (this is original intent). +type WireTap interface { + MessageReceived(peer.ID, bsmsg.BitSwapMessage) + MessageSent(peer.ID, bsmsg.BitSwapMessage) +} + +// Configures Bitswap to use given wiretap. +func EnableWireTap(tap WireTap) Option { + return func(bs *Bitswap) { + bs.wiretap = tap + } +} + +// Configures Bitswap not to use any wiretap. 
+func DisableWireTap() Option { + return func(bs *Bitswap) { + bs.wiretap = nil + } +} diff --git a/bitswap/workers.go b/bitswap/workers.go index 208c02bff..5db534231 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -56,6 +56,9 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { // Ideally, yes. But we'd need some way to trigger a retry and/or drop // the peer. bs.engine.MessageSent(envelope.Peer, envelope.Message) + if bs.wiretap != nil { + bs.wiretap.MessageSent(envelope.Peer, envelope.Message) + } bs.sendBlocks(ctx, envelope) case <-ctx.Done(): return From 7a51725533b154cc673aeb28bd9667e655a34700 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 9 Nov 2020 19:25:35 -0800 Subject: [PATCH 0964/1035] fix: guard access to the mock wiretap with a lock This commit was moved from ipfs/go-bitswap@0a5174d2c124df828636d47f0ac22722122c6160 --- bitswap/bitswap_test.go | 51 ++++++++++++++++++++++++++--------------- 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 2962394d1..8037d1639 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -868,16 +868,27 @@ type logItem struct { msg bsmsg.BitSwapMessage } type mockWireTap struct { + mu sync.Mutex log []logItem } func (m *mockWireTap) MessageReceived(p peer.ID, msg bsmsg.BitSwapMessage) { + m.mu.Lock() + defer m.mu.Unlock() m.log = append(m.log, logItem{'r', p, msg}) } func (m *mockWireTap) MessageSent(p peer.ID, msg bsmsg.BitSwapMessage) { + m.mu.Lock() + defer m.mu.Unlock() m.log = append(m.log, logItem{'s', p, msg}) } +func (m *mockWireTap) getLog() []logItem { + m.mu.Lock() + defer m.mu.Unlock() + return m.log[:len(m.log):len(m.log)] +} + func TestWireTap(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) ig := testinstance.NewTestInstanceGenerator(net, nil, nil) @@ -921,53 +932,55 @@ func TestWireTap(t *testing.T) { t.Fatal(err) } + log := wiretap.getLog() + // After 
communication, 3 messages should be logged via WireTap - if l := len(wiretap.log); l != 3 { + if l := len(log); l != 3 { t.Fatal("expected 3 items logged via WireTap, found", l) } // Received: 'Have' - if wiretap.log[0].dir != 'r' { + if log[0].dir != 'r' { t.Error("expected message to be received") } - if wiretap.log[0].pid != instances[1].Peer { - t.Error("expected peer", instances[1].Peer, ", found", wiretap.log[0].pid) + if log[0].pid != instances[1].Peer { + t.Error("expected peer", instances[1].Peer, ", found", log[0].pid) } - if l := len(wiretap.log[0].msg.Wantlist()); l != 1 { + if l := len(log[0].msg.Wantlist()); l != 1 { t.Fatal("expected 1 entry in Wantlist, found", l) } - if wiretap.log[0].msg.Wantlist()[0].WantType != pb.Message_Wantlist_Have { + if log[0].msg.Wantlist()[0].WantType != pb.Message_Wantlist_Have { t.Error("expected WantType equal to 'Have', found 'Block'") } // Sent: Block - if wiretap.log[1].dir != 's' { + if log[1].dir != 's' { t.Error("expected message to be sent") } - if wiretap.log[1].pid != instances[1].Peer { - t.Error("expected peer", instances[1].Peer, ", found", wiretap.log[1].pid) + if log[1].pid != instances[1].Peer { + t.Error("expected peer", instances[1].Peer, ", found", log[1].pid) } - if l := len(wiretap.log[1].msg.Blocks()); l != 1 { + if l := len(log[1].msg.Blocks()); l != 1 { t.Fatal("expected 1 entry in Blocks, found", l) } - if wiretap.log[1].msg.Blocks()[0].Cid() != blocks[0].Cid() { + if log[1].msg.Blocks()[0].Cid() != blocks[0].Cid() { t.Error("wrong block Cid") } // Received: 'Cancel' - if wiretap.log[2].dir != 'r' { + if log[2].dir != 'r' { t.Error("expected message to be received") } - if wiretap.log[2].pid != instances[1].Peer { - t.Error("expected peer", instances[1].Peer, ", found", wiretap.log[2].pid) + if log[2].pid != instances[1].Peer { + t.Error("expected peer", instances[1].Peer, ", found", log[2].pid) } - if l := len(wiretap.log[2].msg.Wantlist()); l != 1 { + if l := len(log[2].msg.Wantlist()); l != 
1 { t.Fatal("expected 1 entry in Wantlist, found", l) } - if wiretap.log[2].msg.Wantlist()[0].WantType != pb.Message_Wantlist_Block { + if log[2].msg.Wantlist()[0].WantType != pb.Message_Wantlist_Block { t.Error("expected WantType equal to 'Block', found 'Have'") } - if wiretap.log[2].msg.Wantlist()[0].Cancel != true { + if log[2].msg.Wantlist()[0].Cancel != true { t.Error("expected entry with Cancel set to 'true'") } @@ -991,7 +1004,9 @@ func TestWireTap(t *testing.T) { t.Fatal(err) } - if l := len(wiretap.log); l != 3 { + log = wiretap.getLog() + + if l := len(log); l != 3 { t.Fatal("expected 3 items logged via WireTap, found", l) } From f8a7ad6a4c9dd0516e3932b0f303aee45c7c7d4b Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 2 Sep 2020 15:54:41 -0700 Subject: [PATCH 0965/1035] fix: update to go 1.15 lint warnings This commit was moved from ipfs/go-bitswap@179650d33515a758a2010e8b3b20617612b58bce --- bitswap/internal/sessionmanager/sessionmanager_test.go | 9 +++++---- bitswap/internal/testutil/testutil.go | 3 ++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/bitswap/internal/sessionmanager/sessionmanager_test.go b/bitswap/internal/sessionmanager/sessionmanager_test.go index db88855f5..8025bd5fa 100644 --- a/bitswap/internal/sessionmanager/sessionmanager_test.go +++ b/bitswap/internal/sessionmanager/sessionmanager_test.go @@ -2,6 +2,7 @@ package sessionmanager import ( "context" + "fmt" "sync" "testing" "time" @@ -118,7 +119,7 @@ func TestReceiveFrom(t *testing.T) { pm := &fakePeerManager{} sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") - p := peer.ID(123) + p := peer.ID(fmt.Sprint(123)) block := blocks.NewBlock([]byte("block")) firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) @@ -165,7 +166,7 @@ func TestReceiveBlocksWhenManagerShutdown(t *testing.T) { pm := &fakePeerManager{} sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") - p := peer.ID(123) + 
p := peer.ID(fmt.Sprint(123)) block := blocks.NewBlock([]byte("block")) firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) @@ -199,7 +200,7 @@ func TestReceiveBlocksWhenSessionContextCancelled(t *testing.T) { pm := &fakePeerManager{} sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") - p := peer.ID(123) + p := peer.ID(fmt.Sprint(123)) block := blocks.NewBlock([]byte("block")) firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) @@ -235,7 +236,7 @@ func TestShutdown(t *testing.T) { pm := &fakePeerManager{} sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") - p := peer.ID(123) + p := peer.ID(fmt.Sprint(123)) block := blocks.NewBlock([]byte("block")) cids := []cid.Cid{block.Cid()} firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) diff --git a/bitswap/internal/testutil/testutil.go b/bitswap/internal/testutil/testutil.go index 48af8a7d8..6b9fc6f39 100644 --- a/bitswap/internal/testutil/testutil.go +++ b/bitswap/internal/testutil/testutil.go @@ -1,6 +1,7 @@ package testutil import ( + "fmt" "math/rand" bsmsg "github.com/ipfs/go-bitswap/message" @@ -59,7 +60,7 @@ func GeneratePeers(n int) []peer.ID { peerIds := make([]peer.ID, 0, n) for i := 0; i < n; i++ { peerSeq++ - p := peer.ID(peerSeq) + p := peer.ID(fmt.Sprint(i)) peerIds = append(peerIds, p) } return peerIds From 07d61837059b81e5a17fe39eaf8b5a31cf017871 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 27 Oct 2020 15:45:23 -0700 Subject: [PATCH 0966/1035] feat: update for go-libp2p-core 0.7.0 interface changes This commit was moved from ipfs/go-bitswap@7c5676aceded5427ab301e6f0734cf9bf6cffdc0 --- bitswap/network/ipfs_impl.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 0254e64fe..e4357760c 100644 --- a/bitswap/network/ipfs_impl.go +++ 
b/bitswap/network/ipfs_impl.go @@ -13,7 +13,6 @@ import ( cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" "github.com/libp2p/go-libp2p-core/connmgr" - "github.com/libp2p/go-libp2p-core/helpers" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" @@ -132,7 +131,7 @@ func (s *streamMessageSender) Reset() error { // Close the stream func (s *streamMessageSender) Close() error { - return helpers.FullClose(s.stream) + return s.stream.Close() } // Indicates whether the peer supports HAVE / DONT_HAVE messages @@ -323,9 +322,6 @@ func (bsnet *impl) SendMessage( return err } - // TODO(https://github.com/libp2p/go-libp2p-net/issues/28): Avoid this goroutine. - //nolint - go helpers.AwaitEOF(s) return s.Close() } From 7b035efdde9ba7af35bd20e2471b0125f878613c Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 13 Nov 2020 13:30:03 -0800 Subject: [PATCH 0967/1035] fix: remove unnecessary (and leaked) ticker This commit was moved from ipfs/go-bitswap@7525baeb2903f06d06e7d2c88ff696be7dec38e8 --- bitswap/internal/decision/scoreledger.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/bitswap/internal/decision/scoreledger.go b/bitswap/internal/decision/scoreledger.go index 6f7c0f162..b9f1dfb90 100644 --- a/bitswap/internal/decision/scoreledger.go +++ b/bitswap/internal/decision/scoreledger.go @@ -102,8 +102,6 @@ func (l *scoreledger) Receipt() *Receipt { // DefaultScoreLedger is used by Engine as the default ScoreLedger. 
type DefaultScoreLedger struct { - // a sample counting ticker - ticker *time.Ticker // the score func scorePeer ScorePeerFunc // is closed on Close @@ -333,7 +331,6 @@ func (dsl *DefaultScoreLedger) PeerDisconnected(p peer.ID) { func NewDefaultScoreLedger() *DefaultScoreLedger { return &DefaultScoreLedger{ ledgerMap: make(map[peer.ID]*scoreledger), - ticker: time.NewTicker(time.Millisecond * 100), closing: make(chan struct{}), peerSampleInterval: shortTerm, } From fffc8a9de5a7b97fc5db2187615bcc31a18a6bf6 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 13 Nov 2020 13:30:39 -0800 Subject: [PATCH 0968/1035] fix: set the score ledger on start It's possible to start receiving and processing messages before we get around to starting. This commit was moved from ipfs/go-bitswap@ed0f4edf638e1b645c2f979ce62018be202f00f7 --- bitswap/internal/decision/engine.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 28584fb10..62957d611 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -178,6 +178,10 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, func newEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, self peer.ID, maxReplaceSize int, scoreLedger ScoreLedger) *Engine { + if scoreLedger == nil { + scoreLedger = NewDefaultScoreLedger() + } + e := &Engine{ ledgerMap: make(map[peer.ID]*ledger), scoreLedger: scoreLedger, @@ -221,9 +225,6 @@ func (e *Engine) UseScoreLedger(scoreLedger ScoreLedger) { // if it is unset, initializes the scoreLedger with the default // implementation. 
func (e *Engine) startScoreLedger(px process.Process) { - if e.scoreLedger == nil { - e.scoreLedger = NewDefaultScoreLedger() - } e.scoreLedger.Start(func(p peer.ID, score int) { if score == 0 { e.peerTagger.UntagPeer(p, e.tagUseful) From f3ceb05b73e14850eefb5e2fee2b68b3ed0e14a2 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Wed, 18 Nov 2020 10:11:12 +0100 Subject: [PATCH 0969/1035] feat: configurable engine blockstore worker count (#449) This commit was moved from ipfs/go-bitswap@47b99b1ce34a8add8e5f38cf2eec6bea1559b035 --- bitswap/bitswap.go | 51 ++++++++++++++----- .../internal/decision/blockstoremanager.go | 2 +- .../decision/blockstoremanager_test.go | 10 ++-- bitswap/internal/decision/engine.go | 19 ++----- bitswap/internal/decision/engine_test.go | 16 +++--- 5 files changed, 56 insertions(+), 42 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index e87157573..0297c0989 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -5,6 +5,7 @@ package bitswap import ( "context" "errors" + "fmt" "sync" "time" @@ -45,6 +46,9 @@ const ( // these requests take at _least_ two minutes at the moment. provideTimeout = time.Minute * 3 defaultProvSearchDelay = time.Second + + // Number of concurrent workers in decision engine that process requests to the blockstore + defaulEngineBlockstoreWorkerCount = 128 ) var ( @@ -85,6 +89,17 @@ func RebroadcastDelay(newRebroadcastDelay delay.D) Option { } } +// EngineBlockstoreWorkerCount sets the number of worker threads used for +// blockstore operations in the decision engine +func EngineBlockstoreWorkerCount(count int) Option { + if count <= 0 { + panic(fmt.Sprintf("Engine blockstore worker count is %d but must be > 0", count)) + } + return func(bs *Bitswap) { + bs.engineBstoreWorkerCount = count + } +} + // SetSendDontHaves indicates what to do when the engine receives a want-block // for a block that is not in the blockstore. 
Either // - Send a DONT_HAVE message @@ -99,7 +114,7 @@ func SetSendDontHaves(send bool) Option { // Configures the engine to use the given score decision logic. func WithScoreLedger(scoreLedger deciface.ScoreLedger) Option { return func(bs *Bitswap) { - bs.engine.UseScoreLedger(scoreLedger) + bs.engineScoreLedger = scoreLedger } } @@ -166,27 +181,26 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, } notif := notifications.New() sm = bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) - engine := decision.NewEngine(ctx, bstore, network.ConnectionManager(), network.Self()) bs := &Bitswap{ blockstore: bstore, - engine: engine, network: network, process: px, newBlocks: make(chan cid.Cid, HasBlockBufferSize), provideKeys: make(chan cid.Cid, provideKeysBufferSize), pm: pm, pqm: pqm, - sm: sm, - sim: sim, - notif: notif, - counters: new(counters), - dupMetric: dupHist, - allMetric: allHist, - sentHistogram: sentHistogram, - provideEnabled: true, - provSearchDelay: defaultProvSearchDelay, - rebroadcastDelay: delay.Fixed(time.Minute), + sm: sm, + sim: sim, + notif: notif, + counters: new(counters), + dupMetric: dupHist, + allMetric: allHist, + sentHistogram: sentHistogram, + provideEnabled: true, + provSearchDelay: defaultProvSearchDelay, + rebroadcastDelay: delay.Fixed(time.Minute), + engineBstoreWorkerCount: defaulEngineBlockstoreWorkerCount, } // apply functional options before starting and running bitswap @@ -194,12 +208,15 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, option(bs) } + // Set up decision engine + bs.engine = decision.NewEngine(bstore, bs.engineBstoreWorkerCount, network.ConnectionManager(), network.Self(), bs.engineScoreLedger) + bs.pqm.Startup() network.SetDelegate(bs) // Start up bitswaps async worker routines bs.startWorkers(ctx, px) - engine.StartWorkers(ctx, px) + bs.engine.StartWorkers(ctx, px) // bind the context and process. 
// do it over here to avoid closing before all setup is done. @@ -270,6 +287,12 @@ type Bitswap struct { // how often to rebroadcast providing requests to find more optimized providers rebroadcastDelay delay.D + + // how many worker threads to start for decision engine blockstore worker + engineBstoreWorkerCount int + + // the score ledger used by the decision engine + engineScoreLedger deciface.ScoreLedger } type counters struct { diff --git a/bitswap/internal/decision/blockstoremanager.go b/bitswap/internal/decision/blockstoremanager.go index 8d880a6c4..1cc09dffc 100644 --- a/bitswap/internal/decision/blockstoremanager.go +++ b/bitswap/internal/decision/blockstoremanager.go @@ -21,7 +21,7 @@ type blockstoreManager struct { // newBlockstoreManager creates a new blockstoreManager with the given context // and number of workers -func newBlockstoreManager(ctx context.Context, bs bstore.Blockstore, workerCount int) *blockstoreManager { +func newBlockstoreManager(bs bstore.Blockstore, workerCount int) *blockstoreManager { return &blockstoreManager{ bs: bs, workerCount: workerCount, diff --git a/bitswap/internal/decision/blockstoremanager_test.go b/bitswap/internal/decision/blockstoremanager_test.go index cac0a5b0e..49a10c50c 100644 --- a/bitswap/internal/decision/blockstoremanager_test.go +++ b/bitswap/internal/decision/blockstoremanager_test.go @@ -25,7 +25,7 @@ func TestBlockstoreManagerNotFoundKey(t *testing.T) { dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) - bsm := newBlockstoreManager(ctx, bstore, 5) + bsm := newBlockstoreManager(bstore, 5) bsm.start(process.WithTeardown(func() error { return nil })) cids := testutil.GenerateCids(4) @@ -64,7 +64,7 @@ func TestBlockstoreManager(t *testing.T) { dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) - bsm := newBlockstoreManager(ctx, bstore, 5) + bsm 
:= newBlockstoreManager(bstore, 5) bsm.start(process.WithTeardown(func() error { return nil })) exp := make(map[cid.Cid]blocks.Block) @@ -148,7 +148,7 @@ func TestBlockstoreManagerConcurrency(t *testing.T) { bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) workerCount := 5 - bsm := newBlockstoreManager(ctx, bstore, workerCount) + bsm := newBlockstoreManager(bstore, workerCount) bsm.start(process.WithTeardown(func() error { return nil })) blkSize := int64(8 * 1024) @@ -190,7 +190,7 @@ func TestBlockstoreManagerClose(t *testing.T) { dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) - bsm := newBlockstoreManager(ctx, bstore, 3) + bsm := newBlockstoreManager(bstore, 3) px := process.WithTeardown(func() error { return nil }) bsm.start(px) @@ -227,7 +227,7 @@ func TestBlockstoreManagerCtxDone(t *testing.T) { dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) - bsm := newBlockstoreManager(context.Background(), bstore, 3) + bsm := newBlockstoreManager(bstore, 3) proc := process.WithTeardown(func() error { return nil }) bsm.start(proc) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 62957d611..6e69ca657 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -76,9 +76,6 @@ const ( // Number of concurrent workers that pull tasks off the request queue taskWorkerCount = 8 - - // Number of concurrent workers that process requests to the blockstore - blockstoreWorkerCount = 128 ) // Envelope contains a message for a Peer. 
@@ -166,16 +163,16 @@ type Engine struct { sendDontHaves bool - self peer.ID + self peer.ID } // NewEngine creates a new block sending engine for the given block store -func NewEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, self peer.ID) *Engine { - return newEngine(ctx, bs, peerTagger, self, maxBlockSizeReplaceHasWithBlock, nil) +func NewEngine(bs bstore.Blockstore, bstoreWorkerCount int, peerTagger PeerTagger, self peer.ID, scoreLedger ScoreLedger) *Engine { + return newEngine(bs, bstoreWorkerCount, peerTagger, self, maxBlockSizeReplaceHasWithBlock, scoreLedger) } // This constructor is used by the tests -func newEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, self peer.ID, +func newEngine(bs bstore.Blockstore, bstoreWorkerCount int, peerTagger PeerTagger, self peer.ID, maxReplaceSize int, scoreLedger ScoreLedger) *Engine { if scoreLedger == nil { @@ -185,7 +182,7 @@ func newEngine(ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, e := &Engine{ ledgerMap: make(map[peer.ID]*ledger), scoreLedger: scoreLedger, - bsm: newBlockstoreManager(ctx, bs, blockstoreWorkerCount), + bsm: newBlockstoreManager(bs, bstoreWorkerCount), peerTagger: peerTagger, outbox: make(chan (<-chan *Envelope), outboxChanBuffer), workSignal: make(chan struct{}, 1), @@ -215,12 +212,6 @@ func (e *Engine) SetSendDontHaves(send bool) { e.sendDontHaves = send } -// Sets the scoreLedger to the given implementation. Should be called -// before StartWorkers(). -func (e *Engine) UseScoreLedger(scoreLedger ScoreLedger) { - e.scoreLedger = scoreLedger -} - // Starts the score ledger. Before start the function checks and, // if it is unset, initializes the scoreLedger with the default // implementation. 
diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index 3046dc0d1..b4f3d068e 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -97,7 +97,7 @@ func newTestEngine(ctx context.Context, idStr string) engineSet { func newTestEngineWithSampling(ctx context.Context, idStr string, peerSampleInterval time.Duration, sampleCh chan struct{}) engineSet { fpt := &fakePeerTagger{} bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - e := newEngine(ctx, bs, fpt, "localhost", 0, NewTestScoreLedger(peerSampleInterval, sampleCh)) + e := newEngine(bs, 4, fpt, "localhost", 0, NewTestScoreLedger(peerSampleInterval, sampleCh)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) return engineSet{ Peer: peer.ID(idStr), @@ -185,7 +185,7 @@ func peerIsPartner(p peer.ID, e *Engine) bool { func TestOutboxClosedWhenEngineClosed(t *testing.T) { ctx := context.Background() t.SkipNow() // TODO implement *Engine.Close - e := newEngine(ctx, blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e := newEngine(blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) var wg sync.WaitGroup wg.Add(1) @@ -513,7 +513,7 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { testCases = onlyTestCases } - e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) for i, testCase := range testCases { t.Logf("Test case %d:", i) @@ -669,7 +669,7 @@ func TestPartnerWantHaveWantBlockActive(t *testing.T) 
{ testCases = onlyTestCases } - e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) var next envChan @@ -854,7 +854,7 @@ func TestPartnerWantsThenCancels(t *testing.T) { ctx := context.Background() for i := 0; i < numRounds; i++ { expected := make([][]string, 0, len(testcases)) - e := newEngine(ctx, bs, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) for _, testcase := range testcases { set := testcase[0] @@ -879,7 +879,7 @@ func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) @@ -923,7 +923,7 @@ func TestSendDontHave(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - e := newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) @@ -987,7 +987,7 @@ func TestWantlistForPeer(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - e := 
newEngine(context.Background(), bs, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) From 1072e2269172593bad95601f542e41c032ff4b9e Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 26 Feb 2021 09:39:24 -0800 Subject: [PATCH 0970/1035] fix a startup race by creating the blockstoremanager process on init This commit was moved from ipfs/go-bitswap@0f5cc8bd3b8ca4d9c7a538dd55e7bdebf8e6f798 --- bitswap/internal/decision/blockstoremanager.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/bitswap/internal/decision/blockstoremanager.go b/bitswap/internal/decision/blockstoremanager.go index 1cc09dffc..dc022caf0 100644 --- a/bitswap/internal/decision/blockstoremanager.go +++ b/bitswap/internal/decision/blockstoremanager.go @@ -26,24 +26,24 @@ func newBlockstoreManager(bs bstore.Blockstore, workerCount int) *blockstoreMana bs: bs, workerCount: workerCount, jobs: make(chan func()), + px: process.WithTeardown(func() error { return nil }), } } func (bsm *blockstoreManager) start(px process.Process) { - bsm.px = px - + px.AddChild(bsm.px) // Start up workers for i := 0; i < bsm.workerCount; i++ { - px.Go(func(px process.Process) { - bsm.worker() + bsm.px.Go(func(px process.Process) { + bsm.worker(px) }) } } -func (bsm *blockstoreManager) worker() { +func (bsm *blockstoreManager) worker(px process.Process) { for { select { - case <-bsm.px.Closing(): + case <-px.Closing(): return case job := <-bsm.jobs: job() From 4e9c468a34b42bb6dd4647dfbe3bfae93dd541ec Mon Sep 17 00:00:00 2001 From: vyzo Date: Thu, 1 Apr 2021 19:26:04 +0300 Subject: [PATCH 0971/1035] ignore transient connections This commit was moved from ipfs/go-bitswap@cf23160d14079d59eda9cee54f110a5f0d6e0594 --- bitswap/network/ipfs_impl.go | 
10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index e4357760c..5873af5a1 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -422,9 +422,19 @@ func (nn *netNotifiee) impl() *impl { } func (nn *netNotifiee) Connected(n network.Network, v network.Conn) { + // ignore transient connections + if v.Stat().Transient { + return + } + nn.impl().connectEvtMgr.Connected(v.RemotePeer()) } func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) { + // ignore transient connections + if v.Stat().Transient { + return + } + nn.impl().connectEvtMgr.Disconnected(v.RemotePeer()) } func (nn *netNotifiee) OpenedStream(n network.Network, s network.Stream) {} From d80f2da0e92a722928e854af7d1a04db70e29fa4 Mon Sep 17 00:00:00 2001 From: Cory Schwartz Date: Wed, 14 Apr 2021 22:50:52 -0700 Subject: [PATCH 0972/1035] fix staticcheck This commit was moved from ipfs/go-bitswap@f4fae3a4f281fcaf5d4b07b2121eb4c062e82975 --- bitswap/bitswap_test.go | 5 ++--- bitswap/internal/decision/engine_test.go | 3 +-- bitswap/internal/messagequeue/messagequeue.go | 3 +-- bitswap/internal/messagequeue/messagequeue_test.go | 3 +-- bitswap/network/ipfs_impl_test.go | 6 +++--- 5 files changed, 8 insertions(+), 12 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 8037d1639..f28112d79 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -12,7 +12,6 @@ import ( deciface "github.com/ipfs/go-bitswap/decision" decision "github.com/ipfs/go-bitswap/internal/decision" bssession "github.com/ipfs/go-bitswap/internal/session" - "github.com/ipfs/go-bitswap/message" bsmsg "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" testinstance "github.com/ipfs/go-bitswap/testinstance" @@ -149,7 +148,7 @@ func TestUnwantedBlockNotAdded(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) block := 
blocks.NewBlock([]byte("block")) - bsMessage := message.New(true) + bsMessage := bsmsg.New(true) bsMessage.AddBlock(block) ig := testinstance.NewTestInstanceGenerator(net, nil, nil) @@ -215,7 +214,7 @@ func TestPendingBlockAdded(t *testing.T) { // Simulate receiving a message which contains the block in the "tofetch" queue lastBlock := blks[len(blks)-1] - bsMessage := message.New(true) + bsMessage := bsmsg.New(true) bsMessage.AddBlock(lastBlock) unknownPeer := peer.ID("QmUHfvCQrzyR6vFXmeyCptfCWedfcmfa12V6UuziDtrw23") instance.Exchange.ReceiveMessage(oneSecCtx, unknownPeer, bsMessage) diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index b4f3d068e..5c547ffef 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -183,10 +183,9 @@ func peerIsPartner(p peer.ID, e *Engine) bool { } func TestOutboxClosedWhenEngineClosed(t *testing.T) { - ctx := context.Background() t.SkipNow() // TODO implement *Engine.Close e := newEngine(blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) var wg sync.WaitGroup wg.Add(1) go func() { diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 24e80974b..908f12943 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -9,7 +9,6 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" bsnet "github.com/ipfs/go-bitswap/network" - "github.com/ipfs/go-bitswap/wantlist" bswl "github.com/ipfs/go-bitswap/wantlist" cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" @@ -142,7 +141,7 @@ func (r *recallWantlist) RemoveType(c cid.Cid, wtype 
pb.Message_Wantlist_WantTyp // // Returns true if the want was marked as sent. Returns false if the want wasn't // pending. -func (r *recallWantlist) MarkSent(e wantlist.Entry) bool { +func (r *recallWantlist) MarkSent(e bswl.Entry) bool { if !r.pending.RemoveType(e.Cid, e.WantType) { return false } diff --git a/bitswap/internal/messagequeue/messagequeue_test.go b/bitswap/internal/messagequeue/messagequeue_test.go index 4af3000ad..ca0ac7198 100644 --- a/bitswap/internal/messagequeue/messagequeue_test.go +++ b/bitswap/internal/messagequeue/messagequeue_test.go @@ -10,7 +10,6 @@ import ( "time" "github.com/ipfs/go-bitswap/internal/testutil" - "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" cid "github.com/ipfs/go-cid" @@ -251,7 +250,7 @@ func TestSendingMessagesPriority(t *testing.T) { if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { t.Fatal("wrong number of wants") } - byCid := make(map[cid.Cid]message.Entry) + byCid := make(map[cid.Cid]bsmsg.Entry) for _, entry := range messages[0] { byCid[entry.Cid] = entry } diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index 3ad047f61..475fcfc6a 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -67,7 +67,7 @@ func (r *receiver) PeerDisconnected(p peer.ID) { r.connectionEvent <- false } -var mockNetErr = fmt.Errorf("network err") +var errMockNetErr = fmt.Errorf("network err") type ErrStream struct { network.Stream @@ -115,7 +115,7 @@ func (eh *ErrHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID defer eh.lk.Unlock() if eh.err != nil { - return nil, mockNetErr + return nil, errMockNetErr } if eh.timingOut { return nil, context.DeadlineExceeded @@ -337,7 +337,7 @@ func TestMessageResendAfterError(t *testing.T) { // Return an error from the networking layer the next time we try to send // a message - eh.setError(mockNetErr) + eh.setError(errMockNetErr) go func() { 
time.Sleep(testSendErrorBackoff / 2) From fa1acb3b24994b7d9b9407603a633599764dbc98 Mon Sep 17 00:00:00 2001 From: Lucas Molas Date: Thu, 22 Apr 2021 11:28:39 -0300 Subject: [PATCH 0973/1035] fix(network): impl: add timeout in newStreamToPeer call This commit was moved from ipfs/go-bitswap@a28f6eb5e764dfc5b05a57cb24181a57a007b686 --- bitswap/network/ipfs_impl.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 5873af5a1..fc48ef674 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -27,6 +27,7 @@ import ( var log = logging.Logger("bitswap_network") +var connectTimeout = time.Second * 5 var sendMessageTimeout = time.Minute * 10 // NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host. @@ -312,7 +313,10 @@ func (bsnet *impl) SendMessage( p peer.ID, outgoing bsmsg.BitSwapMessage) error { - s, err := bsnet.newStreamToPeer(ctx, p) + tctx, cancel := context.WithTimeout(ctx, connectTimeout) + defer cancel() + + s, err := bsnet.newStreamToPeer(tctx, p) if err != nil { return err } From 2bc6262f0d8be6791e39bfb611dfbb3e16ae429c Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 29 Apr 2021 20:47:26 -0700 Subject: [PATCH 0974/1035] fix: fix alignment of stats struct in virtual network This needs to be at the top of the "allocated" struct. Otherwise, 32bit tests fail. This commit was moved from ipfs/go-bitswap@09ad29c0776bef30f67b93f031f6ea7c8e417799 --- bitswap/testnet/virtual.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index c44b430db..48ef7b435 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -184,11 +184,13 @@ func (n *network) SendMessage( } type networkClient struct { + // These need to be at the top of the struct (allocated on the heap) for alignment on 32bit platforms. 
+ stats bsnet.Stats + local peer.ID bsnet.Receiver network *network routing routing.Routing - stats bsnet.Stats supportedProtocols []protocol.ID } From deeb528ef24d7891c21476dac508d353f62eee1a Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 29 Apr 2021 22:07:51 -0700 Subject: [PATCH 0975/1035] test: deflake large-message test This commit was moved from ipfs/go-bitswap@7c482ecac9e87d8942b54f733948e51a281c6c8f --- bitswap/internal/messagequeue/messagequeue_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/internal/messagequeue/messagequeue_test.go b/bitswap/internal/messagequeue/messagequeue_test.go index ca0ac7198..4bb538eb0 100644 --- a/bitswap/internal/messagequeue/messagequeue_test.go +++ b/bitswap/internal/messagequeue/messagequeue_test.go @@ -501,7 +501,7 @@ func TestSendingLargeMessages(t *testing.T) { messageQueue.Startup() messageQueue.AddWants(wantBlocks, []cid.Cid{}) - messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) // want-block has size 44, so with maxMsgSize 44 * 3 (3 want-blocks), then if // we send 10 want-blocks we should expect 4 messages: From f0ab615c6fa6ce5e2faa558bfb10708f97210df5 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 29 Apr 2021 22:10:48 -0700 Subject: [PATCH 0976/1035] test: deflake donthave timeout test Give it some more time. We're not testing the _exact_ timeout. 
This commit was moved from ipfs/go-bitswap@42932307201141fdf9b7140420f6ea8c6cb92596 --- bitswap/internal/messagequeue/donthavetimeoutmgr_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go index 6f315fea9..cc0ebb301 100644 --- a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go +++ b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go @@ -355,7 +355,7 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { } // Sleep until after the default timeout - time.Sleep(10 * time.Millisecond) + time.Sleep(defaultTimeout * 2) // Now the keys should have timed out if tr.timedOutCount() != len(ks) { From b41b1a82fb65b03522efa17e040ef8b4ca345d4f Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 30 Apr 2021 11:15:06 -0700 Subject: [PATCH 0977/1035] test: deflake engine test This commit was moved from ipfs/go-bitswap@1198579780a5d65a00ed93cfcaa0000c486a8757 --- bitswap/internal/decision/engine_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index 5c547ffef..2cf9e773a 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -1058,18 +1058,18 @@ func TestTaggingUseful(t *testing.T) { msg.AddBlock(block) for i := 0; i < 3; i++ { - if me.PeerTagger.count(me.Engine.tagUseful) != 0 { - t.Fatal("Peers should be untagged but weren't") + if untagged := me.PeerTagger.count(me.Engine.tagUseful); untagged != 0 { + t.Fatalf("%d peers should be untagged but weren't", untagged) } me.Engine.MessageSent(friend, msg) - for j := 0; j < 3; j++ { + for j := 0; j < 2; j++ { <-sampleCh } - if me.PeerTagger.count(me.Engine.tagUseful) != 1 { - t.Fatal("Peers should be tagged but weren't") + if tagged := me.PeerTagger.count(me.Engine.tagUseful); tagged != 1 { + 
t.Fatalf("1 peer should be tagged, but %d were", tagged) } for j := 0; j < longTermRatio; j++ { From 2002754fbba7d9c2836b5c10205779d652fb9539 Mon Sep 17 00:00:00 2001 From: web3-bot Date: Sun, 13 Jun 2021 21:47:18 +0000 Subject: [PATCH 0978/1035] run gofmt -s This commit was moved from ipfs/go-bitswap@5cd913af9a1fe8714c5ad34eb624a47a8c33a6a4 --- bitswap/benchmarks_test.go | 44 +++--- bitswap/bitswap.go | 14 +- .../blockpresencemanager_test.go | 26 ++-- bitswap/internal/decision/engine.go | 2 +- bitswap/internal/decision/engine_test.go | 132 +++++++++--------- bitswap/network/ipfs_impl_test.go | 8 +- bitswap/testinstance/testinstance.go | 2 +- bitswap/testnet/virtual.go | 6 +- 8 files changed, 117 insertions(+), 117 deletions(-) diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index d3aaf04f9..dd4cf5b6c 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -19,9 +19,9 @@ import ( bitswap "github.com/ipfs/go-bitswap" bssession "github.com/ipfs/go-bitswap/internal/session" + bsnet "github.com/ipfs/go-bitswap/network" testinstance "github.com/ipfs/go-bitswap/testinstance" tn "github.com/ipfs/go-bitswap/testnet" - bsnet "github.com/ipfs/go-bitswap/network" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" @@ -53,14 +53,14 @@ type bench struct { var benches = []bench{ // Fetch from two seed nodes that both have all 100 blocks // - request one at a time, in series - bench{"3Nodes-AllToAll-OneAtATime", 3, 100, allToAll, oneAtATime}, + {"3Nodes-AllToAll-OneAtATime", 3, 100, allToAll, oneAtATime}, // - request all 100 with a single GetBlocks() call - bench{"3Nodes-AllToAll-BigBatch", 3, 100, allToAll, batchFetchAll}, + {"3Nodes-AllToAll-BigBatch", 3, 100, allToAll, batchFetchAll}, // Fetch from two seed nodes, one at a time, where: // - node A has blocks 0 - 74 // - node B has blocks 25 - 99 - bench{"3Nodes-Overlap1-OneAtATime", 3, 100, overlap1, oneAtATime}, + 
{"3Nodes-Overlap1-OneAtATime", 3, 100, overlap1, oneAtATime}, // Fetch from two seed nodes, where: // - node A has even blocks @@ -68,40 +68,40 @@ var benches = []bench{ // - both nodes have every third block // - request one at a time, in series - bench{"3Nodes-Overlap3-OneAtATime", 3, 100, overlap2, oneAtATime}, + {"3Nodes-Overlap3-OneAtATime", 3, 100, overlap2, oneAtATime}, // - request 10 at a time, in series - bench{"3Nodes-Overlap3-BatchBy10", 3, 100, overlap2, batchFetchBy10}, + {"3Nodes-Overlap3-BatchBy10", 3, 100, overlap2, batchFetchBy10}, // - request all 100 in parallel as individual GetBlock() calls - bench{"3Nodes-Overlap3-AllConcurrent", 3, 100, overlap2, fetchAllConcurrent}, + {"3Nodes-Overlap3-AllConcurrent", 3, 100, overlap2, fetchAllConcurrent}, // - request all 100 with a single GetBlocks() call - bench{"3Nodes-Overlap3-BigBatch", 3, 100, overlap2, batchFetchAll}, + {"3Nodes-Overlap3-BigBatch", 3, 100, overlap2, batchFetchAll}, // - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file) - bench{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch}, + {"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch}, // Fetch from nine seed nodes, all nodes have all blocks // - request one at a time, in series - bench{"10Nodes-AllToAll-OneAtATime", 10, 100, allToAll, oneAtATime}, + {"10Nodes-AllToAll-OneAtATime", 10, 100, allToAll, oneAtATime}, // - request 10 at a time, in series - bench{"10Nodes-AllToAll-BatchFetchBy10", 10, 100, allToAll, batchFetchBy10}, + {"10Nodes-AllToAll-BatchFetchBy10", 10, 100, allToAll, batchFetchBy10}, // - request all 100 with a single GetBlocks() call - bench{"10Nodes-AllToAll-BigBatch", 10, 100, allToAll, batchFetchAll}, + {"10Nodes-AllToAll-BigBatch", 10, 100, allToAll, batchFetchAll}, // - request all 100 in parallel as individual GetBlock() calls - bench{"10Nodes-AllToAll-AllConcurrent", 10, 100, allToAll, fetchAllConcurrent}, + {"10Nodes-AllToAll-AllConcurrent", 10, 100, 
allToAll, fetchAllConcurrent}, // - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file) - bench{"10Nodes-AllToAll-UnixfsFetch", 10, 100, allToAll, unixfsFileFetch}, + {"10Nodes-AllToAll-UnixfsFetch", 10, 100, allToAll, unixfsFileFetch}, // - follow a typical IPFS request pattern for 1000 blocks - bench{"10Nodes-AllToAll-UnixfsFetchLarge", 10, 1000, allToAll, unixfsFileFetchLarge}, + {"10Nodes-AllToAll-UnixfsFetchLarge", 10, 1000, allToAll, unixfsFileFetchLarge}, // Fetch from nine seed nodes, blocks are distributed randomly across all nodes (no dups) // - request one at a time, in series - bench{"10Nodes-OnePeerPerBlock-OneAtATime", 10, 100, onePeerPerBlock, oneAtATime}, + {"10Nodes-OnePeerPerBlock-OneAtATime", 10, 100, onePeerPerBlock, oneAtATime}, // - request all 100 with a single GetBlocks() call - bench{"10Nodes-OnePeerPerBlock-BigBatch", 10, 100, onePeerPerBlock, batchFetchAll}, + {"10Nodes-OnePeerPerBlock-BigBatch", 10, 100, onePeerPerBlock, batchFetchAll}, // - request 1, then 10, then 89 blocks (similar to how IPFS would fetch a file) - bench{"10Nodes-OnePeerPerBlock-UnixfsFetch", 10, 100, onePeerPerBlock, unixfsFileFetch}, + {"10Nodes-OnePeerPerBlock-UnixfsFetch", 10, 100, onePeerPerBlock, unixfsFileFetch}, // Fetch from 199 seed nodes, all nodes have all blocks, fetch all 20 blocks with a single GetBlocks() call - bench{"200Nodes-AllToAll-BigBatch", 200, 20, allToAll, batchFetchAll}, + {"200Nodes-AllToAll-BigBatch", 200, 20, allToAll, batchFetchAll}, } func BenchmarkFixedDelay(b *testing.B) { @@ -127,9 +127,9 @@ type mixedBench struct { } var mixedBenches = []mixedBench{ - mixedBench{bench{"3Nodes-Overlap3-OneAtATime", 3, 10, overlap2, oneAtATime}, 1, 2}, - mixedBench{bench{"3Nodes-AllToAll-OneAtATime", 3, 10, allToAll, oneAtATime}, 1, 2}, - mixedBench{bench{"3Nodes-Overlap3-AllConcurrent", 3, 10, overlap2, fetchAllConcurrent}, 1, 2}, + {bench{"3Nodes-Overlap3-OneAtATime", 3, 10, overlap2, oneAtATime}, 1, 2}, + 
{bench{"3Nodes-AllToAll-OneAtATime", 3, 10, allToAll, oneAtATime}, 1, 2}, + {bench{"3Nodes-Overlap3-AllConcurrent", 3, 10, overlap2, fetchAllConcurrent}, 1, 2}, // mixedBench{bench{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch}, 1, 2}, } diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 0297c0989..b7f763df5 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -183,13 +183,13 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, sm = bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) bs := &Bitswap{ - blockstore: bstore, - network: network, - process: px, - newBlocks: make(chan cid.Cid, HasBlockBufferSize), - provideKeys: make(chan cid.Cid, provideKeysBufferSize), - pm: pm, - pqm: pqm, + blockstore: bstore, + network: network, + process: px, + newBlocks: make(chan cid.Cid, HasBlockBufferSize), + provideKeys: make(chan cid.Cid, provideKeysBufferSize), + pm: pm, + pqm: pqm, sm: sm, sim: sim, notif: notif, diff --git a/bitswap/internal/blockpresencemanager/blockpresencemanager_test.go b/bitswap/internal/blockpresencemanager/blockpresencemanager_test.go index 579dbfcda..0d65c457e 100644 --- a/bitswap/internal/blockpresencemanager/blockpresencemanager_test.go +++ b/bitswap/internal/blockpresencemanager/blockpresencemanager_test.go @@ -208,24 +208,24 @@ func TestAllPeersDoNotHaveBlock(t *testing.T) { } testcases := []testcase{ - testcase{[]peer.ID{p0}, []cid.Cid{c0}, []cid.Cid{}}, - testcase{[]peer.ID{p1}, []cid.Cid{c0}, []cid.Cid{c0}}, - testcase{[]peer.ID{p2}, []cid.Cid{c0}, []cid.Cid{}}, + {[]peer.ID{p0}, []cid.Cid{c0}, []cid.Cid{}}, + {[]peer.ID{p1}, []cid.Cid{c0}, []cid.Cid{c0}}, + {[]peer.ID{p2}, []cid.Cid{c0}, []cid.Cid{}}, - testcase{[]peer.ID{p0}, []cid.Cid{c1}, []cid.Cid{c1}}, - testcase{[]peer.ID{p1}, []cid.Cid{c1}, []cid.Cid{}}, - testcase{[]peer.ID{p2}, []cid.Cid{c1}, []cid.Cid{}}, + {[]peer.ID{p0}, []cid.Cid{c1}, []cid.Cid{c1}}, + {[]peer.ID{p1}, []cid.Cid{c1}, 
[]cid.Cid{}}, + {[]peer.ID{p2}, []cid.Cid{c1}, []cid.Cid{}}, - testcase{[]peer.ID{p0}, []cid.Cid{c2}, []cid.Cid{c2}}, - testcase{[]peer.ID{p1}, []cid.Cid{c2}, []cid.Cid{}}, - testcase{[]peer.ID{p2}, []cid.Cid{c2}, []cid.Cid{c2}}, + {[]peer.ID{p0}, []cid.Cid{c2}, []cid.Cid{c2}}, + {[]peer.ID{p1}, []cid.Cid{c2}, []cid.Cid{}}, + {[]peer.ID{p2}, []cid.Cid{c2}, []cid.Cid{c2}}, // p0 recieved DONT_HAVE for c1 & c2 (but not for c0) - testcase{[]peer.ID{p0}, []cid.Cid{c0, c1, c2}, []cid.Cid{c1, c2}}, - testcase{[]peer.ID{p0, p1}, []cid.Cid{c0, c1, c2}, []cid.Cid{}}, + {[]peer.ID{p0}, []cid.Cid{c0, c1, c2}, []cid.Cid{c1, c2}}, + {[]peer.ID{p0, p1}, []cid.Cid{c0, c1, c2}, []cid.Cid{}}, // Both p0 and p2 received DONT_HAVE for c2 - testcase{[]peer.ID{p0, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{c2}}, - testcase{[]peer.ID{p0, p1, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{}}, + {[]peer.ID{p0, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{c2}}, + {[]peer.ID{p0, p1, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{}}, } for i, tc := range testcases { diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 6e69ca657..6950f59e5 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -163,7 +163,7 @@ type Engine struct { sendDontHaves bool - self peer.ID + self peer.ID } // NewEngine creates a new block sending engine for the given block store diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index 2cf9e773a..ac370d0db 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -236,31 +236,31 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { testCases := []testCase{ // Just send want-blocks - testCase{ + { wls: []testCaseEntry{ - testCaseEntry{ + { wantBlks: vowels, sendDontHave: false, }, }, exp: []testCaseExp{ - testCaseExp{ + { blks: vowels, }, }, }, // Send want-blocks and want-haves - testCase{ + { wls: []testCaseEntry{ - 
testCaseEntry{ + { wantBlks: vowels, wantHaves: "fgh", sendDontHave: false, }, }, exp: []testCaseExp{ - testCaseExp{ + { blks: vowels, haves: "fgh", }, @@ -269,16 +269,16 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { // Send want-blocks and want-haves, with some want-haves that are not // present, but without requesting DONT_HAVES - testCase{ + { wls: []testCaseEntry{ - testCaseEntry{ + { wantBlks: vowels, wantHaves: "fgh123", sendDontHave: false, }, }, exp: []testCaseExp{ - testCaseExp{ + { blks: vowels, haves: "fgh", }, @@ -287,16 +287,16 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { // Send want-blocks and want-haves, with some want-haves that are not // present, and request DONT_HAVES - testCase{ + { wls: []testCaseEntry{ - testCaseEntry{ + { wantBlks: vowels, wantHaves: "fgh123", sendDontHave: true, }, }, exp: []testCaseExp{ - testCaseExp{ + { blks: vowels, haves: "fgh", dontHaves: "123", @@ -306,16 +306,16 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { // Send want-blocks and want-haves, with some want-blocks and want-haves that are not // present, but without requesting DONT_HAVES - testCase{ + { wls: []testCaseEntry{ - testCaseEntry{ + { wantBlks: "aeiou123", wantHaves: "fgh456", sendDontHave: false, }, }, exp: []testCaseExp{ - testCaseExp{ + { blks: "aeiou", haves: "fgh", dontHaves: "", @@ -325,16 +325,16 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { // Send want-blocks and want-haves, with some want-blocks and want-haves that are not // present, and request DONT_HAVES - testCase{ + { wls: []testCaseEntry{ - testCaseEntry{ + { wantBlks: "aeiou123", wantHaves: "fgh456", sendDontHave: true, }, }, exp: []testCaseExp{ - testCaseExp{ + { blks: "aeiou", haves: "fgh", dontHaves: "123456", @@ -343,48 +343,48 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { }, // Send repeated want-blocks - testCase{ + { wls: []testCaseEntry{ - testCaseEntry{ + { wantBlks: "ae", sendDontHave: false, }, - 
testCaseEntry{ + { wantBlks: "io", sendDontHave: false, }, - testCaseEntry{ + { wantBlks: "u", sendDontHave: false, }, }, exp: []testCaseExp{ - testCaseExp{ + { blks: "aeiou", }, }, }, // Send repeated want-blocks and want-haves - testCase{ + { wls: []testCaseEntry{ - testCaseEntry{ + { wantBlks: "ae", wantHaves: "jk", sendDontHave: false, }, - testCaseEntry{ + { wantBlks: "io", wantHaves: "lm", sendDontHave: false, }, - testCaseEntry{ + { wantBlks: "u", sendDontHave: false, }, }, exp: []testCaseExp{ - testCaseExp{ + { blks: "aeiou", haves: "jklm", }, @@ -393,26 +393,26 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { // Send repeated want-blocks and want-haves, with some want-blocks and want-haves that are not // present, and request DONT_HAVES - testCase{ + { wls: []testCaseEntry{ - testCaseEntry{ + { wantBlks: "ae12", wantHaves: "jk5", sendDontHave: true, }, - testCaseEntry{ + { wantBlks: "io34", wantHaves: "lm", sendDontHave: true, }, - testCaseEntry{ + { wantBlks: "u", wantHaves: "6", sendDontHave: true, }, }, exp: []testCaseExp{ - testCaseExp{ + { blks: "aeiou", haves: "jklm", dontHaves: "123456", @@ -421,13 +421,13 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { }, // Send want-block then want-have for same CID - testCase{ + { wls: []testCaseEntry{ - testCaseEntry{ + { wantBlks: "a", sendDontHave: true, }, - testCaseEntry{ + { wantHaves: "a", sendDontHave: true, }, @@ -435,67 +435,67 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { // want-have should be ignored because there was already a // want-block for the same CID in the queue exp: []testCaseExp{ - testCaseExp{ + { blks: "a", }, }, }, // Send want-have then want-block for same CID - testCase{ + { wls: []testCaseEntry{ - testCaseEntry{ + { wantHaves: "b", sendDontHave: true, }, - testCaseEntry{ + { wantBlks: "b", sendDontHave: true, }, }, // want-block should overwrite existing want-have exp: []testCaseExp{ - testCaseExp{ + { blks: "b", }, }, }, // Send want-block 
then want-block for same CID - testCase{ + { wls: []testCaseEntry{ - testCaseEntry{ + { wantBlks: "a", sendDontHave: true, }, - testCaseEntry{ + { wantBlks: "a", sendDontHave: true, }, }, // second want-block should be ignored exp: []testCaseExp{ - testCaseExp{ + { blks: "a", }, }, }, // Send want-have then want-have for same CID - testCase{ + { wls: []testCaseEntry{ - testCaseEntry{ + { wantHaves: "a", sendDontHave: true, }, - testCaseEntry{ + { wantHaves: "a", sendDontHave: true, }, }, // second want-have should be ignored exp: []testCaseExp{ - testCaseExp{ + { haves: "a", }, }, @@ -573,13 +573,13 @@ func TestPartnerWantHaveWantBlockActive(t *testing.T) { testCases := []testCase{ // Send want-block then want-have for same CID - testCase{ + { wls: []testCaseEntry{ - testCaseEntry{ + { wantBlks: "a", sendDontHave: true, }, - testCaseEntry{ + { wantHaves: "a", sendDontHave: true, }, @@ -587,20 +587,20 @@ func TestPartnerWantHaveWantBlockActive(t *testing.T) { // want-have should be ignored because there was already a // want-block for the same CID in the queue exp: []testCaseExp{ - testCaseExp{ + { blks: "a", }, }, }, // Send want-have then want-block for same CID - testCase{ + { wls: []testCaseEntry{ - testCaseEntry{ + { wantHaves: "b", sendDontHave: true, }, - testCaseEntry{ + { wantBlks: "b", sendDontHave: true, }, @@ -608,50 +608,50 @@ func TestPartnerWantHaveWantBlockActive(t *testing.T) { // want-have is active when want-block is added, so want-have // should get sent, then want-block exp: []testCaseExp{ - testCaseExp{ + { haves: "b", }, - testCaseExp{ + { blks: "b", }, }, }, // Send want-block then want-block for same CID - testCase{ + { wls: []testCaseEntry{ - testCaseEntry{ + { wantBlks: "a", sendDontHave: true, }, - testCaseEntry{ + { wantBlks: "a", sendDontHave: true, }, }, // second want-block should be ignored exp: []testCaseExp{ - testCaseExp{ + { blks: "a", }, }, }, // Send want-have then want-have for same CID - testCase{ + { wls: []testCaseEntry{ - 
testCaseEntry{ + { wantHaves: "a", sendDontHave: true, }, - testCaseEntry{ + { wantHaves: "a", sendDontHave: true, }, }, // second want-have should be ignored exp: []testCaseExp{ - testCaseExp{ + { haves: "a", }, }, diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index 475fcfc6a..0d7968ecb 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -447,10 +447,10 @@ func TestSupportsHave(t *testing.T) { } testCases := []testCase{ - testCase{bsnet.ProtocolBitswap, true}, - testCase{bsnet.ProtocolBitswapOneOne, false}, - testCase{bsnet.ProtocolBitswapOneZero, false}, - testCase{bsnet.ProtocolBitswapNoVers, false}, + {bsnet.ProtocolBitswap, true}, + {bsnet.ProtocolBitswapOneOne, false}, + {bsnet.ProtocolBitswapOneZero, false}, + {bsnet.ProtocolBitswapNoVers, false}, } for _, tc := range testCases { diff --git a/bitswap/testinstance/testinstance.go b/bitswap/testinstance/testinstance.go index 2ee6be8bd..05e3d515e 100644 --- a/bitswap/testinstance/testinstance.go +++ b/bitswap/testinstance/testinstance.go @@ -5,8 +5,8 @@ import ( "time" bitswap "github.com/ipfs/go-bitswap" - tn "github.com/ipfs/go-bitswap/testnet" bsnet "github.com/ipfs/go-bitswap/network" + tn "github.com/ipfs/go-bitswap/testnet" ds "github.com/ipfs/go-datastore" delayed "github.com/ipfs/go-datastore/delayed" ds_sync "github.com/ipfs/go-datastore/sync" diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 48ef7b435..66f5e8216 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -271,9 +271,9 @@ func (mp *messagePasser) Reset() error { } var oldProtos = map[protocol.ID]struct{}{ - bsnet.ProtocolBitswapNoVers: struct{}{}, - bsnet.ProtocolBitswapOneZero: struct{}{}, - bsnet.ProtocolBitswapOneOne: struct{}{}, + bsnet.ProtocolBitswapNoVers: {}, + bsnet.ProtocolBitswapOneZero: {}, + bsnet.ProtocolBitswapOneOne: {}, } func (mp *messagePasser) SupportsHave() bool { From 
9cc220e920c7f7e07534c7eea555dbc6e5b9f084 Mon Sep 17 00:00:00 2001 From: Hannah Howard Date: Fri, 4 Jun 2021 10:16:05 -0700 Subject: [PATCH 0979/1035] fix(decision): fix test flakiness through mock clock (#494) This commit was moved from ipfs/go-bitswap@531f3e232c1a5299a9732d697cd57d293102e9a3 --- bitswap/internal/decision/engine_test.go | 37 ++++++++++++++---------- bitswap/internal/decision/scoreledger.go | 21 +++++++++----- 2 files changed, 35 insertions(+), 23 deletions(-) diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index ac370d0db..f7a752577 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/benbjohnson/clock" "github.com/ipfs/go-bitswap/internal/testutil" message "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" @@ -91,13 +92,13 @@ type engineSet struct { } func newTestEngine(ctx context.Context, idStr string) engineSet { - return newTestEngineWithSampling(ctx, idStr, shortTerm, nil) + return newTestEngineWithSampling(ctx, idStr, shortTerm, nil, clock.New()) } -func newTestEngineWithSampling(ctx context.Context, idStr string, peerSampleInterval time.Duration, sampleCh chan struct{}) engineSet { +func newTestEngineWithSampling(ctx context.Context, idStr string, peerSampleInterval time.Duration, sampleCh chan struct{}, clock clock.Clock) engineSet { fpt := &fakePeerTagger{} bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - e := newEngine(bs, 4, fpt, "localhost", 0, NewTestScoreLedger(peerSampleInterval, sampleCh)) + e := newEngine(bs, 4, fpt, "localhost", 0, NewTestScoreLedger(peerSampleInterval, sampleCh, clock)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) return engineSet{ Peer: peer.ID(idStr), @@ -184,7 +185,7 @@ func peerIsPartner(p peer.ID, e *Engine) bool { func TestOutboxClosedWhenEngineClosed(t *testing.T) { 
t.SkipNow() // TODO implement *Engine.Close - e := newEngine(blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e := newEngine(blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) var wg sync.WaitGroup wg.Add(1) @@ -512,7 +513,7 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { testCases = onlyTestCases } - e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) for i, testCase := range testCases { t.Logf("Test case %d:", i) @@ -668,7 +669,7 @@ func TestPartnerWantHaveWantBlockActive(t *testing.T) { testCases = onlyTestCases } - e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) var next envChan @@ -853,7 +854,7 @@ func TestPartnerWantsThenCancels(t *testing.T) { ctx := context.Background() for i := 0; i < numRounds; i++ { expected := make([][]string, 0, len(testcases)) - e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) for _, testcase := range testcases { set := testcase[0] @@ -878,7 +879,7 @@ func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) 
otherPeer := libp2ptest.RandPeerIDFatal(t) - e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) @@ -922,7 +923,7 @@ func TestSendDontHave(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) @@ -986,7 +987,7 @@ func TestWantlistForPeer(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) @@ -1044,13 +1045,15 @@ func TestTaggingPeers(t *testing.T) { } func TestTaggingUseful(t *testing.T) { - peerSampleInterval := 1 * time.Millisecond + peerSampleIntervalHalf := 10 * time.Millisecond ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() sampleCh := make(chan struct{}) - me := newTestEngineWithSampling(ctx, "engine", peerSampleInterval, sampleCh) + mockClock := clock.NewMock() + me := newTestEngineWithSampling(ctx, "engine", peerSampleIntervalHalf*2, sampleCh, mockClock) + mockClock.Add(1 * time.Millisecond) friend := peer.ID("friend") block := blocks.NewBlock([]byte("foobar")) @@ -1061,18 
+1064,18 @@ func TestTaggingUseful(t *testing.T) { if untagged := me.PeerTagger.count(me.Engine.tagUseful); untagged != 0 { t.Fatalf("%d peers should be untagged but weren't", untagged) } - + mockClock.Add(peerSampleIntervalHalf) me.Engine.MessageSent(friend, msg) - for j := 0; j < 2; j++ { - <-sampleCh - } + mockClock.Add(peerSampleIntervalHalf) + <-sampleCh if tagged := me.PeerTagger.count(me.Engine.tagUseful); tagged != 1 { t.Fatalf("1 peer should be tagged, but %d were", tagged) } for j := 0; j < longTermRatio; j++ { + mockClock.Add(peerSampleIntervalHalf * 2) <-sampleCh } } @@ -1082,6 +1085,7 @@ func TestTaggingUseful(t *testing.T) { } for j := 0; j < longTermRatio; j++ { + mockClock.Add(peerSampleIntervalHalf * 2) <-sampleCh } @@ -1090,6 +1094,7 @@ func TestTaggingUseful(t *testing.T) { } for j := 0; j < longTermRatio; j++ { + mockClock.Add(peerSampleIntervalHalf * 2) <-sampleCh } diff --git a/bitswap/internal/decision/scoreledger.go b/bitswap/internal/decision/scoreledger.go index b9f1dfb90..188c998a3 100644 --- a/bitswap/internal/decision/scoreledger.go +++ b/bitswap/internal/decision/scoreledger.go @@ -4,6 +4,7 @@ import ( "sync" "time" + "github.com/benbjohnson/clock" peer "github.com/libp2p/go-libp2p-core/peer" ) @@ -55,6 +56,8 @@ type scoreledger struct { // the record lock lock sync.RWMutex + + clock clock.Clock } // Receipt is a summary of the ledger for a given peer @@ -73,7 +76,7 @@ func (l *scoreledger) AddToSentBytes(n int) { l.lock.Lock() defer l.lock.Unlock() l.exchangeCount++ - l.lastExchange = time.Now() + l.lastExchange = l.clock.Now() l.bytesSent += uint64(n) } @@ -82,7 +85,7 @@ func (l *scoreledger) AddToReceivedBytes(n int) { l.lock.Lock() defer l.lock.Unlock() l.exchangeCount++ - l.lastExchange = time.Now() + l.lastExchange = l.clock.Now() l.bytesRecv += uint64(n) } @@ -114,6 +117,7 @@ type DefaultScoreLedger struct { peerSampleInterval time.Duration // used by the tests to detect when a sample is taken sampleCh chan struct{} + clock 
clock.Clock } // scoreWorker keeps track of how "useful" our peers are, updating scores in the @@ -134,7 +138,7 @@ type DefaultScoreLedger struct { // adjust it ±25% based on our debt ratio. Peers that have historically been // more useful to us than we are to them get the highest score. func (dsl *DefaultScoreLedger) scoreWorker() { - ticker := time.NewTicker(dsl.peerSampleInterval) + ticker := dsl.clock.Ticker(dsl.peerSampleInterval) defer ticker.Stop() type update struct { @@ -236,9 +240,10 @@ func (dsl *DefaultScoreLedger) find(p peer.ID) *scoreledger { } // Returns a new scoreledger. -func newScoreLedger(p peer.ID) *scoreledger { +func newScoreLedger(p peer.ID, clock clock.Clock) *scoreledger { return &scoreledger{ partner: p, + clock: clock, } } @@ -255,7 +260,7 @@ func (dsl *DefaultScoreLedger) findOrCreate(p peer.ID) *scoreledger { defer dsl.lock.Unlock() l, ok := dsl.ledgerMap[p] if !ok { - l = newScoreLedger(p) + l = newScoreLedger(p, dsl.clock) dsl.ledgerMap[p] = l } return l @@ -315,7 +320,7 @@ func (dsl *DefaultScoreLedger) PeerConnected(p peer.ID) { defer dsl.lock.Unlock() _, ok := dsl.ledgerMap[p] if !ok { - dsl.ledgerMap[p] = newScoreLedger(p) + dsl.ledgerMap[p] = newScoreLedger(p, dsl.clock) } } @@ -333,14 +338,16 @@ func NewDefaultScoreLedger() *DefaultScoreLedger { ledgerMap: make(map[peer.ID]*scoreledger), closing: make(chan struct{}), peerSampleInterval: shortTerm, + clock: clock.New(), } } // Creates a new instance of the default score ledger with testing // parameters. 
-func NewTestScoreLedger(peerSampleInterval time.Duration, sampleCh chan struct{}) *DefaultScoreLedger { +func NewTestScoreLedger(peerSampleInterval time.Duration, sampleCh chan struct{}, clock clock.Clock) *DefaultScoreLedger { dsl := NewDefaultScoreLedger() dsl.peerSampleInterval = peerSampleInterval dsl.sampleCh = sampleCh + dsl.clock = clock return dsl } From 6a1faf1f399f2c4f53f4972d6464bf0b49450f3b Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 3 Jun 2021 13:40:58 -0700 Subject: [PATCH 0980/1035] fix(network): fix TestNetworkCounters count message received before callback so that the count is always accurate at the time of counting This commit was moved from ipfs/go-bitswap@072bd1159bc5ed944de663c2ba1a0a1845519458 --- bitswap/network/ipfs_impl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index fc48ef674..b05ce5584 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -403,8 +403,8 @@ func (bsnet *impl) handleNewStream(s network.Stream) { ctx := context.Background() log.Debugf("bitswap net handleNewStream from %s", s.Conn().RemotePeer()) bsnet.connectEvtMgr.OnMessage(s.Conn().RemotePeer()) - bsnet.receiver.ReceiveMessage(ctx, p, received) atomic.AddUint64(&bsnet.stats.MessagesRecvd, 1) + bsnet.receiver.ReceiveMessage(ctx, p, received) } } From cda7896ae1b7a8fae8387d5424eca3c4d324f53d Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Wed, 2 Jun 2021 16:31:10 -0700 Subject: [PATCH 0981/1035] fix(bitswap): add send don't have timeout option The TestSessionWithPeers test was most commonly failing cause of a don't have timeout, which triggered simulated don't have message for all CIDs on the peer with content, which triggered a re-broadcast, causing peers with no content to receive additional wants This commit was moved from ipfs/go-bitswap@d1a550323a6e0d9688790f8799f37df6cc14e992 --- bitswap/bitswap.go | 17 +++++++++++++++-- 
bitswap/bitswap_with_sessions_test.go | 2 +- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b7f763df5..760512679 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -118,6 +118,12 @@ func WithScoreLedger(scoreLedger deciface.ScoreLedger) Option { } } +func SetSendDontHavesOnTimeout(send bool) Option { + return func(bs *Bitswap) { + bs.sendDontHavesOnTimeout = send + } +} + // New initializes a BitSwap instance that communicates over the provided // BitSwapNetwork. This function registers the returned instance as the network // delegate. Runs until context is cancelled or bitswap.Close is called. @@ -149,9 +155,12 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, // has an old version of Bitswap that doesn't support DONT_HAVE messages, // or when no response is received within a timeout. var sm *bssm.SessionManager + var bs *Bitswap onDontHaveTimeout := func(p peer.ID, dontHaves []cid.Cid) { // Simulate a message arriving with DONT_HAVEs - sm.ReceiveFrom(ctx, p, nil, nil, dontHaves) + if bs.sendDontHavesOnTimeout { + sm.ReceiveFrom(ctx, p, nil, nil, dontHaves) + } } peerQueueFactory := func(ctx context.Context, p peer.ID) bspm.PeerQueue { return bsmq.New(ctx, p, network, onDontHaveTimeout) @@ -182,7 +191,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, notif := notifications.New() sm = bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) - bs := &Bitswap{ + bs = &Bitswap{ blockstore: bstore, network: network, process: px, @@ -201,6 +210,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, provSearchDelay: defaultProvSearchDelay, rebroadcastDelay: delay.Fixed(time.Minute), engineBstoreWorkerCount: defaulEngineBlockstoreWorkerCount, + sendDontHavesOnTimeout: true, } // apply functional options before starting and running bitswap @@ -293,6 +303,9 @@ type Bitswap struct { // the score ledger used by the decision 
engine engineScoreLedger deciface.ScoreLedger + + // whether we should actually simulate dont haves on request timeout + sendDontHavesOnTimeout bool } type counters struct { diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/bitswap_with_sessions_test.go index f710879a1..ec85baf55 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -74,7 +74,7 @@ func TestSessionBetweenPeers(t *testing.T) { defer cancel() vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(time.Millisecond)) - ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{bitswap.SetSendDontHavesOnTimeout(false)}) defer ig.Close() bgen := blocksutil.NewBlockGenerator() From 21dba94fc572f3d95025737656e467b2c793963e Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 3 Jun 2021 13:49:06 -0700 Subject: [PATCH 0982/1035] refactor(bitswap): rename simulateDontHaves option s/SetSendDontHavesOnTimeout/SetSimulateDontHavesOnTimeout This commit was moved from ipfs/go-bitswap@f0e84a9a3c4928d5d2adc9811393290b4b46c162 --- bitswap/bitswap.go | 46 +++++++++++++-------------- bitswap/bitswap_with_sessions_test.go | 2 +- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 760512679..ac8904372 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -118,9 +118,9 @@ func WithScoreLedger(scoreLedger deciface.ScoreLedger) Option { } } -func SetSendDontHavesOnTimeout(send bool) Option { +func SetSimulateDontHavesOnTimeout(send bool) Option { return func(bs *Bitswap) { - bs.sendDontHavesOnTimeout = send + bs.simulateDontHavesOnTimeout = send } } @@ -158,7 +158,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, var bs *Bitswap onDontHaveTimeout := func(p peer.ID, dontHaves []cid.Cid) { // Simulate a message arriving with DONT_HAVEs - if bs.sendDontHavesOnTimeout { + if bs.simulateDontHavesOnTimeout { 
sm.ReceiveFrom(ctx, p, nil, nil, dontHaves) } } @@ -192,25 +192,25 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, sm = bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) bs = &Bitswap{ - blockstore: bstore, - network: network, - process: px, - newBlocks: make(chan cid.Cid, HasBlockBufferSize), - provideKeys: make(chan cid.Cid, provideKeysBufferSize), - pm: pm, - pqm: pqm, - sm: sm, - sim: sim, - notif: notif, - counters: new(counters), - dupMetric: dupHist, - allMetric: allHist, - sentHistogram: sentHistogram, - provideEnabled: true, - provSearchDelay: defaultProvSearchDelay, - rebroadcastDelay: delay.Fixed(time.Minute), - engineBstoreWorkerCount: defaulEngineBlockstoreWorkerCount, - sendDontHavesOnTimeout: true, + blockstore: bstore, + network: network, + process: px, + newBlocks: make(chan cid.Cid, HasBlockBufferSize), + provideKeys: make(chan cid.Cid, provideKeysBufferSize), + pm: pm, + pqm: pqm, + sm: sm, + sim: sim, + notif: notif, + counters: new(counters), + dupMetric: dupHist, + allMetric: allHist, + sentHistogram: sentHistogram, + provideEnabled: true, + provSearchDelay: defaultProvSearchDelay, + rebroadcastDelay: delay.Fixed(time.Minute), + engineBstoreWorkerCount: defaulEngineBlockstoreWorkerCount, + simulateDontHavesOnTimeout: true, } // apply functional options before starting and running bitswap @@ -305,7 +305,7 @@ type Bitswap struct { engineScoreLedger deciface.ScoreLedger // whether we should actually simulate dont haves on request timeout - sendDontHavesOnTimeout bool + simulateDontHavesOnTimeout bool } type counters struct { diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/bitswap_with_sessions_test.go index ec85baf55..441745329 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -74,7 +74,7 @@ func TestSessionBetweenPeers(t *testing.T) { defer cancel() vnet := tn.VirtualNetwork(mockrouting.NewServer(), 
delay.Fixed(time.Millisecond)) - ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{bitswap.SetSendDontHavesOnTimeout(false)}) + ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{bitswap.SetSimulateDontHavesOnTimeout(false)}) defer ig.Close() bgen := blocksutil.NewBlockGenerator() From e31ef76716d07f99e8fa432f25972dcf6a07946e Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 3 Jun 2021 12:53:37 -0700 Subject: [PATCH 0983/1035] fix(messagequeue): fix flaky TestDontHaveMgr tests convery DontHaveTimeoutMgr to use clock interface, use mocks in tests to make tests predictable and fast This commit was moved from ipfs/go-bitswap@e30c1e9f72b0f6a951f513e6b102989ad4b761a6 --- .../messagequeue/donthavetimeoutmgr.go | 47 ++++-- .../messagequeue/donthavetimeoutmgr_test.go | 142 ++++++++++++------ 2 files changed, 133 insertions(+), 56 deletions(-) diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr.go b/bitswap/internal/messagequeue/donthavetimeoutmgr.go index 14e70c077..39eb56a9a 100644 --- a/bitswap/internal/messagequeue/donthavetimeoutmgr.go +++ b/bitswap/internal/messagequeue/donthavetimeoutmgr.go @@ -5,6 +5,7 @@ import ( "sync" "time" + "github.com/benbjohnson/clock" cid "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p/p2p/protocol/ping" ) @@ -60,6 +61,7 @@ type pendingWant struct { // we ping the peer to estimate latency. If we receive a response from the // peer we use the response latency. 
type dontHaveTimeoutMgr struct { + clock clock.Clock ctx context.Context shutdown func() peerConn PeerConnection @@ -83,14 +85,16 @@ type dontHaveTimeoutMgr struct { // ewma of message latency (time from message sent to response received) messageLatency *latencyEwma // timer used to wait until want at front of queue expires - checkForTimeoutsTimer *time.Timer + checkForTimeoutsTimer *clock.Timer + // used for testing -- signal when a scheduled timeout check has happened + signal chan struct{} } // newDontHaveTimeoutMgr creates a new dontHaveTimeoutMgr // onDontHaveTimeout is called when pending keys expire (not cancelled before timeout) func newDontHaveTimeoutMgr(pc PeerConnection, onDontHaveTimeout func([]cid.Cid)) *dontHaveTimeoutMgr { return newDontHaveTimeoutMgrWithParams(pc, onDontHaveTimeout, dontHaveTimeout, maxTimeout, - pingLatencyMultiplier, messageLatencyMultiplier, maxExpectedWantProcessTime) + pingLatencyMultiplier, messageLatencyMultiplier, maxExpectedWantProcessTime, clock.New(), nil) } // newDontHaveTimeoutMgrWithParams is used by the tests @@ -101,10 +105,13 @@ func newDontHaveTimeoutMgrWithParams( maxTimeout time.Duration, pingLatencyMultiplier int, messageLatencyMultiplier int, - maxExpectedWantProcessTime time.Duration) *dontHaveTimeoutMgr { + maxExpectedWantProcessTime time.Duration, + clock clock.Clock, + signal chan struct{}) *dontHaveTimeoutMgr { ctx, shutdown := context.WithCancel(context.Background()) mqp := &dontHaveTimeoutMgr{ + clock: clock, ctx: ctx, shutdown: shutdown, peerConn: pc, @@ -117,6 +124,7 @@ func newDontHaveTimeoutMgrWithParams( messageLatencyMultiplier: messageLatencyMultiplier, maxExpectedWantProcessTime: maxExpectedWantProcessTime, onDontHaveTimeout: onDontHaveTimeout, + signal: signal, } return mqp @@ -214,6 +222,7 @@ func (dhtm *dontHaveTimeoutMgr) measurePingLatency() { // checkForTimeouts checks pending wants to see if any are over the timeout. // Note: this function should only be called within the lock. 
func (dhtm *dontHaveTimeoutMgr) checkForTimeouts() { + if len(dhtm.wantQueue) == 0 { return } @@ -228,7 +237,7 @@ func (dhtm *dontHaveTimeoutMgr) checkForTimeouts() { if pw.active { // The queue is in order from earliest to latest, so if we // didn't find an expired entry we can stop iterating - if time.Since(pw.sent) < dhtm.timeout { + if dhtm.clock.Since(pw.sent) < dhtm.timeout { break } @@ -259,20 +268,29 @@ func (dhtm *dontHaveTimeoutMgr) checkForTimeouts() { // Schedule the next check for the moment when the oldest pending want will // timeout oldestStart := dhtm.wantQueue[0].sent - until := time.Until(oldestStart.Add(dhtm.timeout)) + until := oldestStart.Add(dhtm.timeout).Sub(dhtm.clock.Now()) if dhtm.checkForTimeoutsTimer == nil { - dhtm.checkForTimeoutsTimer = time.AfterFunc(until, func() { - dhtm.lk.Lock() - defer dhtm.lk.Unlock() - - dhtm.checkForTimeouts() - }) + dhtm.checkForTimeoutsTimer = dhtm.clock.Timer(until) + go dhtm.consumeTimeouts() } else { dhtm.checkForTimeoutsTimer.Stop() dhtm.checkForTimeoutsTimer.Reset(until) } } +func (dhtm *dontHaveTimeoutMgr) consumeTimeouts() { + for { + select { + case <-dhtm.ctx.Done(): + return + case <-dhtm.checkForTimeoutsTimer.C: + dhtm.lk.Lock() + dhtm.checkForTimeouts() + dhtm.lk.Unlock() + } + } +} + // AddPending adds the given keys that will expire if not cancelled before // the timeout func (dhtm *dontHaveTimeoutMgr) AddPending(ks []cid.Cid) { @@ -280,7 +298,7 @@ func (dhtm *dontHaveTimeoutMgr) AddPending(ks []cid.Cid) { return } - start := time.Now() + start := dhtm.clock.Now() dhtm.lk.Lock() defer dhtm.lk.Unlock() @@ -331,6 +349,11 @@ func (dhtm *dontHaveTimeoutMgr) fireTimeout(pending []cid.Cid) { // Fire the timeout dhtm.onDontHaveTimeout(pending) + + // signal a timeout fired + if dhtm.signal != nil { + dhtm.signal <- struct{}{} + } } // calculateTimeoutFromPingLatency calculates a reasonable timeout derived from latency diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go 
b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go index cc0ebb301..bdca09344 100644 --- a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go +++ b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/benbjohnson/clock" "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p/p2p/protocol/ping" @@ -16,10 +17,13 @@ type mockPeerConn struct { err error latency time.Duration latencies []time.Duration + clock clock.Clock + pinged chan struct{} } func (pc *mockPeerConn) Ping(ctx context.Context) ping.Result { - timer := time.NewTimer(pc.latency) + timer := pc.clock.Timer(pc.latency) + pc.pinged <- struct{}{} select { case <-timer.C: if pc.err != nil { @@ -75,19 +79,21 @@ func TestDontHaveTimeoutMgrTimeout(t *testing.T) { latMultiplier := 2 expProcessTime := 5 * time.Millisecond expectedTimeout := expProcessTime + latency*time.Duration(latMultiplier) - pc := &mockPeerConn{latency: latency} + clock := clock.NewMock() + pinged := make(chan struct{}) + pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} tr := timeoutRecorder{} - + signal := make(chan struct{}) dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, signal) dhtm.Start() defer dhtm.Shutdown() - + <-pinged // Add first set of keys dhtm.AddPending(firstks) // Wait for less than the expected timeout - time.Sleep(expectedTimeout - 10*time.Millisecond) + clock.Add(expectedTimeout - 10*time.Millisecond) // At this stage no keys should have timed out if tr.timedOutCount() > 0 { @@ -98,18 +104,21 @@ func TestDontHaveTimeoutMgrTimeout(t *testing.T) { dhtm.AddPending(secondks) // Wait until after the expected timeout - time.Sleep(20 * time.Millisecond) + clock.Add(20 * time.Millisecond) + + <-signal // 
At this stage first set of keys should have timed out if tr.timedOutCount() != len(firstks) { t.Fatal("expected timeout", tr.timedOutCount(), len(firstks)) } - // Clear the recorded timed out keys tr.clear() // Sleep until the second set of keys should have timed out - time.Sleep(expectedTimeout + 10*time.Millisecond) + clock.Add(expectedTimeout + 10*time.Millisecond) + + <-signal // At this stage all keys should have timed out. The second set included // the first set of keys, but they were added before the first set timed @@ -125,24 +134,29 @@ func TestDontHaveTimeoutMgrCancel(t *testing.T) { latMultiplier := 1 expProcessTime := time.Duration(0) expectedTimeout := latency - pc := &mockPeerConn{latency: latency} + clock := clock.NewMock() + pinged := make(chan struct{}) + pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} tr := timeoutRecorder{} - + signal := make(chan struct{}) dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, signal) dhtm.Start() defer dhtm.Shutdown() + <-pinged // Add keys dhtm.AddPending(ks) - time.Sleep(5 * time.Millisecond) + clock.Add(5 * time.Millisecond) // Cancel keys cancelCount := 1 dhtm.CancelPending(ks[:cancelCount]) // Wait for the expected timeout - time.Sleep(expectedTimeout) + clock.Add(expectedTimeout) + + <-signal // At this stage all non-cancelled keys should have timed out if tr.timedOutCount() != len(ks)-cancelCount { @@ -156,30 +170,36 @@ func TestDontHaveTimeoutWantCancelWant(t *testing.T) { latMultiplier := 1 expProcessTime := time.Duration(0) expectedTimeout := latency - pc := &mockPeerConn{latency: latency} + clock := clock.NewMock() + pinged := make(chan struct{}) + pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} tr := timeoutRecorder{} + signal := make(chan struct{}) dhtm := 
newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, signal) dhtm.Start() defer dhtm.Shutdown() + <-pinged // Add keys dhtm.AddPending(ks) // Wait for a short time - time.Sleep(expectedTimeout - 10*time.Millisecond) + clock.Add(expectedTimeout - 10*time.Millisecond) // Cancel two keys dhtm.CancelPending(ks[:2]) - time.Sleep(5 * time.Millisecond) + clock.Add(5 * time.Millisecond) // Add back one cancelled key dhtm.AddPending(ks[:1]) // Wait till after initial timeout - time.Sleep(10 * time.Millisecond) + clock.Add(10 * time.Millisecond) + + <-signal // At this stage only the key that was never cancelled should have timed out if tr.timedOutCount() != 1 { @@ -187,7 +207,9 @@ func TestDontHaveTimeoutWantCancelWant(t *testing.T) { } // Wait till after added back key should time out - time.Sleep(latency) + clock.Add(latency) + + <-signal // At this stage the key that was added back should also have timed out if tr.timedOutCount() != 2 { @@ -200,13 +222,17 @@ func TestDontHaveTimeoutRepeatedAddPending(t *testing.T) { latency := time.Millisecond * 5 latMultiplier := 1 expProcessTime := time.Duration(0) - pc := &mockPeerConn{latency: latency} + clock := clock.NewMock() + pinged := make(chan struct{}) + pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} tr := timeoutRecorder{} + signal := make(chan struct{}) dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, signal) dhtm.Start() defer dhtm.Shutdown() + <-pinged // Add keys repeatedly for _, c := range ks { @@ -214,7 +240,9 @@ func TestDontHaveTimeoutRepeatedAddPending(t *testing.T) { } // Wait for the expected timeout - time.Sleep(latency + 
5*time.Millisecond) + clock.Add(latency + 5*time.Millisecond) + + <-signal // At this stage all keys should have timed out if tr.timedOutCount() != len(ks) { @@ -228,14 +256,17 @@ func TestDontHaveTimeoutMgrMessageLatency(t *testing.T) { latMultiplier := 1 expProcessTime := time.Duration(0) msgLatencyMultiplier := 1 - pc := &mockPeerConn{latency: latency} + clock := clock.NewMock() + pinged := make(chan struct{}) + pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} tr := timeoutRecorder{} + signal := make(chan struct{}) dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, msgLatencyMultiplier, expProcessTime) + dontHaveTimeout, maxTimeout, latMultiplier, msgLatencyMultiplier, expProcessTime, clock, signal) dhtm.Start() defer dhtm.Shutdown() - + <-pinged // Add keys dhtm.AddPending(ks) @@ -245,7 +276,7 @@ func TestDontHaveTimeoutMgrMessageLatency(t *testing.T) { // = 40ms // Wait for less than the expected timeout - time.Sleep(25 * time.Millisecond) + clock.Add(25 * time.Millisecond) // Receive two message latency updates dhtm.UpdateMessageLatency(time.Millisecond * 20) @@ -259,7 +290,9 @@ func TestDontHaveTimeoutMgrMessageLatency(t *testing.T) { // the keys should have timed out // Give the queue some time to process the updates - time.Sleep(5 * time.Millisecond) + clock.Add(5 * time.Millisecond) + + <-signal if tr.timedOutCount() != len(ks) { t.Fatal("expected keys to timeout") @@ -268,16 +301,19 @@ func TestDontHaveTimeoutMgrMessageLatency(t *testing.T) { func TestDontHaveTimeoutMgrMessageLatencyMax(t *testing.T) { ks := testutil.GenerateCids(2) - pc := &mockPeerConn{latency: time.Second} // ignored + clock := clock.NewMock() + pinged := make(chan struct{}) + pc := &mockPeerConn{latency: time.Second, clock: clock, pinged: pinged} tr := timeoutRecorder{} msgLatencyMultiplier := 1 testMaxTimeout := time.Millisecond * 10 + signal := make(chan struct{}) dhtm := newDontHaveTimeoutMgrWithParams(pc, 
tr.onTimeout, - dontHaveTimeout, testMaxTimeout, pingLatencyMultiplier, msgLatencyMultiplier, maxExpectedWantProcessTime) + dontHaveTimeout, testMaxTimeout, pingLatencyMultiplier, msgLatencyMultiplier, maxExpectedWantProcessTime, clock, signal) dhtm.Start() defer dhtm.Shutdown() - + <-pinged // Add keys dhtm.AddPending(ks) @@ -286,7 +322,9 @@ func TestDontHaveTimeoutMgrMessageLatencyMax(t *testing.T) { dhtm.UpdateMessageLatency(testMaxTimeout * 4) // Sleep until just after the maximum timeout - time.Sleep(testMaxTimeout + 5*time.Millisecond) + clock.Add(testMaxTimeout + 5*time.Millisecond) + + <-signal // Keys should have timed out if tr.timedOutCount() != len(ks) { @@ -302,18 +340,22 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { defaultTimeout := 10 * time.Millisecond expectedTimeout := expProcessTime + defaultTimeout tr := timeoutRecorder{} - pc := &mockPeerConn{latency: latency, err: fmt.Errorf("ping error")} + clock := clock.NewMock() + pinged := make(chan struct{}) + pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged, err: fmt.Errorf("ping error")} + signal := make(chan struct{}) dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - defaultTimeout, dontHaveTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) + defaultTimeout, dontHaveTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, signal) dhtm.Start() defer dhtm.Shutdown() + <-pinged // Add keys dhtm.AddPending(ks) // Sleep for less than the expected timeout - time.Sleep(expectedTimeout - 5*time.Millisecond) + clock.Add(expectedTimeout - 5*time.Millisecond) // At this stage no timeout should have happened yet if tr.timedOutCount() > 0 { @@ -321,7 +363,9 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { } // Sleep until after the expected timeout - time.Sleep(10 * time.Millisecond) + clock.Add(10 * time.Millisecond) + + <-signal // Now the keys should have timed out if tr.timedOutCount() != 
len(ks) { @@ -335,19 +379,23 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { latMultiplier := 1 expProcessTime := time.Duration(0) defaultTimeout := 10 * time.Millisecond + clock := clock.NewMock() + pinged := make(chan struct{}) + pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} tr := timeoutRecorder{} - pc := &mockPeerConn{latency: latency} + signal := make(chan struct{}) dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - defaultTimeout, dontHaveTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) + defaultTimeout, dontHaveTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, signal) dhtm.Start() defer dhtm.Shutdown() + <-pinged // Add keys dhtm.AddPending(ks) // Sleep for less than the default timeout - time.Sleep(defaultTimeout - 5*time.Millisecond) + clock.Add(defaultTimeout - 5*time.Millisecond) // At this stage no timeout should have happened yet if tr.timedOutCount() > 0 { @@ -355,7 +403,9 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { } // Sleep until after the default timeout - time.Sleep(defaultTimeout * 2) + clock.Add(defaultTimeout * 2) + + <-signal // Now the keys should have timed out if tr.timedOutCount() != len(ks) { @@ -368,25 +418,29 @@ func TestDontHaveTimeoutNoTimeoutAfterShutdown(t *testing.T) { latency := time.Millisecond * 10 latMultiplier := 1 expProcessTime := time.Duration(0) + clock := clock.NewMock() + pinged := make(chan struct{}) + pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} tr := timeoutRecorder{} - pc := &mockPeerConn{latency: latency} + signal := make(chan struct{}) dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, signal) dhtm.Start() defer dhtm.Shutdown() + <-pinged // Add keys 
dhtm.AddPending(ks) // Wait less than the timeout - time.Sleep(latency - 5*time.Millisecond) + clock.Add(latency - 5*time.Millisecond) // Shutdown the manager dhtm.Shutdown() // Wait for the expected timeout - time.Sleep(10 * time.Millisecond) + clock.Add(10 * time.Millisecond) // Manager was shut down so timeout should not have fired if tr.timedOutCount() != 0 { From 01a421bddcdc5f44121107920c48297c28401e16 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Fri, 4 Jun 2021 17:58:46 -0700 Subject: [PATCH 0984/1035] refactor(messagequeue): rename ambigous channel This commit was moved from ipfs/go-bitswap@38aae7e11a322e5ddbdc677dbcb40aeb1af4fc7d --- .../messagequeue/donthavetimeoutmgr.go | 12 ++-- .../messagequeue/donthavetimeoutmgr_test.go | 56 +++++++++---------- 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr.go b/bitswap/internal/messagequeue/donthavetimeoutmgr.go index 39eb56a9a..4e3aae861 100644 --- a/bitswap/internal/messagequeue/donthavetimeoutmgr.go +++ b/bitswap/internal/messagequeue/donthavetimeoutmgr.go @@ -86,8 +86,8 @@ type dontHaveTimeoutMgr struct { messageLatency *latencyEwma // timer used to wait until want at front of queue expires checkForTimeoutsTimer *clock.Timer - // used for testing -- signal when a scheduled timeout check has happened - signal chan struct{} + // used for testing -- timeoutsTriggered when a scheduled dont have timeouts were triggered + timeoutsTriggered chan struct{} } // newDontHaveTimeoutMgr creates a new dontHaveTimeoutMgr @@ -107,7 +107,7 @@ func newDontHaveTimeoutMgrWithParams( messageLatencyMultiplier int, maxExpectedWantProcessTime time.Duration, clock clock.Clock, - signal chan struct{}) *dontHaveTimeoutMgr { + timeoutsTriggered chan struct{}) *dontHaveTimeoutMgr { ctx, shutdown := context.WithCancel(context.Background()) mqp := &dontHaveTimeoutMgr{ @@ -124,7 +124,7 @@ func newDontHaveTimeoutMgrWithParams( messageLatencyMultiplier: 
messageLatencyMultiplier, maxExpectedWantProcessTime: maxExpectedWantProcessTime, onDontHaveTimeout: onDontHaveTimeout, - signal: signal, + timeoutsTriggered: timeoutsTriggered, } return mqp @@ -351,8 +351,8 @@ func (dhtm *dontHaveTimeoutMgr) fireTimeout(pending []cid.Cid) { dhtm.onDontHaveTimeout(pending) // signal a timeout fired - if dhtm.signal != nil { - dhtm.signal <- struct{}{} + if dhtm.timeoutsTriggered != nil { + dhtm.timeoutsTriggered <- struct{}{} } } diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go index bdca09344..61023f00d 100644 --- a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go +++ b/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go @@ -83,9 +83,9 @@ func TestDontHaveTimeoutMgrTimeout(t *testing.T) { pinged := make(chan struct{}) pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} tr := timeoutRecorder{} - signal := make(chan struct{}) + timeoutsTriggered := make(chan struct{}) dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, signal) + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) dhtm.Start() defer dhtm.Shutdown() <-pinged @@ -106,7 +106,7 @@ func TestDontHaveTimeoutMgrTimeout(t *testing.T) { // Wait until after the expected timeout clock.Add(20 * time.Millisecond) - <-signal + <-timeoutsTriggered // At this stage first set of keys should have timed out if tr.timedOutCount() != len(firstks) { @@ -118,7 +118,7 @@ func TestDontHaveTimeoutMgrTimeout(t *testing.T) { // Sleep until the second set of keys should have timed out clock.Add(expectedTimeout + 10*time.Millisecond) - <-signal + <-timeoutsTriggered // At this stage all keys should have timed out. 
The second set included // the first set of keys, but they were added before the first set timed @@ -138,9 +138,9 @@ func TestDontHaveTimeoutMgrCancel(t *testing.T) { pinged := make(chan struct{}) pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} tr := timeoutRecorder{} - signal := make(chan struct{}) + timeoutsTriggered := make(chan struct{}) dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, signal) + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) dhtm.Start() defer dhtm.Shutdown() <-pinged @@ -156,7 +156,7 @@ func TestDontHaveTimeoutMgrCancel(t *testing.T) { // Wait for the expected timeout clock.Add(expectedTimeout) - <-signal + <-timeoutsTriggered // At this stage all non-cancelled keys should have timed out if tr.timedOutCount() != len(ks)-cancelCount { @@ -174,10 +174,10 @@ func TestDontHaveTimeoutWantCancelWant(t *testing.T) { pinged := make(chan struct{}) pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} tr := timeoutRecorder{} - signal := make(chan struct{}) + timeoutsTriggered := make(chan struct{}) dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, signal) + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) dhtm.Start() defer dhtm.Shutdown() <-pinged @@ -199,7 +199,7 @@ func TestDontHaveTimeoutWantCancelWant(t *testing.T) { // Wait till after initial timeout clock.Add(10 * time.Millisecond) - <-signal + <-timeoutsTriggered // At this stage only the key that was never cancelled should have timed out if tr.timedOutCount() != 1 { @@ -209,7 +209,7 @@ func TestDontHaveTimeoutWantCancelWant(t *testing.T) { // Wait till after added back key should time out clock.Add(latency) - <-signal + 
<-timeoutsTriggered // At this stage the key that was added back should also have timed out if tr.timedOutCount() != 2 { @@ -226,10 +226,10 @@ func TestDontHaveTimeoutRepeatedAddPending(t *testing.T) { pinged := make(chan struct{}) pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} tr := timeoutRecorder{} - signal := make(chan struct{}) + timeoutsTriggered := make(chan struct{}) dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, signal) + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) dhtm.Start() defer dhtm.Shutdown() <-pinged @@ -242,7 +242,7 @@ func TestDontHaveTimeoutRepeatedAddPending(t *testing.T) { // Wait for the expected timeout clock.Add(latency + 5*time.Millisecond) - <-signal + <-timeoutsTriggered // At this stage all keys should have timed out if tr.timedOutCount() != len(ks) { @@ -260,10 +260,10 @@ func TestDontHaveTimeoutMgrMessageLatency(t *testing.T) { pinged := make(chan struct{}) pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} tr := timeoutRecorder{} - signal := make(chan struct{}) + timeoutsTriggered := make(chan struct{}) dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, msgLatencyMultiplier, expProcessTime, clock, signal) + dontHaveTimeout, maxTimeout, latMultiplier, msgLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) dhtm.Start() defer dhtm.Shutdown() <-pinged @@ -292,7 +292,7 @@ func TestDontHaveTimeoutMgrMessageLatency(t *testing.T) { // Give the queue some time to process the updates clock.Add(5 * time.Millisecond) - <-signal + <-timeoutsTriggered if tr.timedOutCount() != len(ks) { t.Fatal("expected keys to timeout") @@ -307,10 +307,10 @@ func TestDontHaveTimeoutMgrMessageLatencyMax(t *testing.T) { tr := timeoutRecorder{} msgLatencyMultiplier := 1 testMaxTimeout := 
time.Millisecond * 10 - signal := make(chan struct{}) + timeoutsTriggered := make(chan struct{}) dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, testMaxTimeout, pingLatencyMultiplier, msgLatencyMultiplier, maxExpectedWantProcessTime, clock, signal) + dontHaveTimeout, testMaxTimeout, pingLatencyMultiplier, msgLatencyMultiplier, maxExpectedWantProcessTime, clock, timeoutsTriggered) dhtm.Start() defer dhtm.Shutdown() <-pinged @@ -324,7 +324,7 @@ func TestDontHaveTimeoutMgrMessageLatencyMax(t *testing.T) { // Sleep until just after the maximum timeout clock.Add(testMaxTimeout + 5*time.Millisecond) - <-signal + <-timeoutsTriggered // Keys should have timed out if tr.timedOutCount() != len(ks) { @@ -343,10 +343,10 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { clock := clock.NewMock() pinged := make(chan struct{}) pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged, err: fmt.Errorf("ping error")} - signal := make(chan struct{}) + timeoutsTriggered := make(chan struct{}) dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - defaultTimeout, dontHaveTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, signal) + defaultTimeout, dontHaveTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) dhtm.Start() defer dhtm.Shutdown() <-pinged @@ -365,7 +365,7 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { // Sleep until after the expected timeout clock.Add(10 * time.Millisecond) - <-signal + <-timeoutsTriggered // Now the keys should have timed out if tr.timedOutCount() != len(ks) { @@ -383,10 +383,10 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { pinged := make(chan struct{}) pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} tr := timeoutRecorder{} - signal := make(chan struct{}) + timeoutsTriggered := make(chan struct{}) dhtm := newDontHaveTimeoutMgrWithParams(pc, 
tr.onTimeout, - defaultTimeout, dontHaveTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, signal) + defaultTimeout, dontHaveTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) dhtm.Start() defer dhtm.Shutdown() <-pinged @@ -405,7 +405,7 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { // Sleep until after the default timeout clock.Add(defaultTimeout * 2) - <-signal + <-timeoutsTriggered // Now the keys should have timed out if tr.timedOutCount() != len(ks) { @@ -422,10 +422,10 @@ func TestDontHaveTimeoutNoTimeoutAfterShutdown(t *testing.T) { pinged := make(chan struct{}) pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} tr := timeoutRecorder{} - signal := make(chan struct{}) + timeoutsTriggered := make(chan struct{}) dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, - dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, signal) + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime, clock, timeoutsTriggered) dhtm.Start() defer dhtm.Shutdown() <-pinged From 0cfedf78daae71235fe880ecc9be2fa076c59ccf Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Fri, 4 Jun 2021 17:54:33 -0700 Subject: [PATCH 0985/1035] fix(messagequeue): fix flaky MessageQueue tests mock time in message queue to fix tests This commit was moved from ipfs/go-bitswap@9ccb51c15248cb1b74c7399566c6c8c65fd707ef --- .../messagequeue/donthavetimeoutmgr.go | 4 +- bitswap/internal/messagequeue/messagequeue.go | 51 +++++-- .../messagequeue/messagequeue_test.go | 137 +++++++++++------- 3 files changed, 130 insertions(+), 62 deletions(-) diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr.go b/bitswap/internal/messagequeue/donthavetimeoutmgr.go index 4e3aae861..e1b42c421 100644 --- a/bitswap/internal/messagequeue/donthavetimeoutmgr.go +++ b/bitswap/internal/messagequeue/donthavetimeoutmgr.go @@ -92,9 +92,9 @@ type 
dontHaveTimeoutMgr struct { // newDontHaveTimeoutMgr creates a new dontHaveTimeoutMgr // onDontHaveTimeout is called when pending keys expire (not cancelled before timeout) -func newDontHaveTimeoutMgr(pc PeerConnection, onDontHaveTimeout func([]cid.Cid)) *dontHaveTimeoutMgr { +func newDontHaveTimeoutMgr(pc PeerConnection, onDontHaveTimeout func([]cid.Cid), clock clock.Clock) *dontHaveTimeoutMgr { return newDontHaveTimeoutMgrWithParams(pc, onDontHaveTimeout, dontHaveTimeout, maxTimeout, - pingLatencyMultiplier, messageLatencyMultiplier, maxExpectedWantProcessTime, clock.New(), nil) + pingLatencyMultiplier, messageLatencyMultiplier, maxExpectedWantProcessTime, clock, nil) } // newDontHaveTimeoutMgrWithParams is used by the tests diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 908f12943..19bab7623 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -6,6 +6,7 @@ import ( "sync" "time" + "github.com/benbjohnson/clock" bsmsg "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" bsnet "github.com/ipfs/go-bitswap/network" @@ -92,10 +93,16 @@ type MessageQueue struct { sender bsnet.MessageSender rebroadcastIntervalLk sync.RWMutex rebroadcastInterval time.Duration - rebroadcastTimer *time.Timer + rebroadcastTimer *clock.Timer // For performance reasons we just clear out the fields of the message // instead of creating a new one every time. 
msg bsmsg.BitSwapMessage + + // For simulating time -- uses mock in test + clock clock.Clock + + // Used to track things that happen asynchronously -- used only in test + events chan messageEvent } // recallWantlist keeps a list of pending wants and a list of sent wants @@ -210,10 +217,19 @@ func New(ctx context.Context, p peer.ID, network MessageNetwork, onDontHaveTimeo log.Infow("Bitswap: timeout waiting for blocks", "cids", ks, "peer", p) onDontHaveTimeout(p, ks) } - dhTimeoutMgr := newDontHaveTimeoutMgr(newPeerConnection(p, network), onTimeout) - return newMessageQueue(ctx, p, network, maxMessageSize, sendErrorBackoff, maxValidLatency, dhTimeoutMgr) + clock := clock.New() + dhTimeoutMgr := newDontHaveTimeoutMgr(newPeerConnection(p, network), onTimeout, clock) + return newMessageQueue(ctx, p, network, maxMessageSize, sendErrorBackoff, maxValidLatency, dhTimeoutMgr, clock, nil) } +type messageEvent int + +const ( + messageQueued messageEvent = iota + messageFinishedSending + latenciesRecorded +) + // This constructor is used by the tests func newMessageQueue( ctx context.Context, @@ -222,7 +238,9 @@ func newMessageQueue( maxMsgSize int, sendErrorBackoff time.Duration, maxValidLatency time.Duration, - dhTimeoutMgr DontHaveTimeoutManager) *MessageQueue { + dhTimeoutMgr DontHaveTimeoutManager, + clock clock.Clock, + events chan messageEvent) *MessageQueue { ctx, cancel := context.WithCancel(ctx) return &MessageQueue{ @@ -243,7 +261,9 @@ func newMessageQueue( priority: maxPriority, // For performance reasons we just clear out the fields of the message // after using it, instead of creating a new one every time. - msg: bsmsg.New(false), + msg: bsmsg.New(false), + clock: clock, + events: events, } } @@ -368,7 +388,7 @@ func (mq *MessageQueue) SetRebroadcastInterval(delay time.Duration) { // Startup starts the processing of messages and rebroadcasting. 
func (mq *MessageQueue) Startup() { mq.rebroadcastIntervalLk.RLock() - mq.rebroadcastTimer = time.NewTimer(mq.rebroadcastInterval) + mq.rebroadcastTimer = mq.clock.Timer(mq.rebroadcastInterval) mq.rebroadcastIntervalLk.RUnlock() go mq.runQueue() } @@ -392,7 +412,7 @@ func (mq *MessageQueue) runQueue() { defer mq.onShutdown() // Create a timer for debouncing scheduled work. - scheduleWork := time.NewTimer(0) + scheduleWork := mq.clock.Timer(0) if !scheduleWork.Stop() { // Need to drain the timer if Stop() returns false // See: https://golang.org/pkg/time/#Timer.Stop @@ -420,12 +440,15 @@ func (mq *MessageQueue) runQueue() { // If we have too many updates and/or we've waited too // long, send immediately. if mq.pendingWorkCount() > sendMessageCutoff || - time.Since(workScheduled) >= sendMessageMaxDelay { + mq.clock.Since(workScheduled) >= sendMessageMaxDelay { mq.sendIfReady() workScheduled = time.Time{} } else { // Otherwise, extend the timer. scheduleWork.Reset(sendMessageDebounce) + if mq.events != nil { + mq.events <- messageQueued + } } case <-scheduleWork.C: @@ -476,7 +499,7 @@ func (mq *MessageQueue) transferRebroadcastWants() bool { func (mq *MessageQueue) signalWorkReady() { select { - case mq.outgoingWork <- time.Now(): + case mq.outgoingWork <- mq.clock.Now(): default: } } @@ -566,7 +589,7 @@ func (mq *MessageQueue) simulateDontHaveWithTimeout(wantlist []bsmsg.Entry) { // handleResponse is called when a response is received from the peer, // with the CIDs of received blocks / HAVEs / DONT_HAVEs func (mq *MessageQueue) handleResponse(ks []cid.Cid) { - now := time.Now() + now := mq.clock.Now() earliest := time.Time{} mq.wllock.Lock() @@ -606,6 +629,9 @@ func (mq *MessageQueue) handleResponse(ks []cid.Cid) { // Inform the timeout manager of the calculated latency mq.dhTimeoutMgr.UpdateMessageLatency(now.Sub(earliest)) } + if mq.events != nil { + mq.events <- latenciesRecorded + } } func (mq *MessageQueue) logOutgoingMessage(wantlist []bsmsg.Entry) { @@ -787,7 
+813,7 @@ FINISH: // When the message has been sent, record the time at which each want was // sent so we can calculate message latency onSent := func() { - now := time.Now() + now := mq.clock.Now() mq.wllock.Lock() defer mq.wllock.Unlock() @@ -803,6 +829,9 @@ FINISH: mq.bcstWants.SentAt(e.Cid, now) } } + if mq.events != nil { + mq.events <- messageFinishedSending + } } return mq.msg, onSent diff --git a/bitswap/internal/messagequeue/messagequeue_test.go b/bitswap/internal/messagequeue/messagequeue_test.go index 4bb538eb0..5607a3aa4 100644 --- a/bitswap/internal/messagequeue/messagequeue_test.go +++ b/bitswap/internal/messagequeue/messagequeue_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/benbjohnson/clock" "github.com/ipfs/go-bitswap/internal/testutil" pb "github.com/ipfs/go-bitswap/message/pb" cid "github.com/ipfs/go-cid" @@ -147,6 +148,13 @@ func totalEntriesLength(messages [][]bsmsg.Entry) int { return totalLength } +func expectEvent(t *testing.T, events <-chan messageEvent, expectedEvent messageEvent) { + evt := <-events + if evt != expectedEvent { + t.Fatal("message not queued") + } +} + func TestStartupAndShutdown(t *testing.T) { ctx := context.Background() messagesSent := make(chan []bsmsg.Entry) @@ -397,7 +405,10 @@ func TestWantlistRebroadcast(t *testing.T) { fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} peerID := testutil.GeneratePeers(1)[0] - messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) + dhtm := &fakeDontHaveTimeoutMgr{} + clock := clock.NewMock() + events := make(chan messageEvent) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock, events) bcstwh := testutil.GenerateCids(10) wantHaves := testutil.GenerateCids(10) wantBlocks := testutil.GenerateCids(10) @@ -405,27 +416,24 @@ func TestWantlistRebroadcast(t *testing.T) { // Add some broadcast want-haves messageQueue.Startup() 
messageQueue.AddBroadcastWantHaves(bcstwh) - messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) - if len(messages) != 1 { - t.Fatal("wrong number of messages were sent for initial wants") - } + expectEvent(t, events, messageQueued) + clock.Add(sendMessageDebounce) + message := <-messagesSent + expectEvent(t, events, messageFinishedSending) // All broadcast want-haves should have been sent - firstMessage := messages[0] - if len(firstMessage) != len(bcstwh) { + if len(message) != len(bcstwh) { t.Fatal("wrong number of wants") } // Tell message queue to rebroadcast after 5ms, then wait 8ms messageQueue.SetRebroadcastInterval(5 * time.Millisecond) - messages = collectMessages(ctx, t, messagesSent, 8*time.Millisecond) - if len(messages) != 1 { - t.Fatal("wrong number of messages were rebroadcast") - } + clock.Add(8 * time.Millisecond) + message = <-messagesSent + expectEvent(t, events, messageFinishedSending) // All the want-haves should have been rebroadcast - firstMessage = messages[0] - if len(firstMessage) != len(bcstwh) { + if len(message) != len(bcstwh) { t.Fatal("did not rebroadcast all wants") } @@ -434,25 +442,31 @@ func TestWantlistRebroadcast(t *testing.T) { // regular wants and collect them messageQueue.SetRebroadcastInterval(1 * time.Second) messageQueue.AddWants(wantBlocks, wantHaves) - messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) - if len(messages) != 1 { - t.Fatal("wrong number of messages were rebroadcast") - } + expectEvent(t, events, messageQueued) + clock.Add(10 * time.Millisecond) + message = <-messagesSent + expectEvent(t, events, messageFinishedSending) // All new wants should have been sent - firstMessage = messages[0] - if len(firstMessage) != len(wantHaves)+len(wantBlocks) { + if len(message) != len(wantHaves)+len(wantBlocks) { t.Fatal("wrong number of wants") } + select { + case <-messagesSent: + t.Fatal("should only be one message in queue") + default: + } + // Tell message queue to rebroadcast 
after 10ms, then wait 15ms messageQueue.SetRebroadcastInterval(10 * time.Millisecond) - messages = collectMessages(ctx, t, messagesSent, 15*time.Millisecond) - firstMessage = messages[0] + clock.Add(15 * time.Millisecond) + message = <-messagesSent + expectEvent(t, events, messageFinishedSending) // Both original and new wants should have been rebroadcast totalWants := len(bcstwh) + len(wantHaves) + len(wantBlocks) - if len(firstMessage) != totalWants { + if len(message) != totalWants { t.Fatal("did not rebroadcast all wants") } @@ -460,17 +474,22 @@ func TestWantlistRebroadcast(t *testing.T) { messageQueue.SetRebroadcastInterval(1 * time.Second) cancels := append([]cid.Cid{bcstwh[0]}, wantHaves[0], wantBlocks[0]) messageQueue.AddCancels(cancels) - messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) - if len(messages) != 1 { - t.Fatal("wrong number of messages were rebroadcast") + expectEvent(t, events, messageQueued) + clock.Add(10 * time.Millisecond) + message = <-messagesSent + expectEvent(t, events, messageFinishedSending) + + select { + case <-messagesSent: + t.Fatal("should only be one message in queue") + default: } // Cancels for each want should have been sent - firstMessage = messages[0] - if len(firstMessage) != len(cancels) { + if len(message) != len(cancels) { t.Fatal("wrong number of cancels") } - for _, entry := range firstMessage { + for _, entry := range message { if !entry.Cancel { t.Fatal("expected cancels") } @@ -478,9 +497,11 @@ func TestWantlistRebroadcast(t *testing.T) { // Tell message queue to rebroadcast after 10ms, then wait 15ms messageQueue.SetRebroadcastInterval(10 * time.Millisecond) - messages = collectMessages(ctx, t, messagesSent, 15*time.Millisecond) - firstMessage = messages[0] - if len(firstMessage) != totalWants-len(cancels) { + clock.Add(15 * time.Millisecond) + message = <-messagesSent + expectEvent(t, events, messageFinishedSending) + + if len(message) != totalWants-len(cancels) { t.Fatal("did not 
rebroadcast all wants") } } @@ -497,7 +518,7 @@ func TestSendingLargeMessages(t *testing.T) { wantBlocks := testutil.GenerateCids(10) entrySize := 44 maxMsgSize := entrySize * 3 // 3 wants - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMsgSize, sendErrorBackoff, maxValidLatency, dhtm) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMsgSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) messageQueue.Startup() messageQueue.AddWants(wantBlocks, []cid.Cid{}) @@ -577,7 +598,7 @@ func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { peerID := testutil.GeneratePeers(1)[0] dhtm := &fakeDontHaveTimeoutMgr{} - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) messageQueue.Startup() wbs := testutil.GenerateCids(10) @@ -608,33 +629,42 @@ func TestResponseReceived(t *testing.T) { peerID := testutil.GeneratePeers(1)[0] dhtm := &fakeDontHaveTimeoutMgr{} - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm) + clock := clock.NewMock() + events := make(chan messageEvent) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock, events) messageQueue.Startup() cids := testutil.GenerateCids(10) - // Add some wants and wait 10ms + // Add some wants messageQueue.AddWants(cids[:5], nil) - collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + expectEvent(t, events, messageQueued) + clock.Add(sendMessageDebounce) + <-messagesSent + expectEvent(t, events, messageFinishedSending) + + // simulate 10 milliseconds passing + clock.Add(10 * time.Millisecond) // Add some wants and wait another 10ms messageQueue.AddWants(cids[5:8], nil) - collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + expectEvent(t, events, 
messageQueued) + clock.Add(10 * time.Millisecond) + <-messagesSent + expectEvent(t, events, messageFinishedSending) // Receive a response for some of the wants from both groups messageQueue.ResponseReceived([]cid.Cid{cids[0], cids[6], cids[9]}) - // Wait a short time for processing - time.Sleep(10 * time.Millisecond) - // Check that message queue informs DHTM of received responses + expectEvent(t, events, latenciesRecorded) upds := dhtm.latencyUpdates() if len(upds) != 1 { t.Fatal("expected one latency update") } // Elapsed time should be between when the first want was sent and the // response received (about 20ms) - if upds[0] < 15*time.Millisecond || upds[0] > 25*time.Millisecond { + if upds[0] != 20*time.Millisecond { t.Fatal("expected latency to be time since oldest message sent") } } @@ -648,7 +678,7 @@ func TestResponseReceivedAppliesForFirstResponseOnly(t *testing.T) { peerID := testutil.GeneratePeers(1)[0] dhtm := &fakeDontHaveTimeoutMgr{} - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) messageQueue.Startup() cids := testutil.GenerateCids(2) @@ -693,28 +723,37 @@ func TestResponseReceivedDiscardsOutliers(t *testing.T) { maxValLatency := 30 * time.Millisecond dhtm := &fakeDontHaveTimeoutMgr{} - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValLatency, dhtm) + clock := clock.NewMock() + events := make(chan messageEvent) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValLatency, dhtm, clock, events) messageQueue.Startup() cids := testutil.GenerateCids(4) // Add some wants and wait 20ms messageQueue.AddWants(cids[:2], nil) - collectMessages(ctx, t, messagesSent, 20*time.Millisecond) + expectEvent(t, events, messageQueued) + clock.Add(sendMessageDebounce) + <-messagesSent + 
expectEvent(t, events, messageFinishedSending) + + clock.Add(20 * time.Millisecond) // Add some more wants and wait long enough that the first wants will be // outside the maximum valid latency, but the second wants will be inside messageQueue.AddWants(cids[2:], nil) - collectMessages(ctx, t, messagesSent, maxValLatency-10*time.Millisecond) + expectEvent(t, events, messageQueued) + clock.Add(sendMessageDebounce) + <-messagesSent + expectEvent(t, events, messageFinishedSending) + clock.Add(maxValLatency - 10*time.Millisecond + sendMessageDebounce) // Receive a response for the wants messageQueue.ResponseReceived(cids) - // Wait for the response to be processed by the message queue - time.Sleep(10 * time.Millisecond) - // Check that the latency calculation excludes the first wants // (because they're older than max valid latency) + expectEvent(t, events, latenciesRecorded) upds := dhtm.latencyUpdates() if len(upds) != 1 { t.Fatal("expected one latency update") @@ -753,7 +792,7 @@ func BenchmarkMessageQueue(b *testing.B) { dhtm := &fakeDontHaveTimeoutMgr{} peerID := testutil.GeneratePeers(1)[0] - messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm) + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) messageQueue.Startup() go func() { From 311faa99a8a2c5984e1330abd8b01b7678013107 Mon Sep 17 00:00:00 2001 From: Jorropo Date: Sun, 30 May 2021 11:16:38 +0200 Subject: [PATCH 0986/1035] fix: Nil dereference while using SetSendDontHaves This option is used by the benchmark to simulate the old bitswap behavior. This follows the same refactoring idea as done in f3ceb05b73e1. It was crashing since it was trying to access the `sendDontHaves` property of `bs.engine` but `bs.engine` is initialized right after the options are applied, not before.
This commit was moved from ipfs/go-bitswap@f2d9b5a50aee63b0897de8aa8d43052663c0a316 --- bitswap/bitswap.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index ac8904372..bc87a0069 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -107,7 +107,7 @@ func EngineBlockstoreWorkerCount(count int) Option { // This option is only used for testing. func SetSendDontHaves(send bool) Option { return func(bs *Bitswap) { - bs.engine.SetSendDontHaves(send) + bs.engineSetSendDontHaves = send } } @@ -210,6 +210,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, provSearchDelay: defaultProvSearchDelay, rebroadcastDelay: delay.Fixed(time.Minute), engineBstoreWorkerCount: defaulEngineBlockstoreWorkerCount, + engineSetSendDontHaves: true, simulateDontHavesOnTimeout: true, } @@ -220,6 +221,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, // Set up decision engine bs.engine = decision.NewEngine(bstore, bs.engineBstoreWorkerCount, network.ConnectionManager(), network.Self(), bs.engineScoreLedger) + bs.engine.SetSendDontHaves(bs.engineSetSendDontHaves) bs.pqm.Startup() network.SetDelegate(bs) @@ -304,6 +306,11 @@ type Bitswap struct { // the score ledger used by the decision engine engineScoreLedger deciface.ScoreLedger + // indicates what to do when the engine receives a want-block for a block that + // is not in the blockstore. Either send DONT_HAVE or do nothing. + // This is used to simulate with old version of bitswap that were quiets. 
+ engineSetSendDontHaves bool + // whether we should actually simulate dont haves on request timeout simulateDontHavesOnTimeout bool } From 548cba7b4356554e45c390084ffb60d46ba40da3 Mon Sep 17 00:00:00 2001 From: Jorropo Date: Wed, 16 Jun 2021 09:04:52 +0200 Subject: [PATCH 0987/1035] docs: better engineSetSendDontHaves description Co-authored-by: Adin Schmahmann This commit was moved from ipfs/go-bitswap@3f031b40cd5c2716fce2759986f1893aa03da4a5 --- bitswap/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index bc87a0069..6368095b8 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -308,7 +308,7 @@ type Bitswap struct { // indicates what to do when the engine receives a want-block for a block that // is not in the blockstore. Either send DONT_HAVE or do nothing. - // This is used to simulate with old version of bitswap that were quiets. + // This is used to simulate older versions of bitswap that did nothing instead of sending back a DONT_HAVE. engineSetSendDontHaves bool // whether we should actually simulate dont haves on request timeout From 0d51592821039c726a3ce5cedc627fa6b67b45f4 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 24 Jun 2021 10:51:55 -0700 Subject: [PATCH 0988/1035] fix: hold the task worker lock when starting task workers Otherwise, we could try to shut down at the same time and race.
This commit was moved from ipfs/go-bitswap@24c356fd1974c5252509a6ce09bd72f94ebc8bef --- bitswap/internal/decision/engine.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 6950f59e5..f7b0076fb 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -235,6 +235,9 @@ func (e *Engine) StartWorkers(ctx context.Context, px process.Process) { e.bsm.start(px) e.startScoreLedger(px) + e.taskWorkerLock.Lock() + defer e.taskWorkerLock.Unlock() + for i := 0; i < e.taskWorkerCount; i++ { px.Go(func(px process.Process) { e.taskWorker(ctx) From 5c71e7f2119639a551bdacdf1f8c6a743a132348 Mon Sep 17 00:00:00 2001 From: Marten Seemann Date: Tue, 11 May 2021 17:36:34 -0700 Subject: [PATCH 0989/1035] remove unused haves parameter on Engine.ReceiveFrom This commit was moved from ipfs/go-bitswap@f644f8b956cb485e2888b454faac422fa58d173e --- bitswap/bitswap.go | 12 ++++++------ bitswap/internal/decision/engine.go | 6 +++--- bitswap/internal/decision/engine_test.go | 7 +++---- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 6368095b8..d75741182 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -14,10 +14,10 @@ import ( deciface "github.com/ipfs/go-bitswap/decision" bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" - decision "github.com/ipfs/go-bitswap/internal/decision" + "github.com/ipfs/go-bitswap/internal/decision" bsgetter "github.com/ipfs/go-bitswap/internal/getter" bsmq "github.com/ipfs/go-bitswap/internal/messagequeue" - notifications "github.com/ipfs/go-bitswap/internal/notifications" + "github.com/ipfs/go-bitswap/internal/notifications" bspm "github.com/ipfs/go-bitswap/internal/peermanager" bspqm "github.com/ipfs/go-bitswap/internal/providerquerymanager" bssession "github.com/ipfs/go-bitswap/internal/session" @@ -27,14 +27,14 @@ import ( bsmsg 
"github.com/ipfs/go-bitswap/message" bsnet "github.com/ipfs/go-bitswap/network" blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-cid" blockstore "github.com/ipfs/go-ipfs-blockstore" exchange "github.com/ipfs/go-ipfs-exchange-interface" logging "github.com/ipfs/go-log" - metrics "github.com/ipfs/go-metrics-interface" + "github.com/ipfs/go-metrics-interface" process "github.com/jbenet/goprocess" procctx "github.com/jbenet/goprocess/context" - peer "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/peer" ) var log = logging.Logger("bitswap") @@ -422,7 +422,7 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []b bs.sm.ReceiveFrom(ctx, from, allKs, haves, dontHaves) // Send wanted blocks to decision engine - bs.engine.ReceiveFrom(from, wanted, haves) + bs.engine.ReceiveFrom(from, wanted) // Publish the block to any Bitswap clients that had requested blocks. // (the sessions use this pubsub mechanism to inform clients of incoming diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 6950f59e5..c3645526d 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -13,13 +13,13 @@ import ( pb "github.com/ipfs/go-bitswap/message/pb" wl "github.com/ipfs/go-bitswap/wantlist" blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-cid" bstore "github.com/ipfs/go-ipfs-blockstore" logging "github.com/ipfs/go-log" "github.com/ipfs/go-peertaskqueue" "github.com/ipfs/go-peertaskqueue/peertask" process "github.com/jbenet/goprocess" - peer "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/peer" ) // TODO consider taking responsibility for other types of requests. For @@ -563,7 +563,7 @@ func (e *Engine) splitWantsCancels(es []bsmsg.Entry) ([]bsmsg.Entry, []bsmsg.Ent // the blocks to them. 
// // This function also updates the receive side of the ledger. -func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block, haves []cid.Cid) { +func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block) { if len(blks) == 0 { return } diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index f7a752577..d8c836783 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -16,7 +16,6 @@ import ( pb "github.com/ipfs/go-bitswap/message/pb" blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" blockstore "github.com/ipfs/go-ipfs-blockstore" @@ -124,7 +123,7 @@ func TestConsistentAccounting(t *testing.T) { sender.Engine.MessageSent(receiver.Peer, m) receiver.Engine.MessageReceived(ctx, sender.Peer, m) - receiver.Engine.ReceiveFrom(sender.Peer, m.Blocks(), nil) + receiver.Engine.ReceiveFrom(sender.Peer, m.Blocks()) } // Ensure sender records the change @@ -900,7 +899,7 @@ func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { if err := bs.PutMany([]blocks.Block{blks[0], blks[2]}); err != nil { t.Fatal(err) } - e.ReceiveFrom(otherPeer, []blocks.Block{blks[0], blks[2]}, []cid.Cid{}) + e.ReceiveFrom(otherPeer, []blocks.Block{blks[0], blks[2]}) _, env = getNextEnvelope(e, next, 5*time.Millisecond) if env == nil { t.Fatal("expected envelope") @@ -963,7 +962,7 @@ func TestSendDontHave(t *testing.T) { if err := bs.PutMany(blks); err != nil { t.Fatal(err) } - e.ReceiveFrom(otherPeer, blks, []cid.Cid{}) + e.ReceiveFrom(otherPeer, blks) // Envelope should contain 2 HAVEs / 2 blocks _, env = getNextEnvelope(e, next, 10*time.Millisecond) From c6ad619a81c4c1ddf430fc9a44a0ca9f2978c0f5 Mon Sep 17 00:00:00 2001 From: Marten Seemann Date: Tue, 11 May 2021 20:10:24 -0700 Subject: [PATCH 0990/1035] introduce a ledger that stores which peers are waiting for a Cid When receiving a new block 
(Engine.ReceiveFrom), we shouldn't have to loop over all peers in order to determine if they need this block. Instead, use a map to save which peers are waiting for a given Cid. This commit was moved from ipfs/go-bitswap@2bfc771f7941679b9e243477debb68c453e2683e --- bitswap/internal/decision/engine.go | 81 +++++++++++++++--------- bitswap/internal/decision/ledger.go | 8 ++- bitswap/internal/decision/peer_ledger.go | 46 ++++++++++++++ 3 files changed, 104 insertions(+), 31 deletions(-) create mode 100644 bitswap/internal/decision/peer_ledger.go diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index c3645526d..0b5f0d15d 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -144,11 +144,14 @@ type Engine struct { tagQueued, tagUseful string - lock sync.RWMutex // protects the fields immediatly below + lock sync.RWMutex // protects the fields immediately below // ledgerMap lists block-related Ledgers by their Partner key.
ledgerMap map[peer.ID]*ledger + // peerLedger saves which peers are waiting for a Cid + peerLedger *peerLedger + // an external ledger dealing with peer scores scoreLedger ScoreLedger @@ -191,6 +194,7 @@ func newEngine(bs bstore.Blockstore, bstoreWorkerCount int, peerTagger PeerTagge taskWorkerCount: taskWorkerCount, sendDontHaves: true, self: self, + peerLedger: newPeerLedger(), } e.tagQueued = fmt.Sprintf(tagFormat, "queued", uuid.New().String()) e.tagUseful = fmt.Sprintf(tagFormat, "useful", uuid.New().String()) @@ -456,6 +460,15 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap return } + e.lock.Lock() + for _, entry := range wants { + e.peerLedger.Wants(p, entry.Cid) + } + for _, entry := range cancels { + e.peerLedger.CancelWant(p, entry.Cid) + } + e.lock.Unlock() + // Get the ledger for the peer l := e.findOrCreate(p) l.lk.Lock() @@ -588,40 +601,44 @@ func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block) { } // Check each peer to see if it wants one of the blocks we received - work := false + var work bool e.lock.RLock() + for _, b := range blks { + k := b.Cid() - for _, l := range e.ledgerMap { - l.lk.RLock() - - for _, b := range blks { - k := b.Cid() - - if entry, ok := l.WantListContains(k); ok { - work = true - - blockSize := blockSizes[k] - isWantBlock := e.sendAsBlock(entry.WantType, blockSize) + for _, p := range e.peerLedger.Peers(k) { + ledger, ok := e.ledgerMap[p] + if !ok { + continue + } + ledger.lk.RLock() + entry, ok := ledger.WantListContains(k) + ledger.lk.RUnlock() + if !ok { // should never happen + continue + } + work = true - entrySize := blockSize - if !isWantBlock { - entrySize = bsmsg.BlockPresenceSize(k) - } + blockSize := blockSizes[k] + isWantBlock := e.sendAsBlock(entry.WantType, blockSize) - e.peerRequestQueue.PushTasks(l.Partner, peertask.Task{ - Topic: entry.Cid, - Priority: int(entry.Priority), - Work: entrySize, - Data: &taskData{ - BlockSize: blockSize, - HaveBlock: true, - 
IsWantBlock: isWantBlock, - SendDontHave: false, - }, - }) + entrySize := blockSize + if !isWantBlock { + entrySize = bsmsg.BlockPresenceSize(k) } + + e.peerRequestQueue.PushTasks(p, peertask.Task{ + Topic: entry.Cid, + Priority: int(entry.Priority), + Work: entrySize, + Data: &taskData{ + BlockSize: blockSize, + HaveBlock: true, + IsWantBlock: isWantBlock, + SendDontHave: false, + }, + }) } - l.lk.RUnlock() } e.lock.RUnlock() @@ -677,6 +694,12 @@ func (e *Engine) PeerDisconnected(p peer.ID) { e.lock.Lock() defer e.lock.Unlock() + ledger, ok := e.ledgerMap[p] + if ok { + for _, entry := range ledger.Entries() { + e.peerLedger.CancelWant(p, entry.Cid) + } + } delete(e.ledgerMap, p) e.scoreLedger.PeerDisconnected(p) diff --git a/bitswap/internal/decision/ledger.go b/bitswap/internal/decision/ledger.go index a607ff4f4..58723d0fb 100644 --- a/bitswap/internal/decision/ledger.go +++ b/bitswap/internal/decision/ledger.go @@ -6,8 +6,8 @@ import ( pb "github.com/ipfs/go-bitswap/message/pb" wl "github.com/ipfs/go-bitswap/wantlist" - cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-core/peer" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" ) func newLedger(p peer.ID) *ledger { @@ -40,3 +40,7 @@ func (l *ledger) CancelWant(k cid.Cid) bool { func (l *ledger) WantListContains(k cid.Cid) (wl.Entry, bool) { return l.wantList.Contains(k) } + +func (l *ledger) Entries() []wl.Entry { + return l.wantList.Entries() +} diff --git a/bitswap/internal/decision/peer_ledger.go b/bitswap/internal/decision/peer_ledger.go new file mode 100644 index 000000000..d5616cecd --- /dev/null +++ b/bitswap/internal/decision/peer_ledger.go @@ -0,0 +1,46 @@ +package decision + +import ( + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" +) + +type peerLedger struct { + cids map[cid.Cid]map[peer.ID]struct{} +} + +func newPeerLedger() *peerLedger { + return &peerLedger{cids: make(map[cid.Cid]map[peer.ID]struct{})} +} + +func (l *peerLedger) Wants(p 
peer.ID, k cid.Cid) { + m, ok := l.cids[k] + if !ok { + m = make(map[peer.ID]struct{}) + l.cids[k]=m + } + m[p] = struct{}{} +} + +func (l *peerLedger) CancelWant(p peer.ID, k cid.Cid) { + m, ok := l.cids[k] + if !ok { + return + } + delete(m, p) + if len(m) == 0 { + delete(l.cids, k) + } +} + +func (l *peerLedger) Peers(k cid.Cid) []peer.ID { + m, ok := l.cids[k] + if !ok { + return nil + } + peers := make([]peer.ID, 0, len(m)) + for p := range m { + peers = append(peers, p) + } + return peers +} From 72240dac07bdeddb0e8d9f8dc58455b1652362e4 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 24 Jun 2021 11:14:53 -0700 Subject: [PATCH 0991/1035] chore: go fmt This commit was moved from ipfs/go-bitswap@aa9bbf87ef89e05faacef7dbcc6e7c996c70f258 --- bitswap/internal/decision/engine.go | 2 +- bitswap/internal/decision/peer_ledger.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 0b5f0d15d..d7b823359 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -194,7 +194,7 @@ func newEngine(bs bstore.Blockstore, bstoreWorkerCount int, peerTagger PeerTagge taskWorkerCount: taskWorkerCount, sendDontHaves: true, self: self, - peerLedger: newPeerLedger(), + peerLedger: newPeerLedger(), } e.tagQueued = fmt.Sprintf(tagFormat, "queued", uuid.New().String()) e.tagUseful = fmt.Sprintf(tagFormat, "useful", uuid.New().String()) diff --git a/bitswap/internal/decision/peer_ledger.go b/bitswap/internal/decision/peer_ledger.go index d5616cecd..ecf41e6b1 100644 --- a/bitswap/internal/decision/peer_ledger.go +++ b/bitswap/internal/decision/peer_ledger.go @@ -15,9 +15,9 @@ func newPeerLedger() *peerLedger { func (l *peerLedger) Wants(p peer.ID, k cid.Cid) { m, ok := l.cids[k] - if !ok { + if !ok { m = make(map[peer.ID]struct{}) - l.cids[k]=m + l.cids[k] = m } m[p] = struct{}{} } From 693642f339a6e651c95d27ee8137816be26cd090 Mon Sep 17 00:00:00 2001 
From: Steven Allen Date: Thu, 24 Jun 2021 11:18:16 -0700 Subject: [PATCH 0992/1035] fix: cleanup ledger on mismatch This commit was moved from ipfs/go-bitswap@96382b1d0ffd4126dbecc7dfccb6151bdcbf437e --- bitswap/internal/decision/engine.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index d7b823359..3ca45037e 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -609,12 +609,16 @@ func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block) { for _, p := range e.peerLedger.Peers(k) { ledger, ok := e.ledgerMap[p] if !ok { + log.Errorw("failed to find peer in ledger", "peer", p) + e.peerLedger.CancelWant(p, k) continue } ledger.lk.RLock() entry, ok := ledger.WantListContains(k) ledger.lk.RUnlock() if !ok { // should never happen + log.Errorw("wantlist index doesn't match peer's wantlist", "peer", p) + e.peerLedger.CancelWant(p, k) continue } work = true From 2702914b684eac59f8a52d44df91d2fdcbce2b91 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 11 May 2021 18:28:59 -0700 Subject: [PATCH 0993/1035] chore: update deps And rebuild protobufs. 
This commit was moved from ipfs/go-bitswap@34e4dc3423db872479b61d7aa2fdaa1135198bba --- bitswap/message/pb/message.pb.go | 25 +++++-------------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/bitswap/message/pb/message.pb.go b/bitswap/message/pb/message.pb.go index c1effb8ea..ef98a0a9f 100644 --- a/bitswap/message/pb/message.pb.go +++ b/bitswap/message/pb/message.pb.go @@ -983,10 +983,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthMessage - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthMessage } if (iNdEx + skippy) > l { @@ -1090,10 +1087,7 @@ func (m *Message_Wantlist) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthMessage - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthMessage } if (iNdEx + skippy) > l { @@ -1254,10 +1248,7 @@ func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthMessage - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthMessage } if (iNdEx + skippy) > l { @@ -1375,10 +1366,7 @@ func (m *Message_Block) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthMessage - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthMessage } if (iNdEx + skippy) > l { @@ -1480,10 +1468,7 @@ func (m *Message_BlockPresence) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthMessage - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthMessage } if (iNdEx + skippy) > l { From 25a56beb667d697a08c492b50c6d7c31ba7c1841 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 24 Jun 2021 11:47:13 -0700 Subject: 
[PATCH 0994/1035] fix(decision): fix a datarace on disconnect We need to hold the ledger's lock while reading from it. This commit was moved from ipfs/go-bitswap@4ffb5e902366f67d333bf94fc3f81429bdb57d16 --- bitswap/internal/decision/engine.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 702dd34c1..c22a4d7fd 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -703,7 +703,11 @@ func (e *Engine) PeerDisconnected(p peer.ID) { ledger, ok := e.ledgerMap[p] if ok { - for _, entry := range ledger.Entries() { + ledger.lk.RLock() + entries := ledger.Entries() + ledger.lk.RUnlock() + + for _, entry := range entries { e.peerLedger.CancelWant(p, entry.Cid) } } From 9c86eacff7f61551205d178772dc8626f478816a Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 24 Jun 2021 11:41:17 -0700 Subject: [PATCH 0995/1035] fix: make blockstore cancel test less timing dependent 1. More blocks so we have more time. 2. Lock less. 3. Put without the delay (so we can put more blocks without slowing things down). 
This commit was moved from ipfs/go-bitswap@a45ff1b9b46dea44ca19ca6092fb59194afc7cad --- .../internal/decision/blockstoremanager_test.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/bitswap/internal/decision/blockstoremanager_test.go b/bitswap/internal/decision/blockstoremanager_test.go index 49a10c50c..e8d6bb014 100644 --- a/bitswap/internal/decision/blockstoremanager_test.go +++ b/bitswap/internal/decision/blockstoremanager_test.go @@ -224,20 +224,22 @@ func TestBlockstoreManagerCtxDone(t *testing.T) { delayTime := 20 * time.Millisecond bsdelay := delay.Fixed(delayTime) - dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) - bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) + underlyingDstore := ds_sync.MutexWrap(ds.NewMapDatastore()) + dstore := delayed.New(underlyingDstore, bsdelay) + underlyingBstore := blockstore.NewBlockstore(underlyingDstore) + bstore := blockstore.NewBlockstore(dstore) bsm := newBlockstoreManager(bstore, 3) proc := process.WithTeardown(func() error { return nil }) bsm.start(proc) - blks := testutil.GenerateBlocksOfSize(10, 1024) + blks := testutil.GenerateBlocksOfSize(100, 128) var ks []cid.Cid for _, b := range blks { ks = append(ks, b.Cid()) } - err := bstore.PutMany(blks) + err := underlyingBstore.PutMany(blks) if err != nil { t.Fatal(err) } @@ -251,8 +253,8 @@ func TestBlockstoreManagerCtxDone(t *testing.T) { t.Error("expected an error") } - // would expect to wait delayTime*10 if we didn't cancel. - if time.Since(before) > delayTime*2 { + // would expect to wait delayTime*100/3 if we didn't cancel. 
+ if time.Since(before) > delayTime*10 { t.Error("expected a fast timeout") } } From 11687bcbb1de0f880bf261a26893d4405448d8cb Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 30 Jul 2021 12:03:30 -0700 Subject: [PATCH 0996/1035] fix: fix a map access race condition in the want index This commit was moved from ipfs/go-bitswap@942b6083b0151d9756f990010e18540aa5925579 --- bitswap/internal/decision/engine.go | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index c22a4d7fd..31c50e3f3 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -605,6 +605,7 @@ func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block) { // Check each peer to see if it wants one of the blocks we received var work bool + missingWants := make(map[peer.ID][]cid.Cid) e.lock.RLock() for _, b := range blks { k := b.Cid() @@ -613,7 +614,7 @@ func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block) { ledger, ok := e.ledgerMap[p] if !ok { log.Errorw("failed to find peer in ledger", "peer", p) - e.peerLedger.CancelWant(p, k) + missingWants[p] = append(missingWants[p], k) continue } ledger.lk.RLock() @@ -621,7 +622,7 @@ func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block) { ledger.lk.RUnlock() if !ok { // should never happen log.Errorw("wantlist index doesn't match peer's wantlist", "peer", p) - e.peerLedger.CancelWant(p, k) + missingWants[p] = append(missingWants[p], k) continue } work = true @@ -649,6 +650,30 @@ func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block) { } e.lock.RUnlock() + // If we found missing wants (e.g., because the peer disconnected, we have some races here) + // remove them from the list. Unfortunately, we still have to re-check because the user + // could have re-connected in the meantime. 
+ if len(missingWants) > 0 { + e.lock.Lock() + for p, wl := range missingWants { + if ledger, ok := e.ledgerMap[p]; ok { + ledger.lk.RLock() + for _, k := range wl { + if _, has := ledger.WantListContains(k); has { + continue + } + e.peerLedger.CancelWant(p, k) + } + ledger.lk.RUnlock() + } else { + for _, k := range wl { + e.peerLedger.CancelWant(p, k) + } + } + } + e.lock.Unlock() + } + if work { e.signalNewWork() } From 72cce0c00390e3c18261b8bf1b65799f85923d7c Mon Sep 17 00:00:00 2001 From: Petar Maymounkov Date: Wed, 18 Aug 2021 08:03:35 -0400 Subject: [PATCH 0997/1035] More stats, knobs and tunings (#514) * add configurability options for TaskWorkerCount and EngineTaskWorkerCount, * add option for maximum outstanding bytes per peer * add prometheus metrics for how long it takes to send messages, the number of pending and active tasks, and the number of pending and active block tasks * add many of the unexported defaults to a defaults subpackage of the internal package * feat: tighter send timeouts 1. Minimum timeout of 10s. 2. We add 2s due to latencies. 3. Minimum bandwidth of 100kbit/s. 4. Maximum message send time of 2min (way more time than necessary). 
Co-authored-by: Adin Schmahmann Co-authored-by: Steven Allen This commit was moved from ipfs/go-bitswap@2b51297a0b68198b6c4bcacdd8868a6df8dcd182 --- bitswap/bitswap.go | 128 +++++++++++++----- bitswap/bitswap_test.go | 6 +- .../internal/decision/blockstoremanager.go | 33 +++-- .../decision/blockstoremanager_test.go | 22 ++- bitswap/internal/decision/engine.go | 92 +++++++++++-- bitswap/internal/decision/engine_test.go | 67 +++++++-- bitswap/internal/defaults/defaults.go | 20 +++ bitswap/network/ipfs_impl.go | 22 ++- bitswap/network/ipfs_impl_timeout_test.go | 24 ++++ bitswap/workers.go | 16 ++- 10 files changed, 347 insertions(+), 83 deletions(-) create mode 100644 bitswap/internal/defaults/defaults.go create mode 100644 bitswap/network/ipfs_impl_timeout_test.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index d75741182..036943021 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -15,6 +15,7 @@ import ( deciface "github.com/ipfs/go-bitswap/decision" bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" "github.com/ipfs/go-bitswap/internal/decision" + "github.com/ipfs/go-bitswap/internal/defaults" bsgetter "github.com/ipfs/go-bitswap/internal/getter" bsmq "github.com/ipfs/go-bitswap/internal/messagequeue" "github.com/ipfs/go-bitswap/internal/notifications" @@ -42,15 +43,6 @@ var sflog = log.Desugar() var _ exchange.SessionExchange = (*Bitswap)(nil) -const ( - // these requests take at _least_ two minutes at the moment. - provideTimeout = time.Minute * 3 - defaultProvSearchDelay = time.Second - - // Number of concurrent workers in decision engine that process requests to the blockstore - defaulEngineBlockstoreWorkerCount = 128 -) - var ( // HasBlockBufferSize is the buffer size of the channel for new blocks // that need to be provided. 
They should get pulled over by the @@ -62,6 +54,8 @@ var ( // the 1<<18+15 is to observe old file chunks that are 1<<18 + 14 in size metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} + + timeMetricsBuckets = []float64{1, 10, 30, 60, 90, 120, 600} ) // Option defines the functional option type that can be used to configure @@ -100,6 +94,36 @@ func EngineBlockstoreWorkerCount(count int) Option { } } +// EngineTaskWorkerCount sets the number of worker threads used inside the engine +func EngineTaskWorkerCount(count int) Option { + if count <= 0 { + panic(fmt.Sprintf("Engine task worker count is %d but must be > 0", count)) + } + return func(bs *Bitswap) { + bs.engineTaskWorkerCount = count + } +} + +func TaskWorkerCount(count int) Option { + if count <= 0 { + panic(fmt.Sprintf("task worker count is %d but must be > 0", count)) + } + return func(bs *Bitswap) { + bs.taskWorkerCount = count + } +} + +// MaxOutstandingBytesPerPeer describes approximately how much work we are will to have outstanding to a peer at any +// given time. Setting it to 0 will disable any limiting. +func MaxOutstandingBytesPerPeer(count int) Option { + if count < 0 { + panic(fmt.Sprintf("max outstanding bytes per peer is %d but must be >= 0", count)) + } + return func(bs *Bitswap) { + bs.engineMaxOutstandingBytesPerPeer = count + } +} + // SetSendDontHaves indicates what to do when the engine receives a want-block // for a block that is not in the blockstore. 
Either // - Send a DONT_HAVE message @@ -147,6 +171,17 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, sentHistogram := metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by"+ " this bitswap").Histogram(metricsBuckets) + sendTimeHistogram := metrics.NewCtx(ctx, "send_times", "Histogram of how long it takes to send messages"+ + " in this bitswap").Histogram(timeMetricsBuckets) + + pendingEngineGauge := metrics.NewCtx(ctx, "pending_tasks", "Total number of pending tasks").Gauge() + + activeEngineGauge := metrics.NewCtx(ctx, "active_tasks", "Total number of active tasks").Gauge() + + pendingBlocksGauge := metrics.NewCtx(ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() + + activeBlocksGauge := metrics.NewCtx(ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() + px := process.WithTeardown(func() error { return nil }) @@ -192,26 +227,30 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, sm = bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) bs = &Bitswap{ - blockstore: bstore, - network: network, - process: px, - newBlocks: make(chan cid.Cid, HasBlockBufferSize), - provideKeys: make(chan cid.Cid, provideKeysBufferSize), - pm: pm, - pqm: pqm, - sm: sm, - sim: sim, - notif: notif, - counters: new(counters), - dupMetric: dupHist, - allMetric: allHist, - sentHistogram: sentHistogram, - provideEnabled: true, - provSearchDelay: defaultProvSearchDelay, - rebroadcastDelay: delay.Fixed(time.Minute), - engineBstoreWorkerCount: defaulEngineBlockstoreWorkerCount, - engineSetSendDontHaves: true, - simulateDontHavesOnTimeout: true, + blockstore: bstore, + network: network, + process: px, + newBlocks: make(chan cid.Cid, HasBlockBufferSize), + provideKeys: make(chan cid.Cid, provideKeysBufferSize), + pm: pm, + pqm: pqm, + sm: sm, + sim: sim, + notif: notif, + counters: new(counters), + dupMetric: dupHist, + allMetric: allHist, + 
sentHistogram: sentHistogram, + sendTimeHistogram: sendTimeHistogram, + provideEnabled: true, + provSearchDelay: defaults.ProvSearchDelay, + rebroadcastDelay: delay.Fixed(time.Minute), + engineBstoreWorkerCount: defaults.BitswapEngineBlockstoreWorkerCount, + engineTaskWorkerCount: defaults.BitswapEngineTaskWorkerCount, + taskWorkerCount: defaults.BitswapTaskWorkerCount, + engineMaxOutstandingBytesPerPeer: defaults.BitswapMaxOutstandingBytesPerPeer, + engineSetSendDontHaves: true, + simulateDontHavesOnTimeout: true, } // apply functional options before starting and running bitswap @@ -220,7 +259,20 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, } // Set up decision engine - bs.engine = decision.NewEngine(bstore, bs.engineBstoreWorkerCount, network.ConnectionManager(), network.Self(), bs.engineScoreLedger) + bs.engine = decision.NewEngine( + ctx, + bstore, + bs.engineBstoreWorkerCount, + bs.engineTaskWorkerCount, + bs.engineMaxOutstandingBytesPerPeer, + network.ConnectionManager(), + network.Self(), + bs.engineScoreLedger, + pendingEngineGauge, + activeEngineGauge, + pendingBlocksGauge, + activeBlocksGauge, + ) bs.engine.SetSendDontHaves(bs.engineSetSendDontHaves) bs.pqm.Startup() @@ -277,9 +329,10 @@ type Bitswap struct { counters *counters // Metrics interface metrics - dupMetric metrics.Histogram - allMetric metrics.Histogram - sentHistogram metrics.Histogram + dupMetric metrics.Histogram + allMetric metrics.Histogram + sentHistogram metrics.Histogram + sendTimeHistogram metrics.Histogram // External statistics interface wiretap WireTap @@ -303,6 +356,15 @@ type Bitswap struct { // how many worker threads to start for decision engine blockstore worker engineBstoreWorkerCount int + // how many worker threads to start for decision engine task worker + engineTaskWorkerCount int + + // the total number of simultaneous threads sending outgoing messages + taskWorkerCount int + + // the total amount of bytes that a peer should have outstanding, it is 
utilized by the decision engine + engineMaxOutstandingBytesPerPeer int + // the score ledger used by the decision engine engineScoreLedger deciface.ScoreLedger diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index f28112d79..0da62dd35 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -285,7 +285,11 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { t.SkipNow() } net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + ig := testinstance.NewTestInstanceGenerator(net, nil, []bitswap.Option{ + bitswap.TaskWorkerCount(5), + bitswap.EngineTaskWorkerCount(5), + bitswap.MaxOutstandingBytesPerPeer(1 << 20), + }) defer ig.Close() bg := blocksutil.NewBlockGenerator() diff --git a/bitswap/internal/decision/blockstoremanager.go b/bitswap/internal/decision/blockstoremanager.go index dc022caf0..7d6864eb9 100644 --- a/bitswap/internal/decision/blockstoremanager.go +++ b/bitswap/internal/decision/blockstoremanager.go @@ -8,25 +8,36 @@ import ( blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" bstore "github.com/ipfs/go-ipfs-blockstore" + "github.com/ipfs/go-metrics-interface" process "github.com/jbenet/goprocess" ) // blockstoreManager maintains a pool of workers that make requests to the blockstore. 
type blockstoreManager struct { - bs bstore.Blockstore - workerCount int - jobs chan func() - px process.Process + bs bstore.Blockstore + workerCount int + jobs chan func() + px process.Process + pendingGauge metrics.Gauge + activeGauge metrics.Gauge } // newBlockstoreManager creates a new blockstoreManager with the given context // and number of workers -func newBlockstoreManager(bs bstore.Blockstore, workerCount int) *blockstoreManager { +func newBlockstoreManager( + ctx context.Context, + bs bstore.Blockstore, + workerCount int, + pendingGauge metrics.Gauge, + activeGauge metrics.Gauge, +) *blockstoreManager { return &blockstoreManager{ - bs: bs, - workerCount: workerCount, - jobs: make(chan func()), - px: process.WithTeardown(func() error { return nil }), + bs: bs, + workerCount: workerCount, + jobs: make(chan func()), + px: process.WithTeardown(func() error { return nil }), + pendingGauge: pendingGauge, + activeGauge: activeGauge, } } @@ -46,7 +57,10 @@ func (bsm *blockstoreManager) worker(px process.Process) { case <-px.Closing(): return case job := <-bsm.jobs: + bsm.pendingGauge.Dec() + bsm.activeGauge.Inc() job() + bsm.activeGauge.Dec() } } } @@ -58,6 +72,7 @@ func (bsm *blockstoreManager) addJob(ctx context.Context, job func()) error { case <-bsm.px.Closing(): return fmt.Errorf("shutting down") case bsm.jobs <- job: + bsm.pendingGauge.Inc() return nil } } diff --git a/bitswap/internal/decision/blockstoremanager_test.go b/bitswap/internal/decision/blockstoremanager_test.go index e8d6bb014..ad447738c 100644 --- a/bitswap/internal/decision/blockstoremanager_test.go +++ b/bitswap/internal/decision/blockstoremanager_test.go @@ -9,6 +9,7 @@ import ( "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-metrics-interface" blocks "github.com/ipfs/go-block-format" ds "github.com/ipfs/go-datastore" @@ -19,13 +20,23 @@ import ( process "github.com/jbenet/goprocess" ) +func newBlockstoreManagerForTesting( + ctx 
context.Context, + bs blockstore.Blockstore, + workerCount int, +) *blockstoreManager { + testPendingBlocksGauge := metrics.NewCtx(ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() + testActiveBlocksGauge := metrics.NewCtx(ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() + return newBlockstoreManager(ctx, bs, workerCount, testPendingBlocksGauge, testActiveBlocksGauge) +} + func TestBlockstoreManagerNotFoundKey(t *testing.T) { ctx := context.Background() bsdelay := delay.Fixed(3 * time.Millisecond) dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) - bsm := newBlockstoreManager(bstore, 5) + bsm := newBlockstoreManagerForTesting(ctx, bstore, 5) bsm.start(process.WithTeardown(func() error { return nil })) cids := testutil.GenerateCids(4) @@ -64,7 +75,7 @@ func TestBlockstoreManager(t *testing.T) { dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) - bsm := newBlockstoreManager(bstore, 5) + bsm := newBlockstoreManagerForTesting(ctx, bstore, 5) bsm.start(process.WithTeardown(func() error { return nil })) exp := make(map[cid.Cid]blocks.Block) @@ -148,7 +159,7 @@ func TestBlockstoreManagerConcurrency(t *testing.T) { bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) workerCount := 5 - bsm := newBlockstoreManager(bstore, workerCount) + bsm := newBlockstoreManagerForTesting(ctx, bstore, workerCount) bsm.start(process.WithTeardown(func() error { return nil })) blkSize := int64(8 * 1024) @@ -190,7 +201,7 @@ func TestBlockstoreManagerClose(t *testing.T) { dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) - bsm := newBlockstoreManager(bstore, 3) + bsm := newBlockstoreManagerForTesting(ctx, bstore, 3) px := process.WithTeardown(func() error { return nil }) 
bsm.start(px) @@ -229,7 +240,8 @@ func TestBlockstoreManagerCtxDone(t *testing.T) { underlyingBstore := blockstore.NewBlockstore(underlyingDstore) bstore := blockstore.NewBlockstore(dstore) - bsm := newBlockstoreManager(bstore, 3) + ctx := context.Background() + bsm := newBlockstoreManagerForTesting(ctx, bstore, 3) proc := process.WithTeardown(func() error { return nil }) bsm.start(proc) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 31c50e3f3..76519bd36 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -16,6 +16,7 @@ import ( "github.com/ipfs/go-cid" bstore "github.com/ipfs/go-ipfs-blockstore" logging "github.com/ipfs/go-log" + "github.com/ipfs/go-metrics-interface" "github.com/ipfs/go-peertaskqueue" "github.com/ipfs/go-peertaskqueue/peertask" process "github.com/jbenet/goprocess" @@ -73,9 +74,6 @@ const ( // maxBlockSizeReplaceHasWithBlock is the maximum size of the block in // bytes up to which we will replace a want-have with a want-block maxBlockSizeReplaceHasWithBlock = 1024 - - // Number of concurrent workers that pull tasks off the request queue - taskWorkerCount = 8 ) // Envelope contains a message for a Peer. 
@@ -167,16 +165,65 @@ type Engine struct { sendDontHaves bool self peer.ID + + // metrics gauge for total pending tasks across all workers + pendingGauge metrics.Gauge + + // metrics gauge for total pending tasks across all workers + activeGauge metrics.Gauge + + // used to ensure metrics are reported each fixed number of operation + metricsLock sync.Mutex + metricUpdateCounter int } -// NewEngine creates a new block sending engine for the given block store -func NewEngine(bs bstore.Blockstore, bstoreWorkerCount int, peerTagger PeerTagger, self peer.ID, scoreLedger ScoreLedger) *Engine { - return newEngine(bs, bstoreWorkerCount, peerTagger, self, maxBlockSizeReplaceHasWithBlock, scoreLedger) +// NewEngine creates a new block sending engine for the given block store. +// maxOutstandingBytesPerPeer hints to the peer task queue not to give a peer more tasks if it has some maximum +// work already outstanding. +func NewEngine( + ctx context.Context, + bs bstore.Blockstore, + bstoreWorkerCount, + engineTaskWorkerCount, maxOutstandingBytesPerPeer int, + peerTagger PeerTagger, + self peer.ID, + scoreLedger ScoreLedger, + pendingEngineGauge metrics.Gauge, + activeEngineGauge metrics.Gauge, + pendingBlocksGauge metrics.Gauge, + activeBlocksGauge metrics.Gauge, +) *Engine { + return newEngine( + ctx, + bs, + bstoreWorkerCount, + engineTaskWorkerCount, + maxOutstandingBytesPerPeer, + peerTagger, + self, + maxBlockSizeReplaceHasWithBlock, + scoreLedger, + pendingEngineGauge, + activeEngineGauge, + pendingBlocksGauge, + activeBlocksGauge, + ) } -// This constructor is used by the tests -func newEngine(bs bstore.Blockstore, bstoreWorkerCount int, peerTagger PeerTagger, self peer.ID, - maxReplaceSize int, scoreLedger ScoreLedger) *Engine { +func newEngine( + ctx context.Context, + bs bstore.Blockstore, + bstoreWorkerCount, + engineTaskWorkerCount, maxOutstandingBytesPerPeer int, + peerTagger PeerTagger, + self peer.ID, + maxReplaceSize int, + scoreLedger ScoreLedger, + 
pendingEngineGauge metrics.Gauge, + activeEngineGauge metrics.Gauge, + pendingBlocksGauge metrics.Gauge, + activeBlocksGauge metrics.Gauge, +) *Engine { if scoreLedger == nil { scoreLedger = NewDefaultScoreLedger() @@ -185,16 +232,18 @@ func newEngine(bs bstore.Blockstore, bstoreWorkerCount int, peerTagger PeerTagge e := &Engine{ ledgerMap: make(map[peer.ID]*ledger), scoreLedger: scoreLedger, - bsm: newBlockstoreManager(bs, bstoreWorkerCount), + bsm: newBlockstoreManager(ctx, bs, bstoreWorkerCount, pendingBlocksGauge, activeBlocksGauge), peerTagger: peerTagger, outbox: make(chan (<-chan *Envelope), outboxChanBuffer), workSignal: make(chan struct{}, 1), ticker: time.NewTicker(time.Millisecond * 100), maxBlockSizeReplaceHasWithBlock: maxReplaceSize, - taskWorkerCount: taskWorkerCount, + taskWorkerCount: engineTaskWorkerCount, sendDontHaves: true, self: self, peerLedger: newPeerLedger(), + pendingGauge: pendingEngineGauge, + activeGauge: activeEngineGauge, } e.tagQueued = fmt.Sprintf(tagFormat, "queued", uuid.New().String()) e.tagUseful = fmt.Sprintf(tagFormat, "useful", uuid.New().String()) @@ -202,10 +251,24 @@ func newEngine(bs bstore.Blockstore, bstoreWorkerCount int, peerTagger PeerTagge peertaskqueue.OnPeerAddedHook(e.onPeerAdded), peertaskqueue.OnPeerRemovedHook(e.onPeerRemoved), peertaskqueue.TaskMerger(newTaskMerger()), - peertaskqueue.IgnoreFreezing(true)) + peertaskqueue.IgnoreFreezing(true), + peertaskqueue.MaxOutstandingWorkPerPeer(maxOutstandingBytesPerPeer)) return e } +func (e *Engine) updateMetrics() { + e.metricsLock.Lock() + c := e.metricUpdateCounter + e.metricUpdateCounter++ + e.metricsLock.Unlock() + + if c%100 == 0 { + stats := e.peerRequestQueue.Stats() + e.activeGauge.Set(float64(stats.NumActive)) + e.pendingGauge.Set(float64(stats.NumPending)) + } +} + // SetSendDontHaves indicates what to do when the engine receives a want-block // for a block that is not in the blockstore. 
Either // - Send a DONT_HAVE message @@ -316,18 +379,21 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { for { // Pop some tasks off the request queue p, nextTasks, pendingBytes := e.peerRequestQueue.PopTasks(targetMessageSize) + e.updateMetrics() for len(nextTasks) == 0 { select { case <-ctx.Done(): return nil, ctx.Err() case <-e.workSignal: p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(targetMessageSize) + e.updateMetrics() case <-e.ticker.C: // When a task is cancelled, the queue may be "frozen" for a // period of time. We periodically "thaw" the queue to make // sure it doesn't get stuck in a frozen state. e.peerRequestQueue.ThawRound() p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(targetMessageSize) + e.updateMetrics() } } @@ -557,6 +623,7 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap // Push entries onto the request queue if len(activeEntries) > 0 { e.peerRequestQueue.PushTasks(p, activeEntries...) 
+ e.updateMetrics() } } @@ -646,6 +713,7 @@ func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block) { SendDontHave: false, }, }) + e.updateMetrics() } } e.lock.RUnlock() diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index d8c836783..d8445fdef 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -11,9 +11,11 @@ import ( "time" "github.com/benbjohnson/clock" + "github.com/ipfs/go-bitswap/internal/defaults" "github.com/ipfs/go-bitswap/internal/testutil" message "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" + "github.com/ipfs/go-metrics-interface" blocks "github.com/ipfs/go-block-format" ds "github.com/ipfs/go-datastore" @@ -97,7 +99,7 @@ func newTestEngine(ctx context.Context, idStr string) engineSet { func newTestEngineWithSampling(ctx context.Context, idStr string, peerSampleInterval time.Duration, sampleCh chan struct{}, clock clock.Clock) engineSet { fpt := &fakePeerTagger{} bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - e := newEngine(bs, 4, fpt, "localhost", 0, NewTestScoreLedger(peerSampleInterval, sampleCh, clock)) + e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, fpt, "localhost", 0, NewTestScoreLedger(peerSampleInterval, sampleCh, clock)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) return engineSet{ Peer: peer.ID(idStr), @@ -182,10 +184,42 @@ func peerIsPartner(p peer.ID, e *Engine) bool { return false } +func newEngineForTesting( + ctx context.Context, + bs blockstore.Blockstore, + bstoreWorkerCount, + engineTaskWorkerCount, maxOutstandingBytesPerPeer int, + peerTagger PeerTagger, + self peer.ID, + maxReplaceSize int, + scoreLedger ScoreLedger, +) *Engine { + testPendingEngineGauge := metrics.NewCtx(ctx, "pending_tasks", "Total number of pending tasks").Gauge() + testActiveEngineGauge := 
metrics.NewCtx(ctx, "active_tasks", "Total number of active tasks").Gauge() + testPendingBlocksGauge := metrics.NewCtx(ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() + testActiveBlocksGauge := metrics.NewCtx(ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() + return newEngine( + ctx, + bs, + bstoreWorkerCount, + engineTaskWorkerCount, + maxOutstandingBytesPerPeer, + peerTagger, + self, + maxReplaceSize, + scoreLedger, + testPendingEngineGauge, + testActiveEngineGauge, + testPendingBlocksGauge, + testActiveBlocksGauge, + ) +} + func TestOutboxClosedWhenEngineClosed(t *testing.T) { t.SkipNow() // TODO implement *Engine.Close - e := newEngine(blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) - e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + ctx := context.Background() + e := newEngineForTesting(ctx, blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) var wg sync.WaitGroup wg.Add(1) go func() { @@ -512,8 +546,9 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { testCases = onlyTestCases } - e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) - e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + ctx := context.Background() + e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) for i, testCase := 
range testCases { t.Logf("Test case %d:", i) for _, wl := range testCase.wls { @@ -668,8 +703,9 @@ func TestPartnerWantHaveWantBlockActive(t *testing.T) { testCases = onlyTestCases } - e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) - e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + ctx := context.Background() + e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) var next envChan for i, testCase := range testCases { @@ -853,7 +889,7 @@ func TestPartnerWantsThenCancels(t *testing.T) { ctx := context.Background() for i := 0; i < numRounds; i++ { expected := make([][]string, 0, len(testcases)) - e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) + e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) for _, testcase := range testcases { set := testcase[0] @@ -878,8 +914,9 @@ func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) - e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + ctx := context.Background() + e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) + e.StartWorkers(ctx, 
process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) msg := message.New(false) @@ -922,8 +959,9 @@ func TestSendDontHave(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) - e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + ctx := context.Background() + e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) msg := message.New(false) @@ -986,8 +1024,9 @@ func TestWantlistForPeer(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) - e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + ctx := context.Background() + e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) msg := message.New(false) diff --git a/bitswap/internal/defaults/defaults.go b/bitswap/internal/defaults/defaults.go new file mode 100644 index 000000000..7237a996e --- /dev/null +++ b/bitswap/internal/defaults/defaults.go @@ -0,0 +1,20 @@ +package defaults + +import ( + "time" +) + +const ( + // these requests take at _least_ two minutes at the moment. 
+ ProvideTimeout = time.Minute * 3 + ProvSearchDelay = time.Second + + // Number of concurrent workers in decision engine that process requests to the blockstore + BitswapEngineBlockstoreWorkerCount = 128 + // the total number of simultaneous threads sending outgoing messages + BitswapTaskWorkerCount = 8 + // how many worker threads to start for decision engine task worker + BitswapEngineTaskWorkerCount = 8 + // the total amount of bytes that a peer should have outstanding, it is utilized by the decision engine + BitswapMaxOutstandingBytesPerPeer = 1 << 20 +) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index b05ce5584..7457aeb84 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -28,7 +28,11 @@ import ( var log = logging.Logger("bitswap_network") var connectTimeout = time.Second * 5 -var sendMessageTimeout = time.Minute * 10 + +var maxSendTimeout = 2 * time.Minute +var minSendTimeout = 10 * time.Second +var sendLatency = 2 * time.Second +var minSendRate = (100 * 1000) / 8 // 100kbit/s // NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host. 
func NewFromIpfsHost(host host.Host, r routing.ContentRouting, opts ...NetOpt) BitSwapNetwork { @@ -300,7 +304,7 @@ func setDefaultOpts(opts *MessageSenderOpts) *MessageSenderOpts { copy.MaxRetries = 3 } if opts.SendTimeout == 0 { - copy.SendTimeout = sendMessageTimeout + copy.SendTimeout = maxSendTimeout } if opts.SendErrorBackoff == 0 { copy.SendErrorBackoff = 100 * time.Millisecond @@ -308,6 +312,17 @@ func setDefaultOpts(opts *MessageSenderOpts) *MessageSenderOpts { return © } +func sendTimeout(size int) time.Duration { + timeout := sendLatency + timeout += time.Duration((uint64(time.Second) * uint64(size)) / uint64(minSendRate)) + if timeout > maxSendTimeout { + timeout = maxSendTimeout + } else if timeout < minSendTimeout { + timeout = minSendTimeout + } + return timeout +} + func (bsnet *impl) SendMessage( ctx context.Context, p peer.ID, @@ -321,7 +336,8 @@ func (bsnet *impl) SendMessage( return err } - if err = bsnet.msgToStream(ctx, s, outgoing, sendMessageTimeout); err != nil { + timeout := sendTimeout(outgoing.Size()) + if err = bsnet.msgToStream(ctx, s, outgoing, timeout); err != nil { _ = s.Reset() return err } diff --git a/bitswap/network/ipfs_impl_timeout_test.go b/bitswap/network/ipfs_impl_timeout_test.go new file mode 100644 index 000000000..fdbe8e950 --- /dev/null +++ b/bitswap/network/ipfs_impl_timeout_test.go @@ -0,0 +1,24 @@ +package network + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestSendTimeout(t *testing.T) { + require.Equal(t, minSendTimeout, sendTimeout(0)) + require.Equal(t, maxSendTimeout, sendTimeout(1<<30)) + + // Check a 1MiB block (very large) + oneMiB := uint64(1 << 20) + hundredKbit := uint64(100 * 1000) + hundredKB := hundredKbit / 8 + expectedTime := sendLatency + time.Duration(oneMiB*uint64(time.Second)/hundredKB) + actualTime := sendTimeout(int(oneMiB)) + require.Equal(t, expectedTime, actualTime) + + // Check a 256KiB block (expected) + require.InDelta(t, 25*time.Second, 
sendTimeout(256<<10), float64(5*time.Second)) +} diff --git a/bitswap/workers.go b/bitswap/workers.go index 5db534231..c5b62d255 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -3,8 +3,10 @@ package bitswap import ( "context" "fmt" + "time" engine "github.com/ipfs/go-bitswap/internal/decision" + "github.com/ipfs/go-bitswap/internal/defaults" pb "github.com/ipfs/go-bitswap/message/pb" cid "github.com/ipfs/go-cid" process "github.com/jbenet/goprocess" @@ -12,14 +14,10 @@ import ( "go.uber.org/zap" ) -// TaskWorkerCount is the total number of simultaneous threads sending -// outgoing messages -var TaskWorkerCount = 8 - func (bs *Bitswap) startWorkers(ctx context.Context, px process.Process) { // Start up workers to handle requests from other nodes for the data on this node - for i := 0; i < TaskWorkerCount; i++ { + for i := 0; i < bs.taskWorkerCount; i++ { i := i px.Go(func(px process.Process) { bs.taskWorker(ctx, i) @@ -52,6 +50,8 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { continue } + start := time.Now() + // TODO: Only record message as sent if there was no error? // Ideally, yes. But we'd need some way to trigger a retry and/or drop // the peer. 
@@ -60,6 +60,10 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { bs.wiretap.MessageSent(envelope.Peer, envelope.Message) } bs.sendBlocks(ctx, envelope) + + dur := time.Since(start) + bs.sendTimeHistogram.Observe(dur.Seconds()) + case <-ctx.Done(): return } @@ -159,7 +163,7 @@ func (bs *Bitswap) provideWorker(px process.Process) { log.Debugw("Bitswap.ProvideWorker.Start", "ID", wid, "cid", k) defer log.Debugw("Bitswap.ProvideWorker.End", "ID", wid, "cid", k) - ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx + ctx, cancel := context.WithTimeout(ctx, defaults.ProvideTimeout) // timeout ctx defer cancel() if err := bs.network.Provide(ctx, k); err != nil { From 783f5e1a7977fa5ecebb40b3b50d1d8a32a78dc6 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 31 Aug 2021 12:24:10 -0700 Subject: [PATCH 0998/1035] fix: reduce log verbosity These log messages are frequent and were causing lock contention at scale. This commit was moved from ipfs/go-bitswap@6dce2a1000638a707fb65e0ba5f2c9009580f9b8 --- bitswap/internal/decision/engine.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 76519bd36..df49f0bc5 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -680,15 +680,17 @@ func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block) { for _, p := range e.peerLedger.Peers(k) { ledger, ok := e.ledgerMap[p] if !ok { - log.Errorw("failed to find peer in ledger", "peer", p) + // This can happen if the peer has disconnected while we're processing this list. 
+ log.Debugw("failed to find peer in ledger", "peer", p) missingWants[p] = append(missingWants[p], k) continue } ledger.lk.RLock() entry, ok := ledger.WantListContains(k) ledger.lk.RUnlock() - if !ok { // should never happen - log.Errorw("wantlist index doesn't match peer's wantlist", "peer", p) + if !ok { + // This can happen if the peer has canceled their want while we're processing this message. + log.Debugw("wantlist index doesn't match peer's wantlist", "peer", p) missingWants[p] = append(missingWants[p], k) continue } From f5ff53ea114f2116ed6e52f59e41ca6b9237a926 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 2 Sep 2021 19:26:01 +0200 Subject: [PATCH 0999/1035] fix: fix race on "responsive" check fixes #527 This commit was moved from ipfs/go-bitswap@e7f60bf2cdbec5d5ba72dcdcd79457546a0a2cb1 --- bitswap/network/connecteventmanager.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bitswap/network/connecteventmanager.go b/bitswap/network/connecteventmanager.go index b28e8e5b8..bbde7af2c 100644 --- a/bitswap/network/connecteventmanager.go +++ b/bitswap/network/connecteventmanager.go @@ -83,9 +83,10 @@ func (c *connectEventManager) OnMessage(p peer.ID) { // we need to modify state c.lk.RLock() state, ok := c.conns[p] + responsive := ok && state.responsive c.lk.RUnlock() - if !ok || state.responsive { + if !ok || responsive { return } From 0a126fb84c2e3a52b8e5757aa92e455c50366b08 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 7 Sep 2021 16:54:12 +0200 Subject: [PATCH 1000/1035] feat: cache the materialized wantlist This can become a hot-spot. 
This commit was moved from ipfs/go-bitswap@a19b05e23dbc2cfc6b67e3c0b370eb4d219af3ac --- bitswap/internal/decision/engine.go | 2 - bitswap/internal/messagequeue/messagequeue.go | 12 ------ bitswap/wantlist/wantlist.go | 40 ++++++++++++++----- bitswap/wantlist/wantlist_test.go | 2 - 4 files changed, 29 insertions(+), 27 deletions(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index df49f0bc5..5569c4959 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -328,8 +328,6 @@ func (e *Engine) WantlistForPeer(p peer.ID) []wl.Entry { entries := partner.wantList.Entries() partner.lk.Unlock() - wl.SortEntries(entries) - return entries } diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/internal/messagequeue/messagequeue.go index 19bab7623..48fdaa863 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/internal/messagequeue/messagequeue.go @@ -740,13 +740,6 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap // Next, add the wants. If we have too many entries to fit into a single // message, sort by priority and include the high priority ones first. - // However, avoid sorting till we really need to as this code is a - // called frequently. - - // Add each regular want-have / want-block to the message. - if msgSize+(len(peerEntries)*bsmsg.MaxEntrySize) > mq.maxMessageSize { - bswl.SortEntries(peerEntries) - } for _, e := range peerEntries { msgSize += mq.msg.AddEntry(e.Cid, e.Priority, e.WantType, true) @@ -757,11 +750,6 @@ func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwap } } - // Add each broadcast want-have to the message. 
- if msgSize+(len(bcstEntries)*bsmsg.MaxEntrySize) > mq.maxMessageSize { - bswl.SortEntries(bcstEntries) - } - // Add each broadcast want-have to the message for _, e := range bcstEntries { // Broadcast wants are sent as want-have diff --git a/bitswap/wantlist/wantlist.go b/bitswap/wantlist/wantlist.go index 555c293e6..da54983e1 100644 --- a/bitswap/wantlist/wantlist.go +++ b/bitswap/wantlist/wantlist.go @@ -13,6 +13,9 @@ import ( // Wantlist is a raw list of wanted blocks and their priorities type Wantlist struct { set map[cid.Cid]Entry + + // Re-computing this can get expensive so we memoize it. + cached []Entry } // Entry is an entry in a want list, consisting of a cid and its priority @@ -58,11 +61,11 @@ func (w *Wantlist) Add(c cid.Cid, priority int32, wantType pb.Message_Wantlist_W return false } - w.set[c] = Entry{ + w.put(c, Entry{ Cid: c, Priority: priority, WantType: wantType, - } + }) return true } @@ -74,7 +77,7 @@ func (w *Wantlist) Remove(c cid.Cid) bool { return false } - delete(w.set, c) + w.delete(c) return true } @@ -91,10 +94,20 @@ func (w *Wantlist) RemoveType(c cid.Cid, wantType pb.Message_Wantlist_WantType) return false } - delete(w.set, c) + w.delete(c) return true } +func (w *Wantlist) delete(c cid.Cid) { + delete(w.set, c) + w.cached = nil +} + +func (w *Wantlist) put(c cid.Cid, e Entry) { + w.cached = nil + w.set[c] = e +} + // Contains returns the entry, if present, for the given CID, plus whether it // was present. func (w *Wantlist) Contains(c cid.Cid) (Entry, bool) { @@ -102,23 +115,28 @@ func (w *Wantlist) Contains(c cid.Cid) (Entry, bool) { return e, ok } -// Entries returns all wantlist entries for a want list. +// Entries returns all wantlist entries for a want list, sorted by priority. +// +// DO NOT MODIFY. The returned list is cached. 
func (w *Wantlist) Entries() []Entry { + if w.cached != nil { + return w.cached + } es := make([]Entry, 0, len(w.set)) for _, e := range w.set { es = append(es, e) } - return es + sort.Sort(entrySlice(es)) + w.cached = es + return es[0:len(es):len(es)] } // Absorb all the entries in other into this want list func (w *Wantlist) Absorb(other *Wantlist) { + // Invalidate the cache up-front to avoid doing any work trying to keep it up-to-date. + w.cached = nil + for _, e := range other.Entries() { w.Add(e.Cid, e.Priority, e.WantType) } } - -// SortEntries sorts the list of entries by priority. -func SortEntries(es []Entry) { - sort.Sort(entrySlice(es)) -} diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/wantlist/wantlist_test.go index 49dc55905..e4abf3c2b 100644 --- a/bitswap/wantlist/wantlist_test.go +++ b/bitswap/wantlist/wantlist_test.go @@ -211,8 +211,6 @@ func TestSortEntries(t *testing.T) { wl.Add(testcids[2], 4, pb.Message_Wantlist_Have) entries := wl.Entries() - SortEntries(entries) - if !entries[0].Cid.Equals(testcids[1]) || !entries[1].Cid.Equals(testcids[2]) || !entries[2].Cid.Equals(testcids[0]) { From aa6d37fbf194cca680a59ef0e3930e81d3a3f481 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Wed, 15 Sep 2021 17:54:13 +0200 Subject: [PATCH 1001/1035] fix: rename wiretap to tracer To avoid... confusion and angst. This also removes the option to _disable_ it, because there's really no need (not safe to do at runtime anyways). 
This commit was moved from ipfs/go-bitswap@d3c024e510c5e2fdc59a12d86b7aff54ef74f77d --- bitswap/bitswap.go | 6 +++--- bitswap/bitswap_test.go | 22 +++++++++++----------- bitswap/tracer.go | 20 ++++++++++++++++++++ bitswap/wiretap.go | 27 --------------------------- bitswap/workers.go | 4 ++-- 5 files changed, 36 insertions(+), 43 deletions(-) create mode 100644 bitswap/tracer.go delete mode 100644 bitswap/wiretap.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 036943021..af648972b 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -335,7 +335,7 @@ type Bitswap struct { sendTimeHistogram metrics.Histogram // External statistics interface - wiretap WireTap + tracer Tracer // the SessionManager routes requests to interested sessions sm *bssm.SessionManager @@ -527,8 +527,8 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg // TODO: this is bad, and could be easily abused. // Should only track *useful* messages in ledger - if bs.wiretap != nil { - bs.wiretap.MessageReceived(p, incoming) + if bs.tracer != nil { + bs.tracer.MessageReceived(p, incoming) } iblocks := incoming.Blocks() diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 0da62dd35..330321370 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -870,29 +870,29 @@ type logItem struct { pid peer.ID msg bsmsg.BitSwapMessage } -type mockWireTap struct { +type mockTracer struct { mu sync.Mutex log []logItem } -func (m *mockWireTap) MessageReceived(p peer.ID, msg bsmsg.BitSwapMessage) { +func (m *mockTracer) MessageReceived(p peer.ID, msg bsmsg.BitSwapMessage) { m.mu.Lock() defer m.mu.Unlock() m.log = append(m.log, logItem{'r', p, msg}) } -func (m *mockWireTap) MessageSent(p peer.ID, msg bsmsg.BitSwapMessage) { +func (m *mockTracer) MessageSent(p peer.ID, msg bsmsg.BitSwapMessage) { m.mu.Lock() defer m.mu.Unlock() m.log = append(m.log, logItem{'s', p, msg}) } -func (m *mockWireTap) getLog() []logItem { +func (m *mockTracer) 
getLog() []logItem { m.mu.Lock() defer m.mu.Unlock() return m.log[:len(m.log):len(m.log)] } -func TestWireTap(t *testing.T) { +func TestTracer(t *testing.T) { net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) ig := testinstance.NewTestInstanceGenerator(net, nil, nil) defer ig.Close() @@ -901,9 +901,9 @@ func TestWireTap(t *testing.T) { instances := ig.Instances(3) blocks := bg.Blocks(2) - // Install WireTap - wiretap := new(mockWireTap) - bitswap.EnableWireTap(wiretap)(instances[0].Exchange) + // Install Tracer + wiretap := new(mockTracer) + bitswap.WithTracer(wiretap)(instances[0].Exchange) // First peer has block err := instances[0].Exchange.HasBlock(blocks[0]) @@ -937,9 +937,9 @@ func TestWireTap(t *testing.T) { log := wiretap.getLog() - // After communication, 3 messages should be logged via WireTap + // After communication, 3 messages should be logged via Tracer if l := len(log); l != 3 { - t.Fatal("expected 3 items logged via WireTap, found", l) + t.Fatal("expected 3 items logged via Tracer, found", l) } // Received: 'Have' @@ -988,7 +988,7 @@ func TestWireTap(t *testing.T) { } // After disabling WireTap, no new messages are logged - bitswap.DisableWireTap()(instances[0].Exchange) + bitswap.WithTracer(nil)(instances[0].Exchange) err = instances[0].Exchange.HasBlock(blocks[1]) if err != nil { diff --git a/bitswap/tracer.go b/bitswap/tracer.go new file mode 100644 index 000000000..dc977abdf --- /dev/null +++ b/bitswap/tracer.go @@ -0,0 +1,20 @@ +package bitswap + +import ( + bsmsg "github.com/ipfs/go-bitswap/message" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +// Tracer provides methods to access all messages sent and received by Bitswap. +// This interface can be used to implement various statistics (this is original intent). +type Tracer interface { + MessageReceived(peer.ID, bsmsg.BitSwapMessage) + MessageSent(peer.ID, bsmsg.BitSwapMessage) +} + +// Configures Bitswap to use given tracer. 
+func WithTracer(tap Tracer) Option { + return func(bs *Bitswap) { + bs.tracer = tap + } +} diff --git a/bitswap/wiretap.go b/bitswap/wiretap.go deleted file mode 100644 index 55cb21d3e..000000000 --- a/bitswap/wiretap.go +++ /dev/null @@ -1,27 +0,0 @@ -package bitswap - -import ( - bsmsg "github.com/ipfs/go-bitswap/message" - peer "github.com/libp2p/go-libp2p-core/peer" -) - -// WireTap provides methods to access all messages sent and received by Bitswap. -// This interface can be used to implement various statistics (this is original intent). -type WireTap interface { - MessageReceived(peer.ID, bsmsg.BitSwapMessage) - MessageSent(peer.ID, bsmsg.BitSwapMessage) -} - -// Configures Bitswap to use given wiretap. -func EnableWireTap(tap WireTap) Option { - return func(bs *Bitswap) { - bs.wiretap = tap - } -} - -// Configures Bitswap not to use any wiretap. -func DisableWireTap() Option { - return func(bs *Bitswap) { - bs.wiretap = nil - } -} diff --git a/bitswap/workers.go b/bitswap/workers.go index c5b62d255..af4531adc 100644 --- a/bitswap/workers.go +++ b/bitswap/workers.go @@ -56,8 +56,8 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) { // Ideally, yes. But we'd need some way to trigger a retry and/or drop // the peer. 
bs.engine.MessageSent(envelope.Peer, envelope.Message) - if bs.wiretap != nil { - bs.wiretap.MessageSent(envelope.Peer, envelope.Message) + if bs.tracer != nil { + bs.tracer.MessageSent(envelope.Peer, envelope.Message) } bs.sendBlocks(ctx, envelope) From 64ea7b2213b165a5c4db1092743678f6cdec9477 Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Thu, 7 Oct 2021 17:40:59 -0700 Subject: [PATCH 1002/1035] test(providerquerymanager): fix timings Fix several sensitive timings on ProviderQueryManager tests that could lead to intermittent failures in CI This commit was moved from ipfs/go-bitswap@e0025401ca9bfed66f14ccfccd08e2cbb1a3c1f4 --- .../providerquerymanager_test.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/bitswap/internal/providerquerymanager/providerquerymanager_test.go b/bitswap/internal/providerquerymanager/providerquerymanager_test.go index a39e9661f..f98836780 100644 --- a/bitswap/internal/providerquerymanager/providerquerymanager_test.go +++ b/bitswap/internal/providerquerymanager/providerquerymanager_test.go @@ -69,7 +69,7 @@ func TestNormalSimultaneousFetch(t *testing.T) { providerQueryManager.Startup() keys := testutil.GenerateCids(2) - sessionCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) + sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[1]) @@ -107,7 +107,7 @@ func TestDedupingProviderRequests(t *testing.T) { providerQueryManager.Startup() key := testutil.GenerateCids(1)[0] - sessionCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) + sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) @@ -152,7 +152,7 @@ func 
TestCancelOneRequestDoesNotTerminateAnother(t *testing.T) { firstSessionCtx, firstCancel := context.WithTimeout(ctx, 3*time.Millisecond) defer firstCancel() firstRequestChan := providerQueryManager.FindProvidersAsync(firstSessionCtx, key) - secondSessionCtx, secondCancel := context.WithTimeout(ctx, 100*time.Millisecond) + secondSessionCtx, secondCancel := context.WithTimeout(ctx, 5*time.Second) defer secondCancel() secondRequestChan := providerQueryManager.FindProvidersAsync(secondSessionCtx, key) @@ -262,7 +262,7 @@ func TestRateLimitingRequests(t *testing.T) { providerQueryManager.Startup() keys := testutil.GenerateCids(maxInProcessRequests + 1) - sessionCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) + sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() var requestChannels []<-chan peer.ID for i := 0; i < maxInProcessRequests+1; i++ { @@ -283,6 +283,7 @@ func TestRateLimitingRequests(t *testing.T) { fpn.queriesMadeMutex.Lock() defer fpn.queriesMadeMutex.Unlock() if fpn.queriesMade != maxInProcessRequests+1 { + t.Logf("Queries made: %d\n", fpn.queriesMade) t.Fatal("Did not make all seperate requests") } } @@ -291,7 +292,7 @@ func TestFindProviderTimeout(t *testing.T) { peers := testutil.GeneratePeers(10) fpn := &fakeProviderNetwork{ peersFound: peers, - delay: 1 * time.Millisecond, + delay: 10 * time.Millisecond, } ctx := context.Background() providerQueryManager := New(ctx, fpn) @@ -299,7 +300,7 @@ func TestFindProviderTimeout(t *testing.T) { providerQueryManager.SetFindProviderTimeout(2 * time.Millisecond) keys := testutil.GenerateCids(1) - sessionCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) + sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) var firstPeersReceived []peer.ID From 94e34de54803d84bfa006dfa14687b6d50531c2c Mon Sep 17 00:00:00 2001 From: Simon Zhu Date: Mon, 11 Oct 2021 21:12:57 -0700 
Subject: [PATCH 1003/1035] enable custom task prioritization logic This commit was moved from ipfs/go-bitswap@d5168fec19720bd02e262c2aee4986a99e92f567 --- bitswap/bitswap.go | 10 +++++ bitswap/internal/decision/engine.go | 69 ++++++++++++++++++++++++++++- 2 files changed, 77 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index af648972b..98de8d78d 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -148,6 +148,13 @@ func SetSimulateDontHavesOnTimeout(send bool) Option { } } +// WithTaskComparator configures custom task prioritization logic. +func WithTaskComparator(comparator decision.TaskComparator) Option { + return func(bs *Bitswap) { + bs.taskComparator = comparator + } +} + // New initializes a BitSwap instance that communicates over the provided // BitSwapNetwork. This function registers the returned instance as the network // delegate. Runs until context is cancelled or bitswap.Close is called. @@ -272,6 +279,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, activeEngineGauge, pendingBlocksGauge, activeBlocksGauge, + decision.WithTaskComparator(bs.taskComparator), ) bs.engine.SetSendDontHaves(bs.engineSetSendDontHaves) @@ -375,6 +383,8 @@ type Bitswap struct { // whether we should actually simulate dont haves on request timeout simulateDontHavesOnTimeout bool + + taskComparator TaskComparator } type counters struct { diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index df49f0bc5..548917f94 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -19,6 +19,7 @@ import ( "github.com/ipfs/go-metrics-interface" "github.com/ipfs/go-peertaskqueue" "github.com/ipfs/go-peertaskqueue/peertask" + "github.com/ipfs/go-peertaskqueue/peertracker" process "github.com/jbenet/goprocess" "github.com/libp2p/go-libp2p-core/peer" ) @@ -175,6 +176,33 @@ type Engine struct { // used to ensure metrics are reported each fixed number of operation 
metricsLock sync.Mutex metricUpdateCounter int + + taskComparator TaskComparator +} + +// TaskInfo represents the details of a request from a peer. +type TaskInfo struct { + Cid cid.Cid + // Tasks can be want-have or want-block + IsWantBlock bool + // Whether to immediately send a response if the block is not found + SendDontHave bool + // The size of the block corresponding to the task + BlockSize int + // Whether the block was found + HaveBlock bool +} + +// TaskComparator is used for task prioritization. +// It should return true if task 'ta' has higher priority than task 'tb' +type TaskComparator func(ta, tb *TaskInfo) bool + +type Option func(*Engine) + +func WithTaskComparator(comparator TaskComparator) Option { + return func(e *Engine) { + e.taskComparator = comparator + } } // NewEngine creates a new block sending engine for the given block store. @@ -192,6 +220,7 @@ func NewEngine( activeEngineGauge metrics.Gauge, pendingBlocksGauge metrics.Gauge, activeBlocksGauge metrics.Gauge, + opts ...Option, ) *Engine { return newEngine( ctx, @@ -207,6 +236,7 @@ func NewEngine( activeEngineGauge, pendingBlocksGauge, activeBlocksGauge, + opts..., ) } @@ -223,6 +253,7 @@ func newEngine( activeEngineGauge metrics.Gauge, pendingBlocksGauge metrics.Gauge, activeBlocksGauge metrics.Gauge, + opts ...Option, ) *Engine { if scoreLedger == nil { @@ -247,12 +278,46 @@ func newEngine( } e.tagQueued = fmt.Sprintf(tagFormat, "queued", uuid.New().String()) e.tagUseful = fmt.Sprintf(tagFormat, "useful", uuid.New().String()) - e.peerRequestQueue = peertaskqueue.New( + + for _, opt := range opts { + opt(e) + } + + // default peer task queue options + peerTaskQueueOpts := []peertaskqueue.Option{ peertaskqueue.OnPeerAddedHook(e.onPeerAdded), peertaskqueue.OnPeerRemovedHook(e.onPeerRemoved), peertaskqueue.TaskMerger(newTaskMerger()), peertaskqueue.IgnoreFreezing(true), - peertaskqueue.MaxOutstandingWorkPerPeer(maxOutstandingBytesPerPeer)) + 
peertaskqueue.MaxOutstandingWorkPerPeer(maxOutstandingBytesPerPeer), + } + + if e.taskComparator != nil { + peerTaskComparator := func(a, b *peertask.QueueTask) bool { + taskDataA := a.Task.Data.(*taskData) + taskInfoA := &TaskInfo{ + Cid: a.Task.Topic.(cid.Cid), + IsWantBlock: taskDataA.IsWantBlock, + SendDontHave: taskDataA.SendDontHave, + BlockSize: taskDataA.BlockSize, + HaveBlock: taskDataA.HaveBlock, + } + taskDataB := b.Task.Data.(*taskData) + taskInfoB := &TaskInfo{ + Cid: b.Task.Topic.(cid.Cid), + IsWantBlock: taskDataB.IsWantBlock, + SendDontHave: taskDataB.SendDontHave, + BlockSize: taskDataB.BlockSize, + HaveBlock: taskDataB.HaveBlock, + } + return e.taskComparator(taskInfoA, taskInfoB) + } + peerTaskQueueOpts = append(peerTaskQueueOpts, peertaskqueue.PeerComparator(peertracker.TaskPriorityPeerComparator(peerTaskComparator))) + peerTaskQueueOpts = append(peerTaskQueueOpts, peertaskqueue.TaskComparator(peerTaskComparator)) + } + + e.peerRequestQueue = peertaskqueue.New(peerTaskQueueOpts...) + return e } From 394c1692dc22af4184f4139689a05863f8c6baf6 Mon Sep 17 00:00:00 2001 From: Simon Zhu Date: Tue, 12 Oct 2021 08:47:23 -0700 Subject: [PATCH 1004/1035] add peer to TaskInfo This commit was moved from ipfs/go-bitswap@41662895a2b84421881fa91d148b3d0b86245f03 --- bitswap/internal/decision/engine.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 548917f94..2cede3b49 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -182,6 +182,8 @@ type Engine struct { // TaskInfo represents the details of a request from a peer. 
type TaskInfo struct { + Peer peer.ID + // The CID of the block Cid cid.Cid // Tasks can be want-have or want-block IsWantBlock bool @@ -296,6 +298,7 @@ func newEngine( peerTaskComparator := func(a, b *peertask.QueueTask) bool { taskDataA := a.Task.Data.(*taskData) taskInfoA := &TaskInfo{ + Peer: a.Target, Cid: a.Task.Topic.(cid.Cid), IsWantBlock: taskDataA.IsWantBlock, SendDontHave: taskDataA.SendDontHave, @@ -304,6 +307,7 @@ func newEngine( } taskDataB := b.Task.Data.(*taskData) taskInfoB := &TaskInfo{ + Peer: b.Target, Cid: b.Task.Topic.(cid.Cid), IsWantBlock: taskDataB.IsWantBlock, SendDontHave: taskDataB.SendDontHave, From ab1e64bec66eab1c438a79bdf0468bde1c7412b5 Mon Sep 17 00:00:00 2001 From: Simon Zhu Date: Tue, 12 Oct 2021 08:56:15 -0700 Subject: [PATCH 1005/1035] move task comparator wrapper to separate function This commit was moved from ipfs/go-bitswap@68ae19476785ae7e8de3fea99d2bad846e9bd4bb --- bitswap/internal/decision/engine.go | 51 ++++++++++++++++------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 2cede3b49..4426d8ce4 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -207,6 +207,31 @@ func WithTaskComparator(comparator TaskComparator) Option { } } +// wrapTaskComparator wraps a TaskComparator so it can be used as a QueueTaskComparator +func wrapTaskComparator(tc TaskComparator) peertask.QueueTaskComparator { + return func(a, b *peertask.QueueTask) bool { + taskDataA := a.Task.Data.(*taskData) + taskInfoA := &TaskInfo{ + Peer: a.Target, + Cid: a.Task.Topic.(cid.Cid), + IsWantBlock: taskDataA.IsWantBlock, + SendDontHave: taskDataA.SendDontHave, + BlockSize: taskDataA.BlockSize, + HaveBlock: taskDataA.HaveBlock, + } + taskDataB := b.Task.Data.(*taskData) + taskInfoB := &TaskInfo{ + Peer: b.Target, + Cid: b.Task.Topic.(cid.Cid), + IsWantBlock: taskDataB.IsWantBlock, + SendDontHave: taskDataB.SendDontHave, 
+ BlockSize: taskDataB.BlockSize, + HaveBlock: taskDataB.HaveBlock, + } + return tc(taskInfoA, taskInfoB) + } +} + // NewEngine creates a new block sending engine for the given block store. // maxOutstandingBytesPerPeer hints to the peer task queue not to give a peer more tasks if it has some maximum // work already outstanding. @@ -295,29 +320,9 @@ func newEngine( } if e.taskComparator != nil { - peerTaskComparator := func(a, b *peertask.QueueTask) bool { - taskDataA := a.Task.Data.(*taskData) - taskInfoA := &TaskInfo{ - Peer: a.Target, - Cid: a.Task.Topic.(cid.Cid), - IsWantBlock: taskDataA.IsWantBlock, - SendDontHave: taskDataA.SendDontHave, - BlockSize: taskDataA.BlockSize, - HaveBlock: taskDataA.HaveBlock, - } - taskDataB := b.Task.Data.(*taskData) - taskInfoB := &TaskInfo{ - Peer: b.Target, - Cid: b.Task.Topic.(cid.Cid), - IsWantBlock: taskDataB.IsWantBlock, - SendDontHave: taskDataB.SendDontHave, - BlockSize: taskDataB.BlockSize, - HaveBlock: taskDataB.HaveBlock, - } - return e.taskComparator(taskInfoA, taskInfoB) - } - peerTaskQueueOpts = append(peerTaskQueueOpts, peertaskqueue.PeerComparator(peertracker.TaskPriorityPeerComparator(peerTaskComparator))) - peerTaskQueueOpts = append(peerTaskQueueOpts, peertaskqueue.TaskComparator(peerTaskComparator)) + queueTaskComparator := wrapTaskComparator(e.taskComparator) + peerTaskQueueOpts = append(peerTaskQueueOpts, peertaskqueue.PeerComparator(peertracker.TaskPriorityPeerComparator(queueTaskComparator))) + peerTaskQueueOpts = append(peerTaskQueueOpts, peertaskqueue.TaskComparator(queueTaskComparator)) } e.peerRequestQueue = peertaskqueue.New(peerTaskQueueOpts...) 
From 0f5bcd9b9b129da1330fce3d1181d658da85f4ad Mon Sep 17 00:00:00 2001 From: Simon Zhu Date: Tue, 12 Oct 2021 18:23:03 -0700 Subject: [PATCH 1006/1035] fix undeclared name error This commit was moved from ipfs/go-bitswap@b67d113637285ead9cc1abe27fe2b0e22afcc11b --- bitswap/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 98de8d78d..eebc0bb70 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -384,7 +384,7 @@ type Bitswap struct { // whether we should actually simulate dont haves on request timeout simulateDontHavesOnTimeout bool - taskComparator TaskComparator + taskComparator decision.TaskComparator } type counters struct { From d9e440a521ba25bdf76f24ddd01ccf0c5b4b78b3 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 21 Oct 2021 12:12:37 -0700 Subject: [PATCH 1007/1035] fix: reduce receive contention This means we need to frequently re-take this lock, but it also means we don't hold it while calling other functions that might block (e.g., while pushing jobs). 
This commit was moved from ipfs/go-bitswap@10d1b2c5613b1985d67ad31ccd4d236e7891dfe1 --- bitswap/internal/decision/engine.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index df49f0bc5..ea7e9db07 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -673,12 +673,18 @@ func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block) { // Check each peer to see if it wants one of the blocks we received var work bool missingWants := make(map[peer.ID][]cid.Cid) - e.lock.RLock() for _, b := range blks { k := b.Cid() - for _, p := range e.peerLedger.Peers(k) { + e.lock.RLock() + peers := e.peerLedger.Peers(k) + e.lock.RUnlock() + + for _, p := range peers { + e.lock.RLock() ledger, ok := e.ledgerMap[p] + e.lock.RUnlock() + if !ok { // This can happen if the peer has disconnected while we're processing this list. log.Debugw("failed to find peer in ledger", "peer", p) @@ -718,7 +724,6 @@ func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block) { e.updateMetrics() } } - e.lock.RUnlock() // If we found missing wants (e.g., because the peer disconnected, we have some races here) // remove them from the list. Unfortunately, we still have to re-check because the user From d5a3b5ac8bea3f20c8a35c6efd94505de29dd59d Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 25 Oct 2021 23:25:26 -0700 Subject: [PATCH 1008/1035] test: make sure the cache is cleared when changing the wantlist This test explicitly calls entries to make sure the cache is materialized. 
This commit was moved from ipfs/go-bitswap@e6c8199d145663be224470d6097f4818ea2531be --- bitswap/wantlist/wantlist_test.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/wantlist/wantlist_test.go index e4abf3c2b..2f64f3856 100644 --- a/bitswap/wantlist/wantlist_test.go +++ b/bitswap/wantlist/wantlist_test.go @@ -5,6 +5,7 @@ import ( pb "github.com/ipfs/go-bitswap/message/pb" cid "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" ) var testcids []cid.Cid @@ -216,4 +217,19 @@ func TestSortEntries(t *testing.T) { !entries[2].Cid.Equals(testcids[0]) { t.Fatal("wrong order") } + +} + +// Test adding and removing interleaved with checking entries to make sure we clear the cache. +func TestCache(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 3, pb.Message_Wantlist_Block) + require.Len(t, wl.Entries(), 1) + + wl.Add(testcids[1], 3, pb.Message_Wantlist_Block) + require.Len(t, wl.Entries(), 2) + + wl.Remove(testcids[1]) + require.Len(t, wl.Entries(), 1) } From 1e7e48026fd9ca0cf9456b01447fe48bdb811404 Mon Sep 17 00:00:00 2001 From: Simon Zhu Date: Wed, 27 Oct 2021 14:59:28 -0700 Subject: [PATCH 1009/1035] Add TaskComparator test This commit was moved from ipfs/go-bitswap@1a344b1fe5ef5d937e1f8df5e4599302c087b060 --- bitswap/internal/decision/engine_test.go | 65 ++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 4 deletions(-) diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index d8445fdef..3b7aaf3c9 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -18,6 +18,7 @@ import ( "github.com/ipfs/go-metrics-interface" blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" blockstore "github.com/ipfs/go-ipfs-blockstore" @@ -92,14 +93,14 @@ type engineSet struct { Blockstore blockstore.Blockstore } -func 
newTestEngine(ctx context.Context, idStr string) engineSet { - return newTestEngineWithSampling(ctx, idStr, shortTerm, nil, clock.New()) +func newTestEngine(ctx context.Context, idStr string, opts ...Option) engineSet { + return newTestEngineWithSampling(ctx, idStr, shortTerm, nil, clock.New(), opts...) } -func newTestEngineWithSampling(ctx context.Context, idStr string, peerSampleInterval time.Duration, sampleCh chan struct{}, clock clock.Clock) engineSet { +func newTestEngineWithSampling(ctx context.Context, idStr string, peerSampleInterval time.Duration, sampleCh chan struct{}, clock clock.Clock, opts ...Option) engineSet { fpt := &fakePeerTagger{} bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, fpt, "localhost", 0, NewTestScoreLedger(peerSampleInterval, sampleCh, clock)) + e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, fpt, "localhost", 0, NewTestScoreLedger(peerSampleInterval, sampleCh, clock), opts...) 
e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) return engineSet{ Peer: peer.ID(idStr), @@ -193,6 +194,7 @@ func newEngineForTesting( self peer.ID, maxReplaceSize int, scoreLedger ScoreLedger, + opts ...Option, ) *Engine { testPendingEngineGauge := metrics.NewCtx(ctx, "pending_tasks", "Total number of pending tasks").Gauge() testActiveEngineGauge := metrics.NewCtx(ctx, "active_tasks", "Total number of active tasks").Gauge() @@ -212,6 +214,7 @@ func newEngineForTesting( testActiveEngineGauge, testPendingBlocksGauge, testActiveBlocksGauge, + opts..., ) } @@ -1054,6 +1057,60 @@ func TestWantlistForPeer(t *testing.T) { } +func TestTaskComparator(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + keys := []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"} + cids := make(map[cid.Cid]int) + blks := make([]blocks.Block, 0, len(keys)) + for i, letter := range keys { + block := blocks.NewBlock([]byte(letter)) + blks = append(blks, block) + cids[block.Cid()] = i + } + + fpt := &fakePeerTagger{} + sl := NewTestScoreLedger(shortTerm, nil, clock.New()) + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + if err := bs.PutMany(blks); err != nil { + t.Fatal(err) + } + + // use a single task worker so that the order of outgoing messages is deterministic + engineTaskWorkerCount := 1 + e := newEngineForTesting(ctx, bs, 4, engineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, fpt, "localhost", 0, sl, + WithTaskComparator(func(ta, tb *TaskInfo) bool { + // prioritize based on lexicographic ordering of block content + return cids[ta.Cid] < cids[tb.Cid] + }), + ) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + + // rely on randomness of Go map's iteration order to add Want entries in random order + peerIDs := make([]peer.ID, len(keys)) + for _, i := range cids { + peerID := libp2ptest.RandPeerIDFatal(t) + peerIDs[i] = peerID + 
partnerWantBlocks(e, keys[i:i+1], peerID) + } + + // check that outgoing messages are sent in the correct order + for i, peerID := range peerIDs { + next := <-e.Outbox() + envelope := <-next + if peerID != envelope.Peer { + t.Errorf("expected message for peer ID %#v but instead got message for peer ID %#v", peerID, envelope.Peer) + } + responseBlocks := envelope.Message.Blocks() + if len(responseBlocks) != 1 { + t.Errorf("expected 1 block in response but instead got %v", len(blks)) + } else if responseBlocks[0].Cid() != blks[i].Cid() { + t.Errorf("expected block with CID %#v but instead got block with CID %#v", blks[i].Cid(), responseBlocks[0].Cid()) + } + } +} + func TestTaggingPeers(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() From 112e3d4f3778f582020138d725af7ff6da97cdb9 Mon Sep 17 00:00:00 2001 From: Simon Zhu Date: Wed, 27 Oct 2021 15:37:05 -0700 Subject: [PATCH 1010/1035] Add type aliases for TaskInfo and TaskComparator This commit was moved from ipfs/go-bitswap@b1246539f85e99d126e83df3c91854dec083d33d --- bitswap/bitswap.go | 7 +++++-- bitswap/internal/decision/engine_test.go | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index eebc0bb70..4a15fc580 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -148,8 +148,11 @@ func SetSimulateDontHavesOnTimeout(send bool) Option { } } +type TaskInfo = decision.TaskInfo +type TaskComparator = decision.TaskComparator + // WithTaskComparator configures custom task prioritization logic. 
-func WithTaskComparator(comparator decision.TaskComparator) Option { +func WithTaskComparator(comparator TaskComparator) Option { return func(bs *Bitswap) { bs.taskComparator = comparator } @@ -384,7 +387,7 @@ type Bitswap struct { // whether we should actually simulate dont haves on request timeout simulateDontHavesOnTimeout bool - taskComparator decision.TaskComparator + taskComparator TaskComparator } type counters struct { diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index 3b7aaf3c9..acde17954 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -1080,6 +1080,7 @@ func TestTaskComparator(t *testing.T) { // use a single task worker so that the order of outgoing messages is deterministic engineTaskWorkerCount := 1 e := newEngineForTesting(ctx, bs, 4, engineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, fpt, "localhost", 0, sl, + // if this Option is omitted, the test fails WithTaskComparator(func(ta, tb *TaskInfo) bool { // prioritize based on lexicographic ordering of block content return cids[ta.Cid] < cids[tb.Cid] From 406a86dfc29e3d2dd38a05f0e5ad2b479d517eb5 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 25 Oct 2021 18:07:08 -0700 Subject: [PATCH 1011/1035] fix: optimize handling for peers with lots of tasks This should fix a CPU hotspot when peers request tons of tiny blocks. 
This commit was moved from ipfs/go-bitswap@cc28305f08e757d44b077ece9fc593cae7cdfc31 --- bitswap/internal/decision/taskmerger.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/internal/decision/taskmerger.go b/bitswap/internal/decision/taskmerger.go index 190486419..191200e58 100644 --- a/bitswap/internal/decision/taskmerger.go +++ b/bitswap/internal/decision/taskmerger.go @@ -24,7 +24,7 @@ func newTaskMerger() *taskMerger { // The request queue uses this Method to decide if a newly pushed task has any // new information beyond the tasks with the same Topic (CID) in the queue. -func (*taskMerger) HasNewInfo(task peertask.Task, existing []peertask.Task) bool { +func (*taskMerger) HasNewInfo(task peertask.Task, existing []*peertask.Task) bool { haveSize := false isWantBlock := false for _, et := range existing { From e18b97c869a3eea748c7d50d81a15520be727c53 Mon Sep 17 00:00:00 2001 From: Gus Eggert Date: Wed, 10 Nov 2021 10:44:34 -0500 Subject: [PATCH 1012/1035] feat: plumb through contexts (#539) This commit was moved from ipfs/go-bitswap@d74d6584e47aae04c4094e858184fe1544d0dcbe --- bitswap/benchmarks_test.go | 12 ++++---- bitswap/bitswap.go | 8 ++--- bitswap/bitswap_test.go | 30 +++++++++---------- bitswap/bitswap_with_sessions_test.go | 18 +++++------ .../internal/decision/blockstoremanager.go | 4 +-- .../decision/blockstoremanager_test.go | 8 ++--- bitswap/internal/decision/engine_test.go | 14 ++++----- 7 files changed, 47 insertions(+), 47 deletions(-) diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index dd4cf5b6c..ca92820f3 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -437,7 +437,7 @@ func runDistribution(b *testing.B, instances []testinstance.Instance, blocks []b func allToAll(b *testing.B, provs []testinstance.Instance, blocks []blocks.Block) { for _, p := range provs { - if err := p.Blockstore().PutMany(blocks); err != nil { + if err := p.Blockstore().PutMany(context.Background(), 
blocks); err != nil { b.Fatal(err) } } @@ -452,10 +452,10 @@ func overlap1(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) bill := provs[0] jeff := provs[1] - if err := bill.Blockstore().PutMany(blks[:75]); err != nil { + if err := bill.Blockstore().PutMany(context.Background(), blks[:75]); err != nil { b.Fatal(err) } - if err := jeff.Blockstore().PutMany(blks[25:]); err != nil { + if err := jeff.Blockstore().PutMany(context.Background(), blks[25:]); err != nil { b.Fatal(err) } } @@ -473,12 +473,12 @@ func overlap2(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) even := i%2 == 0 third := i%3 == 0 if third || even { - if err := bill.Blockstore().Put(blk); err != nil { + if err := bill.Blockstore().Put(context.Background(), blk); err != nil { b.Fatal(err) } } if third || !even { - if err := jeff.Blockstore().Put(blk); err != nil { + if err := jeff.Blockstore().Put(context.Background(), blk); err != nil { b.Fatal(err) } } @@ -490,7 +490,7 @@ func overlap2(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) // but we're mostly just testing performance of the sync algorithm func onePeerPerBlock(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) { for _, blk := range blks { - err := provs[rand.Intn(len(provs))].Blockstore().Put(blk) + err := provs[rand.Intn(len(provs))].Blockstore().Put(context.Background(), blk) if err != nil { b.Fatal(err) } diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 4a15fc580..fe0c4855a 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -436,8 +436,8 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks // HasBlock announces the existence of a block to this bitswap service. The // service will potentially notify its peers. 
-func (bs *Bitswap) HasBlock(blk blocks.Block) error { - return bs.receiveBlocksFrom(context.Background(), "", []blocks.Block{blk}, nil, nil) +func (bs *Bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { + return bs.receiveBlocksFrom(ctx, "", []blocks.Block{blk}, nil, nil) } // TODO: Some of this stuff really only needs to be done when adding a block @@ -464,7 +464,7 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []b // Put wanted blocks into blockstore if len(wanted) > 0 { - err := bs.blockstore.PutMany(wanted) + err := bs.blockstore.PutMany(ctx, wanted) if err != nil { log.Errorf("Error writing %d blocks to datastore: %s", len(wanted), err) return err @@ -604,7 +604,7 @@ func (bs *Bitswap) blockstoreHas(blks []blocks.Block) []bool { go func(i int, b blocks.Block) { defer wg.Done() - has, err := bs.blockstore.Has(b.Cid()) + has, err := bs.blockstore.Has(context.TODO(), b.Cid()) if err != nil { log.Infof("blockstore.Has error: %s", err) has = false diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 330321370..c85f06f75 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -90,7 +90,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { hasBlock := peers[0] defer hasBlock.Exchange.Close() - if err := hasBlock.Exchange.HasBlock(block); err != nil { + if err := hasBlock.Exchange.HasBlock(context.Background(), block); err != nil { t.Fatal(err) } @@ -123,7 +123,7 @@ func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { wantsBlock := ig.Next() defer wantsBlock.Exchange.Close() - if err := hasBlock.Exchange.HasBlock(block); err != nil { + if err := hasBlock.Exchange.HasBlock(context.Background(), block); err != nil { t.Fatal(err) } @@ -158,7 +158,7 @@ func TestUnwantedBlockNotAdded(t *testing.T) { hasBlock := peers[0] defer hasBlock.Exchange.Close() - if err := hasBlock.Exchange.HasBlock(block); err != nil { + if err := hasBlock.Exchange.HasBlock(context.Background(), block); 
err != nil { t.Fatal(err) } @@ -170,7 +170,7 @@ func TestUnwantedBlockNotAdded(t *testing.T) { doesNotWantBlock.Exchange.ReceiveMessage(ctx, hasBlock.Peer, bsMessage) - blockInStore, err := doesNotWantBlock.Blockstore().Has(block.Cid()) + blockInStore, err := doesNotWantBlock.Blockstore().Has(ctx, block.Cid()) if err != nil || blockInStore { t.Fatal("Unwanted block added to block store") } @@ -229,7 +229,7 @@ func TestPendingBlockAdded(t *testing.T) { } // Make sure Bitswap adds the block to the blockstore - blockInStore, err := instance.Blockstore().Has(lastBlock.Cid()) + blockInStore, err := instance.Blockstore().Has(context.Background(), lastBlock.Cid()) if err != nil { t.Fatal(err) } @@ -302,7 +302,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { first := instances[0] for _, b := range blocks { blkeys = append(blkeys, b.Cid()) - err := first.Exchange.HasBlock(b) + err := first.Exchange.HasBlock(ctx, b) if err != nil { t.Fatal(err) } @@ -341,7 +341,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { for _, inst := range instances { for _, b := range blocks { - if _, err := inst.Blockstore().Get(b.Cid()); err != nil { + if _, err := inst.Blockstore().Get(ctx, b.Cid()); err != nil { t.Fatal(err) } } @@ -378,7 +378,7 @@ func TestSendToWantingPeer(t *testing.T) { } // peerB announces to the network that he has block alpha - err = peerB.Exchange.HasBlock(alpha) + err = peerB.Exchange.HasBlock(ctx, alpha) if err != nil { t.Fatal(err) } @@ -440,7 +440,7 @@ func TestBasicBitswap(t *testing.T) { blocks := bg.Blocks(1) // First peer has block - err := instances[0].Exchange.HasBlock(blocks[0]) + err := instances[0].Exchange.HasBlock(context.Background(), blocks[0]) if err != nil { t.Fatal(err) } @@ -540,7 +540,7 @@ func TestDoubleGet(t *testing.T) { t.Fatal("expected channel to be closed") } - err = instances[0].Exchange.HasBlock(blocks[0]) + err = instances[0].Exchange.HasBlock(context.Background(), blocks[0]) if 
err != nil { t.Fatal(err) } @@ -703,7 +703,7 @@ func TestBitswapLedgerOneWay(t *testing.T) { instances := ig.Instances(2) blocks := bg.Blocks(1) - err := instances[0].Exchange.HasBlock(blocks[0]) + err := instances[0].Exchange.HasBlock(context.Background(), blocks[0]) if err != nil { t.Fatal(err) } @@ -755,12 +755,12 @@ func TestBitswapLedgerTwoWay(t *testing.T) { instances := ig.Instances(2) blocks := bg.Blocks(2) - err := instances[0].Exchange.HasBlock(blocks[0]) + err := instances[0].Exchange.HasBlock(context.Background(), blocks[0]) if err != nil { t.Fatal(err) } - err = instances[1].Exchange.HasBlock(blocks[1]) + err = instances[1].Exchange.HasBlock(context.Background(), blocks[1]) if err != nil { t.Fatal(err) } @@ -906,7 +906,7 @@ func TestTracer(t *testing.T) { bitswap.WithTracer(wiretap)(instances[0].Exchange) // First peer has block - err := instances[0].Exchange.HasBlock(blocks[0]) + err := instances[0].Exchange.HasBlock(context.Background(), blocks[0]) if err != nil { t.Fatal(err) } @@ -990,7 +990,7 @@ func TestTracer(t *testing.T) { // After disabling WireTap, no new messages are logged bitswap.WithTracer(nil)(instances[0].Exchange) - err = instances[0].Exchange.HasBlock(blocks[1]) + err = instances[0].Exchange.HasBlock(context.Background(), blocks[1]) if err != nil { t.Fatal(err) } diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/bitswap_with_sessions_test.go index 441745329..40eed0ff2 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -34,7 +34,7 @@ func TestBasicSessions(t *testing.T) { b := inst[1] // Add a block to Peer B - if err := b.Blockstore().Put(block); err != nil { + if err := b.Blockstore().Put(ctx, block); err != nil { t.Fatal(err) } @@ -82,7 +82,7 @@ func TestSessionBetweenPeers(t *testing.T) { // Add 101 blocks to Peer A blks := bgen.Blocks(101) - if err := inst[0].Blockstore().PutMany(blks); err != nil { + if err := inst[0].Blockstore().PutMany(ctx, blks); err != nil { 
t.Fatal(err) } @@ -143,7 +143,7 @@ func TestSessionSplitFetch(t *testing.T) { // Add 10 distinct blocks to each of 10 peers blks := bgen.Blocks(100) for i := 0; i < 10; i++ { - if err := inst[i].Blockstore().PutMany(blks[i*10 : (i+1)*10]); err != nil { + if err := inst[i].Blockstore().PutMany(ctx, blks[i*10:(i+1)*10]); err != nil { t.Fatal(err) } } @@ -187,7 +187,7 @@ func TestFetchNotConnected(t *testing.T) { // Provide 10 blocks on Peer A blks := bgen.Blocks(10) for _, block := range blks { - if err := other.Exchange.HasBlock(block); err != nil { + if err := other.Exchange.HasBlock(ctx, block); err != nil { t.Fatal(err) } } @@ -243,7 +243,7 @@ func TestFetchAfterDisconnect(t *testing.T) { firstBlks := blks[:5] for _, block := range firstBlks { - if err := peerA.Exchange.HasBlock(block); err != nil { + if err := peerA.Exchange.HasBlock(ctx, block); err != nil { t.Fatal(err) } } @@ -279,7 +279,7 @@ func TestFetchAfterDisconnect(t *testing.T) { // Provide remaining blocks lastBlks := blks[5:] for _, block := range lastBlks { - if err := peerA.Exchange.HasBlock(block); err != nil { + if err := peerA.Exchange.HasBlock(ctx, block); err != nil { t.Fatal(err) } } @@ -334,7 +334,7 @@ func TestInterestCacheOverflow(t *testing.T) { // wait to ensure that all the above cids were added to the sessions cache time.Sleep(time.Millisecond * 50) - if err := b.Exchange.HasBlock(blks[0]); err != nil { + if err := b.Exchange.HasBlock(ctx, blks[0]); err != nil { t.Fatal(err) } @@ -381,7 +381,7 @@ func TestPutAfterSessionCacheEvict(t *testing.T) { // wait to ensure that all the above cids were added to the sessions cache time.Sleep(time.Millisecond * 50) - if err := a.Exchange.HasBlock(blks[17]); err != nil { + if err := a.Exchange.HasBlock(ctx, blks[17]); err != nil { t.Fatal(err) } @@ -423,7 +423,7 @@ func TestMultipleSessions(t *testing.T) { } time.Sleep(time.Millisecond * 10) - if err := b.Exchange.HasBlock(blk); err != nil { + if err := b.Exchange.HasBlock(ctx, blk); err != nil { 
t.Fatal(err) } diff --git a/bitswap/internal/decision/blockstoremanager.go b/bitswap/internal/decision/blockstoremanager.go index 7d6864eb9..2d205c2ea 100644 --- a/bitswap/internal/decision/blockstoremanager.go +++ b/bitswap/internal/decision/blockstoremanager.go @@ -85,7 +85,7 @@ func (bsm *blockstoreManager) getBlockSizes(ctx context.Context, ks []cid.Cid) ( var lk sync.Mutex return res, bsm.jobPerKey(ctx, ks, func(c cid.Cid) { - size, err := bsm.bs.GetSize(c) + size, err := bsm.bs.GetSize(ctx, c) if err != nil { if err != bstore.ErrNotFound { // Note: this isn't a fatal error. We shouldn't abort the request @@ -107,7 +107,7 @@ func (bsm *blockstoreManager) getBlocks(ctx context.Context, ks []cid.Cid) (map[ var lk sync.Mutex return res, bsm.jobPerKey(ctx, ks, func(c cid.Cid) { - blk, err := bsm.bs.Get(c) + blk, err := bsm.bs.Get(ctx, c) if err != nil { if err != bstore.ErrNotFound { // Note: this isn't a fatal error. We shouldn't abort the request diff --git a/bitswap/internal/decision/blockstoremanager_test.go b/bitswap/internal/decision/blockstoremanager_test.go index ad447738c..fa026efb9 100644 --- a/bitswap/internal/decision/blockstoremanager_test.go +++ b/bitswap/internal/decision/blockstoremanager_test.go @@ -89,7 +89,7 @@ func TestBlockstoreManager(t *testing.T) { } // Put all blocks in the blockstore except the last one - if err := bstore.PutMany(blks[:len(blks)-1]); err != nil { + if err := bstore.PutMany(ctx, blks[:len(blks)-1]); err != nil { t.Fatal(err) } @@ -169,7 +169,7 @@ func TestBlockstoreManagerConcurrency(t *testing.T) { ks = append(ks, b.Cid()) } - err := bstore.PutMany(blks) + err := bstore.PutMany(ctx, blks) if err != nil { t.Fatal(err) } @@ -211,7 +211,7 @@ func TestBlockstoreManagerClose(t *testing.T) { ks = append(ks, b.Cid()) } - err := bstore.PutMany(blks) + err := bstore.PutMany(ctx, blks) if err != nil { t.Fatal(err) } @@ -251,7 +251,7 @@ func TestBlockstoreManagerCtxDone(t *testing.T) { ks = append(ks, b.Cid()) } - err := 
underlyingBstore.PutMany(blks) + err := underlyingBstore.PutMany(ctx, blks) if err != nil { t.Fatal(err) } diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index acde17954..315604aa7 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -245,7 +245,7 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) for _, letter := range strings.Split(alphabet, "") { block := blocks.NewBlock([]byte(letter)) - if err := bs.Put(block); err != nil { + if err := bs.Put(context.Background(), block); err != nil { t.Fatal(err) } } @@ -584,7 +584,7 @@ func TestPartnerWantHaveWantBlockActive(t *testing.T) { bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) for _, letter := range strings.Split(alphabet, "") { block := blocks.NewBlock([]byte(letter)) - if err := bs.Put(block); err != nil { + if err := bs.Put(context.Background(), block); err != nil { t.Fatal(err) } } @@ -884,7 +884,7 @@ func TestPartnerWantsThenCancels(t *testing.T) { bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) for _, letter := range alphabet { block := blocks.NewBlock([]byte(letter)) - if err := bs.Put(block); err != nil { + if err := bs.Put(context.Background(), block); err != nil { t.Fatal(err) } } @@ -936,7 +936,7 @@ func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { t.Fatal("expected no envelope yet") } - if err := bs.PutMany([]blocks.Block{blks[0], blks[2]}); err != nil { + if err := bs.PutMany(context.Background(), []blocks.Block{blks[0], blks[2]}); err != nil { t.Fatal(err) } e.ReceiveFrom(otherPeer, []blocks.Block{blks[0], blks[2]}) @@ -1000,7 +1000,7 @@ func TestSendDontHave(t *testing.T) { } // Receive all the blocks - if err := bs.PutMany(blks); err != nil { + if err := bs.PutMany(context.Background(), blks); err != nil { t.Fatal(err) } e.ReceiveFrom(otherPeer, blks) @@ 
-1073,7 +1073,7 @@ func TestTaskComparator(t *testing.T) { fpt := &fakePeerTagger{} sl := NewTestScoreLedger(shortTerm, nil, clock.New()) bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - if err := bs.PutMany(blks); err != nil { + if err := bs.PutMany(ctx, blks); err != nil { t.Fatal(err) } @@ -1121,7 +1121,7 @@ func TestTaggingPeers(t *testing.T) { keys := []string{"a", "b", "c", "d", "e"} for _, letter := range keys { block := blocks.NewBlock([]byte(letter)) - if err := sanfrancisco.Blockstore.Put(block); err != nil { + if err := sanfrancisco.Blockstore.Put(context.Background(), block); err != nil { t.Fatal(err) } } From 430ca5686e935893fc03a6184b7d1b7c650fa89c Mon Sep 17 00:00:00 2001 From: susarlanikhilesh Date: Thu, 18 Nov 2021 01:49:38 +0530 Subject: [PATCH 1013/1035] Change incorrect function name in README (#541) NewFromIPFSHost -> NewFromIpfsHost This commit was moved from ipfs/go-bitswap@ee3cce7eba0547ccfbc351e75bbf76c5747b7dfa --- bitswap/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/README.md b/bitswap/README.md index 488d9993d..aeb5948cc 100644 --- a/bitswap/README.md +++ b/bitswap/README.md @@ -70,7 +70,7 @@ var host host.Host var router routing.ContentRouting var bstore blockstore.Blockstore -network := bsnet.NewFromIPFSHost(host, router) +network := bsnet.NewFromIpfsHost(host, router) exchange := bitswap.New(ctx, network, bstore) ``` From 11a5cc0125d33d0bcc46f20fcd3a010383279296 Mon Sep 17 00:00:00 2001 From: whyrusleeping Date: Mon, 13 Dec 2021 17:12:42 -0800 Subject: [PATCH 1014/1035] configurable target message size This commit was moved from ipfs/go-bitswap@ada55fc18021cea48f769164342851f244bd89ec --- bitswap/bitswap.go | 11 +++++++++++ bitswap/internal/decision/engine.go | 17 +++++++++++++---- bitswap/internal/defaults/defaults.go | 2 ++ 3 files changed, 26 insertions(+), 4 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index fe0c4855a..c78753077 100644 --- 
a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -148,6 +148,12 @@ func SetSimulateDontHavesOnTimeout(send bool) Option { } } +func WithTargetMessageSize(tms int) Option { + return func(bs *Bitswap) { + bs.engineTargetMessageSize = tms + } +} + type TaskInfo = decision.TaskInfo type TaskComparator = decision.TaskComparator @@ -259,6 +265,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, engineTaskWorkerCount: defaults.BitswapEngineTaskWorkerCount, taskWorkerCount: defaults.BitswapTaskWorkerCount, engineMaxOutstandingBytesPerPeer: defaults.BitswapMaxOutstandingBytesPerPeer, + engineTargetMessageSize: defaults.BitswapEngineTargetMessageSize, engineSetSendDontHaves: true, simulateDontHavesOnTimeout: true, } @@ -283,6 +290,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, pendingBlocksGauge, activeBlocksGauge, decision.WithTaskComparator(bs.taskComparator), + decision.WithTargetMessageSize(bs.engineTargetMessageSize), ) bs.engine.SetSendDontHaves(bs.engineSetSendDontHaves) @@ -379,6 +387,9 @@ type Bitswap struct { // the score ledger used by the decision engine engineScoreLedger deciface.ScoreLedger + // target message size setting for engines peer task queue + engineTargetMessageSize int + // indicates what to do when the engine receives a want-block for a block that // is not in the blockstore. Either send DONT_HAVE or do nothing. // This is used to simulate older versions of bitswap that did nothing instead of sending back a DONT_HAVE. diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index abb0bcd6d..24e45f169 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -64,7 +64,7 @@ const ( // targetMessageSize is the ideal size of the batched payload. We try to // pop this much data off the request queue, but it may be a little more // or less depending on what's in the queue. 
- targetMessageSize = 16 * 1024 + defaultTargetMessageSize = 16 * 1024 // tagFormat is the tag given to peers associated an engine tagFormat = "bs-engine-%s-%s" @@ -159,6 +159,8 @@ type Engine struct { taskWorkerLock sync.Mutex taskWorkerCount int + targetMessageSize int + // maxBlockSizeReplaceHasWithBlock is the maximum size of the block in // bytes up to which we will replace a want-have with a want-block maxBlockSizeReplaceHasWithBlock int @@ -207,6 +209,12 @@ func WithTaskComparator(comparator TaskComparator) Option { } } +func WithTargetMessageSize(size int) Option { + return func(e *Engine) { + e.targetMessageSize = size + } +} + // wrapTaskComparator wraps a TaskComparator so it can be used as a QueueTaskComparator func wrapTaskComparator(tc TaskComparator) peertask.QueueTaskComparator { return func(a, b *peertask.QueueTask) bool { @@ -302,6 +310,7 @@ func newEngine( peerLedger: newPeerLedger(), pendingGauge: pendingEngineGauge, activeGauge: activeEngineGauge, + targetMessageSize: defaultTargetMessageSize, } e.tagQueued = fmt.Sprintf(tagFormat, "queued", uuid.New().String()) e.tagUseful = fmt.Sprintf(tagFormat, "useful", uuid.New().String()) @@ -450,21 +459,21 @@ func (e *Engine) taskWorkerExit() { func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { for { // Pop some tasks off the request queue - p, nextTasks, pendingBytes := e.peerRequestQueue.PopTasks(targetMessageSize) + p, nextTasks, pendingBytes := e.peerRequestQueue.PopTasks(e.targetMessageSize) e.updateMetrics() for len(nextTasks) == 0 { select { case <-ctx.Done(): return nil, ctx.Err() case <-e.workSignal: - p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(targetMessageSize) + p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(e.targetMessageSize) e.updateMetrics() case <-e.ticker.C: // When a task is cancelled, the queue may be "frozen" for a // period of time. We periodically "thaw" the queue to make // sure it doesn't get stuck in a frozen state. 
e.peerRequestQueue.ThawRound() - p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(targetMessageSize) + p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(e.targetMessageSize) e.updateMetrics() } } diff --git a/bitswap/internal/defaults/defaults.go b/bitswap/internal/defaults/defaults.go index 7237a996e..54a9eaa66 100644 --- a/bitswap/internal/defaults/defaults.go +++ b/bitswap/internal/defaults/defaults.go @@ -17,4 +17,6 @@ const ( BitswapEngineTaskWorkerCount = 8 // the total amount of bytes that a peer should have outstanding, it is utilized by the decision engine BitswapMaxOutstandingBytesPerPeer = 1 << 20 + // the number of bytes we attempt to make each outgoing bitswap message + BitswapEngineTargetMessageSize = 16 * 1024 ) From 050b78308c89ba0a10ad26b743fb989c93cc1c29 Mon Sep 17 00:00:00 2001 From: Laurent Senta Date: Thu, 17 Mar 2022 17:11:48 +0100 Subject: [PATCH 1015/1035] feat: add peer block filter option (#549) * feat: add peer block filter option This feature lets a user configure a function that will allow / deny request for a block coming from a peer. This commit was moved from ipfs/go-bitswap@b6f0cc7c83aaa27a39cc7e1b16ee34bba2d8b5b8 --- bitswap/bitswap.go | 11 + bitswap/internal/decision/engine.go | 91 ++++-- bitswap/internal/decision/engine_test.go | 340 ++++++++++++++++++++++- 3 files changed, 414 insertions(+), 28 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index c78753077..73ca266e2 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -154,8 +154,15 @@ func WithTargetMessageSize(tms int) Option { } } +func WithPeerBlockRequestFilter(pbrf PeerBlockRequestFilter) Option { + return func(bs *Bitswap) { + bs.peerBlockRequestFilter = pbrf + } +} + type TaskInfo = decision.TaskInfo type TaskComparator = decision.TaskComparator +type PeerBlockRequestFilter = decision.PeerBlockRequestFilter // WithTaskComparator configures custom task prioritization logic. 
func WithTaskComparator(comparator TaskComparator) Option { @@ -291,6 +298,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, activeBlocksGauge, decision.WithTaskComparator(bs.taskComparator), decision.WithTargetMessageSize(bs.engineTargetMessageSize), + decision.WithPeerBlockRequestFilter(bs.peerBlockRequestFilter), ) bs.engine.SetSendDontHaves(bs.engineSetSendDontHaves) @@ -399,6 +407,9 @@ type Bitswap struct { simulateDontHavesOnTimeout bool taskComparator TaskComparator + + // an optional feature to accept / deny requests for blocks + peerBlockRequestFilter PeerBlockRequestFilter } type counters struct { diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 24e45f169..c8c330975 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -180,6 +180,8 @@ type Engine struct { metricUpdateCounter int taskComparator TaskComparator + + peerBlockRequestFilter PeerBlockRequestFilter } // TaskInfo represents the details of a request from a peer. @@ -201,6 +203,10 @@ type TaskInfo struct { // It should return true if task 'ta' has higher priority than task 'tb' type TaskComparator func(ta, tb *TaskInfo) bool +// PeerBlockRequestFilter is used to accept / deny requests for a CID coming from a PeerID +// It should return true if the request should be fullfilled. 
+type PeerBlockRequestFilter func(p peer.ID, c cid.Cid) bool + type Option func(*Engine) func WithTaskComparator(comparator TaskComparator) Option { @@ -209,6 +215,12 @@ func WithTaskComparator(comparator TaskComparator) Option { } } +func WithPeerBlockRequestFilter(pbrf PeerBlockRequestFilter) Option { + return func(e *Engine) { + e.peerBlockRequestFilter = pbrf + } +} + func WithTargetMessageSize(size int) Option { return func(e *Engine) { e.targetMessageSize = size @@ -598,8 +610,11 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap } }() - // Get block sizes + // Dispatch entries wants, cancels := e.splitWantsCancels(entries) + wants, denials := e.splitWantsDenials(p, wants) + + // Get block sizes wantKs := cid.NewSet() for _, entry := range wants { wantKs.Add(entry.Cid) @@ -639,6 +654,38 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap } } + // Cancel a block operation + sendDontHave := func(entry bsmsg.Entry) { + // Only add the task to the queue if the requester wants a DONT_HAVE + if e.sendDontHaves && entry.SendDontHave { + c := entry.Cid + + newWorkExists = true + isWantBlock := false + if entry.WantType == pb.Message_Wantlist_Block { + isWantBlock = true + } + + activeEntries = append(activeEntries, peertask.Task{ + Topic: c, + Priority: int(entry.Priority), + Work: bsmsg.BlockPresenceSize(c), + Data: &taskData{ + BlockSize: 0, + HaveBlock: false, + IsWantBlock: isWantBlock, + SendDontHave: entry.SendDontHave, + }, + }) + } + } + + // Deny access to blocks + for _, entry := range denials { + log.Debugw("Bitswap engine: block denied access", "local", e.self, "from", p, "cid", entry.Cid, "sendDontHave", entry.SendDontHave) + sendDontHave(entry) + } + // For each want-have / want-block for _, entry := range wants { c := entry.Cid @@ -650,27 +697,7 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap // If the block was not found if !found { log.Debugw("Bitswap 
engine: block not found", "local", e.self, "from", p, "cid", entry.Cid, "sendDontHave", entry.SendDontHave) - - // Only add the task to the queue if the requester wants a DONT_HAVE - if e.sendDontHaves && entry.SendDontHave { - newWorkExists = true - isWantBlock := false - if entry.WantType == pb.Message_Wantlist_Block { - isWantBlock = true - } - - activeEntries = append(activeEntries, peertask.Task{ - Topic: c, - Priority: int(entry.Priority), - Work: bsmsg.BlockPresenceSize(c), - Data: &taskData{ - BlockSize: 0, - HaveBlock: false, - IsWantBlock: isWantBlock, - SendDontHave: entry.SendDontHave, - }, - }) - } + sendDontHave(entry) } else { // The block was found, add it to the queue newWorkExists = true @@ -722,6 +749,26 @@ func (e *Engine) splitWantsCancels(es []bsmsg.Entry) ([]bsmsg.Entry, []bsmsg.Ent return wants, cancels } +// Split the want-have / want-block entries from the block that will be denied access +func (e *Engine) splitWantsDenials(p peer.ID, allWants []bsmsg.Entry) ([]bsmsg.Entry, []bsmsg.Entry) { + if e.peerBlockRequestFilter == nil { + return allWants, nil + } + + wants := make([]bsmsg.Entry, 0, len(allWants)) + denied := make([]bsmsg.Entry, 0, len(allWants)) + + for _, et := range allWants { + if e.peerBlockRequestFilter(p, et.Cid) { + wants = append(wants, et) + } else { + denied = append(denied, et) + } + } + + return wants, denied +} + // ReceiveFrom is called when new blocks are received and added to the block // store, meaning there may be peers who want those blocks, so we should send // the blocks to them. 
diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index 315604aa7..c4dc53486 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -1112,6 +1112,334 @@ func TestTaskComparator(t *testing.T) { } } +func TestPeerBlockFilter(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + // Generate a few keys + keys := []string{"a", "b", "c", "d"} + blks := make([]blocks.Block, 0, len(keys)) + for _, letter := range keys { + block := blocks.NewBlock([]byte(letter)) + blks = append(blks, block) + } + + // Generate a few partner peers + peerIDs := make([]peer.ID, 3) + peerIDs[0] = libp2ptest.RandPeerIDFatal(t) + peerIDs[1] = libp2ptest.RandPeerIDFatal(t) + peerIDs[2] = libp2ptest.RandPeerIDFatal(t) + + // Setup the main peer + fpt := &fakePeerTagger{} + sl := NewTestScoreLedger(shortTerm, nil, clock.New()) + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + if err := bs.PutMany(ctx, blks); err != nil { + t.Fatal(err) + } + + e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, fpt, "localhost", 0, sl, + WithPeerBlockRequestFilter(func(p peer.ID, c cid.Cid) bool { + // peer 0 has access to everything + if p == peerIDs[0] { + return true + } + // peer 1 can only access key c and d + if p == peerIDs[1] { + return blks[2].Cid().Equals(c) || blks[3].Cid().Equals(c) + } + // peer 2 and other can only access key d + return blks[3].Cid().Equals(c) + }), + ) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + + // Setup the test + type testCaseEntry struct { + peerIndex int + wantBlks string + wantHaves string + } + + type testCaseExp struct { + blks string + haves string + dontHaves string + } + + type testCase struct { + only bool + wl testCaseEntry + exp testCaseExp + } + + testCases := []testCase{ + // Peer 0 has access 
to everything: want-block `a` succeeds. + { + wl: testCaseEntry{ + peerIndex: 0, + wantBlks: "a", + }, + exp: testCaseExp{ + blks: "a", + }, + }, + // Peer 0 has access to everything: want-have `b` succeeds. + { + wl: testCaseEntry{ + peerIndex: 0, + wantHaves: "b1", + }, + exp: testCaseExp{ + haves: "b", + dontHaves: "1", + }, + }, + // Peer 1 has access to [c, d]: want-have `a` result in dont-have. + { + wl: testCaseEntry{ + peerIndex: 1, + wantHaves: "ac", + }, + exp: testCaseExp{ + haves: "c", + dontHaves: "a", + }, + }, + // Peer 1 has access to [c, d]: want-block `b` result in dont-have. + { + wl: testCaseEntry{ + peerIndex: 1, + wantBlks: "bd", + }, + exp: testCaseExp{ + blks: "d", + dontHaves: "b", + }, + }, + // Peer 2 has access to [d]: want-have `a` and want-block `b` result in dont-have. + { + wl: testCaseEntry{ + peerIndex: 2, + wantHaves: "a", + wantBlks: "bcd1", + }, + exp: testCaseExp{ + haves: "", + blks: "d", + dontHaves: "abc1", + }, + }, + } + + var onlyTestCases []testCase + for _, testCase := range testCases { + if testCase.only { + onlyTestCases = append(onlyTestCases, testCase) + } + } + if len(onlyTestCases) > 0 { + testCases = onlyTestCases + } + + for i, testCase := range testCases { + // Create wants requests + wl := testCase.wl + + t.Logf("test case %v: Peer%v / want-blocks '%s' / want-haves '%s'", + i, wl.peerIndex, wl.wantBlks, wl.wantHaves) + + wantBlks := strings.Split(wl.wantBlks, "") + wantHaves := strings.Split(wl.wantHaves, "") + + partnerWantBlocksHaves(e, wantBlks, wantHaves, true, peerIDs[wl.peerIndex]) + + // Check result + exp := testCase.exp + + next := <-e.Outbox() + envelope := <-next + + expBlks := strings.Split(exp.blks, "") + expHaves := strings.Split(exp.haves, "") + expDontHaves := strings.Split(exp.dontHaves, "") + + err := checkOutput(t, e, envelope, expBlks, expHaves, expDontHaves) + if err != nil { + t.Fatal(err) + } + } +} + +func TestPeerBlockFilterMutability(t *testing.T) { + ctx, cancel := 
context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + // Generate a few keys + keys := []string{"a", "b", "c", "d"} + blks := make([]blocks.Block, 0, len(keys)) + for _, letter := range keys { + block := blocks.NewBlock([]byte(letter)) + blks = append(blks, block) + } + + partnerID := libp2ptest.RandPeerIDFatal(t) + + // Setup the main peer + fpt := &fakePeerTagger{} + sl := NewTestScoreLedger(shortTerm, nil, clock.New()) + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + if err := bs.PutMany(ctx, blks); err != nil { + t.Fatal(err) + } + + filterAllowList := make(map[cid.Cid]bool) + + e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, fpt, "localhost", 0, sl, + WithPeerBlockRequestFilter(func(p peer.ID, c cid.Cid) bool { + return filterAllowList[c] + }), + ) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + + // Setup the test + type testCaseEntry struct { + allowList string + wantBlks string + wantHaves string + } + + type testCaseExp struct { + blks string + haves string + dontHaves string + } + + type testCase struct { + only bool + wls []testCaseEntry + exps []testCaseExp + } + + testCases := []testCase{ + { + wls: []testCaseEntry{ + { + // Peer has no accesses & request a want-block + allowList: "", + wantBlks: "a", + }, + { + // Then Peer is allowed access to a + allowList: "a", + wantBlks: "a", + }, + }, + exps: []testCaseExp{ + { + dontHaves: "a", + }, + { + blks: "a", + }, + }, + }, + { + wls: []testCaseEntry{ + { + // Peer has access to bc + allowList: "bc", + wantHaves: "bc", + }, + { + // Then Peer loses access to b + allowList: "c", + wantBlks: "bc", // Note: We request a block here to force a response from the node + }, + }, + exps: []testCaseExp{ + { + haves: "bc", + }, + { + blks: "c", + dontHaves: "b", + }, + }, + }, + { + wls: []testCaseEntry{ + { + // Peer has no accesses & request a want-have + allowList: 
"", + wantHaves: "d", + }, + { + // Then Peer gains access to d + allowList: "d", + wantHaves: "d", + }, + }, + exps: []testCaseExp{ + { + dontHaves: "d", + }, + { + haves: "d", + }, + }, + }, + } + + var onlyTestCases []testCase + for _, testCase := range testCases { + if testCase.only { + onlyTestCases = append(onlyTestCases, testCase) + } + } + if len(onlyTestCases) > 0 { + testCases = onlyTestCases + } + + for i, testCase := range testCases { + for j := range testCase.wls { + wl := testCase.wls[j] + exp := testCase.exps[j] + + // Create wants requests + t.Logf("test case %v, %v: allow-list '%s' / want-blocks '%s' / want-haves '%s'", + i, j, wl.allowList, wl.wantBlks, wl.wantHaves) + + allowList := strings.Split(wl.allowList, "") + wantBlks := strings.Split(wl.wantBlks, "") + wantHaves := strings.Split(wl.wantHaves, "") + + // Update the allow list + filterAllowList = make(map[cid.Cid]bool) + for _, letter := range allowList { + block := blocks.NewBlock([]byte(letter)) + filterAllowList[block.Cid()] = true + } + + // Send the request + partnerWantBlocksHaves(e, wantBlks, wantHaves, true, partnerID) + + // Check result + next := <-e.Outbox() + envelope := <-next + + expBlks := strings.Split(exp.blks, "") + expHaves := strings.Split(exp.haves, "") + expDontHaves := strings.Split(exp.dontHaves, "") + + err := checkOutput(t, e, envelope, expBlks, expHaves, expDontHaves) + if err != nil { + t.Fatal(err) + } + } + } +} + func TestTaggingPeers(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() @@ -1199,24 +1527,24 @@ func TestTaggingUseful(t *testing.T) { } } -func partnerWantBlocks(e *Engine, keys []string, partner peer.ID) { +func partnerWantBlocks(e *Engine, wantBlocks []string, partner peer.ID) { add := message.New(false) - for i, letter := range keys { + for i, letter := range wantBlocks { block := blocks.NewBlock([]byte(letter)) - add.AddEntry(block.Cid(), int32(len(keys)-i), pb.Message_Wantlist_Block, true) + 
add.AddEntry(block.Cid(), int32(len(wantBlocks)-i), pb.Message_Wantlist_Block, true) } e.MessageReceived(context.Background(), partner, add) } -func partnerWantBlocksHaves(e *Engine, keys []string, wantHaves []string, sendDontHave bool, partner peer.ID) { +func partnerWantBlocksHaves(e *Engine, wantBlocks []string, wantHaves []string, sendDontHave bool, partner peer.ID) { add := message.New(false) - priority := int32(len(wantHaves) + len(keys)) + priority := int32(len(wantHaves) + len(wantBlocks)) for _, letter := range wantHaves { block := blocks.NewBlock([]byte(letter)) add.AddEntry(block.Cid(), priority, pb.Message_Wantlist_Have, sendDontHave) priority-- } - for _, letter := range keys { + for _, letter := range wantBlocks { block := blocks.NewBlock([]byte(letter)) add.AddEntry(block.Cid(), priority, pb.Message_Wantlist_Block, sendDontHave) priority-- From aeeac06c3e366f939172a1dac6f2a497e51c53b0 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Tue, 7 Apr 2020 23:38:33 +0200 Subject: [PATCH 1016/1035] Use ipld.ErrNotFound This commit was moved from ipfs/go-bitswap@b892ed1548f75a929f3c9a6d4a9d6b17f1c7478b --- bitswap/bitswap_test.go | 4 ++-- bitswap/internal/decision/blockstoremanager.go | 5 +++-- bitswap/internal/getter/getter.go | 4 ++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index c85f06f75..6e397a17d 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -19,10 +19,10 @@ import ( blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" detectrace "github.com/ipfs/go-detect-race" - blockstore "github.com/ipfs/go-ipfs-blockstore" blocksutil "github.com/ipfs/go-ipfs-blocksutil" delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" + ipld "github.com/ipfs/go-ipld-format" peer "github.com/libp2p/go-libp2p-core/peer" p2ptestutil "github.com/libp2p/go-libp2p-netutil" travis "github.com/libp2p/go-libp2p-testing/ci/travis" @@ 
-405,7 +405,7 @@ func TestEmptyKey(t *testing.T) { defer cancel() _, err := bs.GetBlock(ctx, cid.Cid{}) - if err != blockstore.ErrNotFound { + if !ipld.IsNotFound(err) { t.Error("empty str key should return ErrNotFound") } } diff --git a/bitswap/internal/decision/blockstoremanager.go b/bitswap/internal/decision/blockstoremanager.go index 2d205c2ea..80ee98a0a 100644 --- a/bitswap/internal/decision/blockstoremanager.go +++ b/bitswap/internal/decision/blockstoremanager.go @@ -8,6 +8,7 @@ import ( blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" bstore "github.com/ipfs/go-ipfs-blockstore" + ipld "github.com/ipfs/go-ipld-format" "github.com/ipfs/go-metrics-interface" process "github.com/jbenet/goprocess" ) @@ -87,7 +88,7 @@ func (bsm *blockstoreManager) getBlockSizes(ctx context.Context, ks []cid.Cid) ( return res, bsm.jobPerKey(ctx, ks, func(c cid.Cid) { size, err := bsm.bs.GetSize(ctx, c) if err != nil { - if err != bstore.ErrNotFound { + if !ipld.IsNotFound(err) { // Note: this isn't a fatal error. We shouldn't abort the request log.Errorf("blockstore.GetSize(%s) error: %s", c, err) } @@ -109,7 +110,7 @@ func (bsm *blockstoreManager) getBlocks(ctx context.Context, ks []cid.Cid) (map[ return res, bsm.jobPerKey(ctx, ks, func(c cid.Cid) { blk, err := bsm.bs.Get(ctx, c) if err != nil { - if err != bstore.ErrNotFound { + if !ipld.IsNotFound(err) { // Note: this isn't a fatal error. 
We shouldn't abort the request log.Errorf("blockstore.Get(%s) error: %s", c, err) } diff --git a/bitswap/internal/getter/getter.go b/bitswap/internal/getter/getter.go index 02e3b54b7..3f3f4a0eb 100644 --- a/bitswap/internal/getter/getter.go +++ b/bitswap/internal/getter/getter.go @@ -9,7 +9,7 @@ import ( blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" + ipld "github.com/ipfs/go-ipld-format" ) var log = logging.Logger("bitswap") @@ -24,7 +24,7 @@ type GetBlocksFunc func(context.Context, []cid.Cid) (<-chan blocks.Block, error) func SyncGetBlock(p context.Context, k cid.Cid, gb GetBlocksFunc) (blocks.Block, error) { if !k.Defined() { log.Error("undefined cid in GetBlock") - return nil, blockstore.ErrNotFound + return nil, ipld.ErrNotFound{Cid: k} } // Any async work initiated by this function must end when this function From aa686102117c9e9e7cbc888ca315fe150f96e7f3 Mon Sep 17 00:00:00 2001 From: Marten Seemann Date: Fri, 22 Apr 2022 16:45:04 +0100 Subject: [PATCH 1017/1035] fix initialisation example in README (#552) This commit was moved from ipfs/go-bitswap@35b5af95d30319094d448df61b7286b72a8c7b16 --- bitswap/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/README.md b/bitswap/README.md index aeb5948cc..c337ffa98 100644 --- a/bitswap/README.md +++ b/bitswap/README.md @@ -59,10 +59,10 @@ wants those blocks. 
import ( "context" bitswap "github.com/ipfs/go-bitswap" - bsnet "github.com/ipfs/go-graphsync/network" + bsnet "github.com/ipfs/go-bitswap/network" blockstore "github.com/ipfs/go-ipfs-blockstore" "github.com/libp2p/go-libp2p-core/routing" - "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-core/host" ) var ctx context.Context From 817239b724cea741f9e5c3e5ed24972ba9eacf8a Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Mon, 13 Jun 2022 09:02:59 -0700 Subject: [PATCH 1018/1035] feat: coalesce and queue connection event handling (#565) * feat: batch and queue connection event handling * address feedback * fix: mark responsive on new connection This commit was moved from ipfs/go-bitswap@a06a9eaeaadb16d39046b7251cf07b0dc363aa46 --- bitswap/bitswap.go | 3 +- bitswap/network/connecteventmanager.go | 183 ++++++++++++++---- bitswap/network/connecteventmanager_test.go | 196 +++++++++++--------- bitswap/network/interface.go | 7 +- bitswap/network/ipfs_impl.go | 13 +- bitswap/network/ipfs_impl_test.go | 59 +++--- bitswap/testnet/network_test.go | 6 +- bitswap/testnet/virtual.go | 5 +- 8 files changed, 312 insertions(+), 160 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 73ca266e2..100ce8599 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -303,7 +303,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, bs.engine.SetSendDontHaves(bs.engineSetSendDontHaves) bs.pqm.Startup() - network.SetDelegate(bs) + network.Start(bs) // Start up bitswaps async worker routines bs.startWorkers(ctx, px) @@ -316,6 +316,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, sm.Shutdown() cancelFunc() notif.Shutdown() + network.Stop() }() procctx.CloseAfterContext(px, ctx) // parent cancelled first diff --git a/bitswap/network/connecteventmanager.go b/bitswap/network/connecteventmanager.go index bbde7af2c..a9053ba6a 100644 --- a/bitswap/network/connecteventmanager.go +++ b/bitswap/network/connecteventmanager.go @@ 
-11,96 +11,203 @@ type ConnectionListener interface { PeerDisconnected(peer.ID) } +type state byte + +const ( + stateDisconnected = iota + stateResponsive + stateUnresponsive +) + type connectEventManager struct { connListener ConnectionListener lk sync.RWMutex - conns map[peer.ID]*connState + cond sync.Cond + peers map[peer.ID]*peerState + + changeQueue []peer.ID + stop bool + done chan struct{} } -type connState struct { - refs int - responsive bool +type peerState struct { + newState, curState state + pending bool } func newConnectEventManager(connListener ConnectionListener) *connectEventManager { - return &connectEventManager{ + evtManager := &connectEventManager{ connListener: connListener, - conns: make(map[peer.ID]*connState), + peers: make(map[peer.ID]*peerState), + done: make(chan struct{}), } + evtManager.cond = sync.Cond{L: &evtManager.lk} + return evtManager } -func (c *connectEventManager) Connected(p peer.ID) { +func (c *connectEventManager) Start() { + go c.worker() +} + +func (c *connectEventManager) Stop() { c.lk.Lock() - defer c.lk.Unlock() + c.stop = true + c.lk.Unlock() + c.cond.Broadcast() - state, ok := c.conns[p] + <-c.done +} + +func (c *connectEventManager) getState(p peer.ID) state { + if state, ok := c.peers[p]; ok { + return state.newState + } else { + return stateDisconnected + } +} + +func (c *connectEventManager) setState(p peer.ID, newState state) { + state, ok := c.peers[p] if !ok { - state = &connState{responsive: true} - c.conns[p] = state + state = new(peerState) + c.peers[p] = state + } + state.newState = newState + if !state.pending && state.newState != state.curState { + state.pending = true + c.changeQueue = append(c.changeQueue, p) + c.cond.Broadcast() } - state.refs++ +} - if state.refs == 1 && state.responsive { - c.connListener.PeerConnected(p) +// Waits for a change to be enqueued, or for the event manager to be stopped. Returns false if the +// connect event manager has been stopped. 
+func (c *connectEventManager) waitChange() bool { + for !c.stop && len(c.changeQueue) == 0 { + c.cond.Wait() } + return !c.stop } -func (c *connectEventManager) Disconnected(p peer.ID) { +func (c *connectEventManager) worker() { c.lk.Lock() defer c.lk.Unlock() + defer close(c.done) + + for c.waitChange() { + pid := c.changeQueue[0] + c.changeQueue[0] = peer.ID("") // free the peer ID (slicing won't do that) + c.changeQueue = c.changeQueue[1:] + + state, ok := c.peers[pid] + // If we've disconnected and forgotten, continue. + if !ok { + // This shouldn't be possible because _this_ thread is responsible for + // removing peers from this map, and we shouldn't get duplicate entries in + // the change queue. + log.Error("a change was enqueued for a peer we're not tracking") + continue + } - state, ok := c.conns[p] - if !ok { - // Should never happen + // Record the fact that this "state" is no longer in the queue. + state.pending = false + + // Then, if there's nothing to do, continue. + if state.curState == state.newState { + continue + } + + // Or record the state update, then apply it. + oldState := state.curState + state.curState = state.newState + + switch state.newState { + case stateDisconnected: + delete(c.peers, pid) + fallthrough + case stateUnresponsive: + // Only trigger a disconnect event if the peer was responsive. + // We could be transitioning from unresponsive to disconnected. + if oldState == stateResponsive { + c.lk.Unlock() + c.connListener.PeerDisconnected(pid) + c.lk.Lock() + } + case stateResponsive: + c.lk.Unlock() + c.connListener.PeerConnected(pid) + c.lk.Lock() + } + } +} + +// Called whenever we receive a new connection. May be called many times. 
+func (c *connectEventManager) Connected(p peer.ID) { + c.lk.Lock() + defer c.lk.Unlock() + + // !responsive -> responsive + + if c.getState(p) == stateResponsive { return } - state.refs-- + c.setState(p, stateResponsive) +} - if state.refs == 0 { - if state.responsive { - c.connListener.PeerDisconnected(p) - } - delete(c.conns, p) +// Called when we drop the final connection to a peer. +func (c *connectEventManager) Disconnected(p peer.ID) { + c.lk.Lock() + defer c.lk.Unlock() + + // !disconnected -> disconnected + + if c.getState(p) == stateDisconnected { + return } + + c.setState(p, stateDisconnected) } +// Called whenever a peer is unresponsive. func (c *connectEventManager) MarkUnresponsive(p peer.ID) { c.lk.Lock() defer c.lk.Unlock() - state, ok := c.conns[p] - if !ok || !state.responsive { + // responsive -> unresponsive + + if c.getState(p) != stateResponsive { return } - state.responsive = false - c.connListener.PeerDisconnected(p) + c.setState(p, stateUnresponsive) } +// Called whenever we receive a message from a peer. +// +// - When we're connected to the peer, this will mark the peer as responsive (from unresponsive). +// - When not connected, we ignore this call. Unfortunately, a peer may disconnect before we process +// the "on message" event, so we can't treat this as evidence of a connection. func (c *connectEventManager) OnMessage(p peer.ID) { - // This is a frequent operation so to avoid different message arrivals - // getting blocked by a write lock, first take a read lock to check if - // we need to modify state c.lk.RLock() - state, ok := c.conns[p] - responsive := ok && state.responsive + unresponsive := c.getState(p) == stateUnresponsive c.lk.RUnlock() - if !ok || responsive { + // Only continue if both connected, and unresponsive. 
+ if !unresponsive { return } + // unresponsive -> responsive + // We need to make a modification so now take a write lock c.lk.Lock() defer c.lk.Unlock() // Note: state may have changed in the time between when read lock // was released and write lock taken, so check again - state, ok = c.conns[p] - if !ok || state.responsive { + if c.getState(p) != stateUnresponsive { return } - state.responsive = true - c.connListener.PeerConnected(p) + c.setState(p, stateResponsive) } diff --git a/bitswap/network/connecteventmanager_test.go b/bitswap/network/connecteventmanager_test.go index fb81abeec..4ed7edd73 100644 --- a/bitswap/network/connecteventmanager_test.go +++ b/bitswap/network/connecteventmanager_test.go @@ -1,144 +1,168 @@ package network import ( + "sync" "testing" + "time" "github.com/ipfs/go-bitswap/internal/testutil" "github.com/libp2p/go-libp2p-core/peer" + "github.com/stretchr/testify/require" ) +type mockConnEvent struct { + connected bool + peer peer.ID +} + type mockConnListener struct { - conns map[peer.ID]int + sync.Mutex + events []mockConnEvent } func newMockConnListener() *mockConnListener { - return &mockConnListener{ - conns: make(map[peer.ID]int), - } + return new(mockConnListener) } func (cl *mockConnListener) PeerConnected(p peer.ID) { - cl.conns[p]++ + cl.Lock() + defer cl.Unlock() + cl.events = append(cl.events, mockConnEvent{connected: true, peer: p}) } func (cl *mockConnListener) PeerDisconnected(p peer.ID) { - cl.conns[p]-- + cl.Lock() + defer cl.Unlock() + cl.events = append(cl.events, mockConnEvent{connected: false, peer: p}) +} + +func wait(t *testing.T, c *connectEventManager) { + require.Eventually(t, func() bool { + c.lk.RLock() + defer c.lk.RUnlock() + return len(c.changeQueue) == 0 + }, time.Second, time.Millisecond, "connection event manager never processed events") } -func TestConnectEventManagerConnectionCount(t *testing.T) { +func TestConnectEventManagerConnectDisconnect(t *testing.T) { connListener := newMockConnListener() 
peers := testutil.GeneratePeers(2) cem := newConnectEventManager(connListener) + cem.Start() + t.Cleanup(cem.Stop) - // Peer A: 1 Connection - cem.Connected(peers[0]) - if connListener.conns[peers[0]] != 1 { - t.Fatal("Expected Connected event") - } + var expectedEvents []mockConnEvent - // Peer A: 2 Connections + // Connect A twice, should only see one event + cem.Connected(peers[0]) cem.Connected(peers[0]) - if connListener.conns[peers[0]] != 1 { - t.Fatal("Unexpected no Connected event for the same peer") - } + expectedEvents = append(expectedEvents, mockConnEvent{ + peer: peers[0], + connected: true, + }) - // Peer A: 2 Connections - // Peer B: 1 Connection + // Flush the event queue. + wait(t, cem) + require.Equal(t, expectedEvents, connListener.events) + + // Block up the event loop. + connListener.Lock() cem.Connected(peers[1]) - if connListener.conns[peers[1]] != 1 { - t.Fatal("Expected Connected event") - } - - // Peer A: 2 Connections - // Peer B: 0 Connections - cem.Disconnected(peers[1]) - if connListener.conns[peers[1]] != 0 { - t.Fatal("Expected Disconnected event") - } - - // Peer A: 1 Connection - // Peer B: 0 Connections - cem.Disconnected(peers[0]) - if connListener.conns[peers[0]] != 1 { - t.Fatal("Expected no Disconnected event for peer with one remaining conn") - } + expectedEvents = append(expectedEvents, mockConnEvent{ + peer: peers[1], + connected: true, + }) - // Peer A: 0 Connections - // Peer B: 0 Connections + // We don't expect this to show up. 
cem.Disconnected(peers[0]) - if connListener.conns[peers[0]] != 0 { - t.Fatal("Expected Disconnected event") - } + cem.Connected(peers[0]) + + connListener.Unlock() + + wait(t, cem) + require.Equal(t, expectedEvents, connListener.events) } func TestConnectEventManagerMarkUnresponsive(t *testing.T) { connListener := newMockConnListener() p := testutil.GeneratePeers(1)[0] cem := newConnectEventManager(connListener) + cem.Start() + t.Cleanup(cem.Stop) - // Peer A: 1 Connection - cem.Connected(p) - if connListener.conns[p] != 1 { - t.Fatal("Expected Connected event") - } + var expectedEvents []mockConnEvent - // Peer A: 1 Connection - cem.MarkUnresponsive(p) - if connListener.conns[p] != 0 { - t.Fatal("Expected Disconnected event") - } + // Don't mark as connected when we receive a message (could have been delayed). + cem.OnMessage(p) + wait(t, cem) + require.Equal(t, expectedEvents, connListener.events) - // Peer A: 2 Connections + // Handle connected event. cem.Connected(p) - if connListener.conns[p] != 0 { - t.Fatal("Expected no Connected event for unresponsive peer") - } + wait(t, cem) - // Peer A: 2 Connections - cem.OnMessage(p) - if connListener.conns[p] != 1 { - t.Fatal("Expected Connected event for newly responsive peer") - } + expectedEvents = append(expectedEvents, mockConnEvent{ + peer: p, + connected: true, + }) + require.Equal(t, expectedEvents, connListener.events) - // Peer A: 2 Connections - cem.OnMessage(p) - if connListener.conns[p] != 1 { - t.Fatal("Expected no further Connected event for subsequent messages") - } + // Becomes unresponsive. 
+ cem.MarkUnresponsive(p) + wait(t, cem) - // Peer A: 1 Connection - cem.Disconnected(p) - if connListener.conns[p] != 1 { - t.Fatal("Expected no Disconnected event for peer with one remaining conn") - } + expectedEvents = append(expectedEvents, mockConnEvent{ + peer: p, + connected: false, + }) + require.Equal(t, expectedEvents, connListener.events) - // Peer A: 0 Connections - cem.Disconnected(p) - if connListener.conns[p] != 0 { - t.Fatal("Expected Disconnected event") - } + // We have a new connection, mark them responsive. + cem.Connected(p) + wait(t, cem) + expectedEvents = append(expectedEvents, mockConnEvent{ + peer: p, + connected: true, + }) + require.Equal(t, expectedEvents, connListener.events) + + // No duplicate event. + cem.OnMessage(p) + wait(t, cem) + require.Equal(t, expectedEvents, connListener.events) } func TestConnectEventManagerDisconnectAfterMarkUnresponsive(t *testing.T) { connListener := newMockConnListener() p := testutil.GeneratePeers(1)[0] cem := newConnectEventManager(connListener) + cem.Start() + t.Cleanup(cem.Stop) - // Peer A: 1 Connection + var expectedEvents []mockConnEvent + + // Handle connected event. cem.Connected(p) - if connListener.conns[p] != 1 { - t.Fatal("Expected Connected event") - } + wait(t, cem) + + expectedEvents = append(expectedEvents, mockConnEvent{ + peer: p, + connected: true, + }) + require.Equal(t, expectedEvents, connListener.events) - // Peer A: 1 Connection + // Becomes unresponsive. 
cem.MarkUnresponsive(p) - if connListener.conns[p] != 0 { - t.Fatal("Expected Disconnected event") - } + wait(t, cem) + + expectedEvents = append(expectedEvents, mockConnEvent{ + peer: p, + connected: false, + }) + require.Equal(t, expectedEvents, connListener.events) - // Peer A: 0 Connections cem.Disconnected(p) - if connListener.conns[p] != 0 { - t.Fatal("Expected not to receive a second Disconnected event") - } + wait(t, cem) + require.Empty(t, cem.peers) // all disconnected + require.Equal(t, expectedEvents, connListener.events) } diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index a350d5254..8648f8dd4 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -35,9 +35,10 @@ type BitSwapNetwork interface { peer.ID, bsmsg.BitSwapMessage) error - // SetDelegate registers the Reciver to handle messages received from the - // network. - SetDelegate(Receiver) + // Start registers the Reciver and starts handling new messages, connectivity events, etc. + Start(Receiver) + // Stop stops the network service. + Stop() ConnectTo(context.Context, peer.ID) error DisconnectFrom(context.Context, peer.ID) error diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 7457aeb84..6f69b26a6 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -349,17 +349,22 @@ func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (network.Stre return bsnet.host.NewStream(ctx, p, bsnet.supportedProtocols...) } -func (bsnet *impl) SetDelegate(r Receiver) { +func (bsnet *impl) Start(r Receiver) { bsnet.receiver = r bsnet.connectEvtMgr = newConnectEventManager(r) for _, proto := range bsnet.supportedProtocols { bsnet.host.SetStreamHandler(proto, bsnet.handleNewStream) } bsnet.host.Network().Notify((*netNotifiee)(bsnet)) - // TODO: StopNotify. 
+ bsnet.connectEvtMgr.Start() } +func (bsnet *impl) Stop() { + bsnet.connectEvtMgr.Stop() + bsnet.host.Network().StopNotify((*netNotifiee)(bsnet)) +} + func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { return bsnet.host.Connect(ctx, peer.AddrInfo{ID: p}) } @@ -450,8 +455,8 @@ func (nn *netNotifiee) Connected(n network.Network, v network.Conn) { nn.impl().connectEvtMgr.Connected(v.RemotePeer()) } func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) { - // ignore transient connections - if v.Stat().Transient { + // Only record a "disconnect" when we actually disconnect. + if n.Connectedness(v.RemotePeer()) == network.Connected { return } diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index 0d7968ecb..9e0694896 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -38,7 +38,8 @@ func newReceiver() *receiver { return &receiver{ peers: make(map[peer.ID]struct{}), messageReceived: make(chan struct{}), - connectionEvent: make(chan bool, 1), + // Avoid blocking. 100 is good enough for tests. 
+ connectionEvent: make(chan bool, 100), } } @@ -169,8 +170,10 @@ func TestMessageSendAndReceive(t *testing.T) { bsnet2 := streamNet.Adapter(p2) r1 := newReceiver() r2 := newReceiver() - bsnet1.SetDelegate(r1) - bsnet2.SetDelegate(r2) + bsnet1.Start(r1) + t.Cleanup(bsnet1.Stop) + bsnet2.Start(r2) + t.Cleanup(bsnet2.Stop) err = mn.LinkAll() if err != nil { @@ -268,7 +271,8 @@ func prepareNetwork(t *testing.T, ctx context.Context, p1 tnet.Identity, r1 *rec eh1 := &ErrHost{Host: h1} routing1 := mr.ClientWithDatastore(context.TODO(), p1, ds.NewMapDatastore()) bsnet1 := bsnet.NewFromIpfsHost(eh1, routing1) - bsnet1.SetDelegate(r1) + bsnet1.Start(r1) + t.Cleanup(bsnet1.Stop) if r1.listener != nil { eh1.Network().Notify(r1.listener) } @@ -281,7 +285,8 @@ func prepareNetwork(t *testing.T, ctx context.Context, p1 tnet.Identity, r1 *rec eh2 := &ErrHost{Host: h2} routing2 := mr.ClientWithDatastore(context.TODO(), p2, ds.NewMapDatastore()) bsnet2 := bsnet.NewFromIpfsHost(eh2, routing2) - bsnet2.SetDelegate(r2) + bsnet2.Start(r2) + t.Cleanup(bsnet2.Stop) if r2.listener != nil { eh2.Network().Notify(r2.listener) } @@ -454,28 +459,32 @@ func TestSupportsHave(t *testing.T) { } for _, tc := range testCases { - p1 := tnet.RandIdentityOrFatal(t) - bsnet1 := streamNet.Adapter(p1) - bsnet1.SetDelegate(newReceiver()) - - p2 := tnet.RandIdentityOrFatal(t) - bsnet2 := streamNet.Adapter(p2, bsnet.SupportedProtocols([]protocol.ID{tc.proto})) - bsnet2.SetDelegate(newReceiver()) - - err = mn.LinkAll() - if err != nil { - t.Fatal(err) - } + t.Run(fmt.Sprintf("%s-%v", tc.proto, tc.expSupportsHave), func(t *testing.T) { + p1 := tnet.RandIdentityOrFatal(t) + bsnet1 := streamNet.Adapter(p1) + bsnet1.Start(newReceiver()) + t.Cleanup(bsnet1.Stop) + + p2 := tnet.RandIdentityOrFatal(t) + bsnet2 := streamNet.Adapter(p2, bsnet.SupportedProtocols([]protocol.ID{tc.proto})) + bsnet2.Start(newReceiver()) + t.Cleanup(bsnet2.Stop) + + err = mn.LinkAll() + if err != nil { + t.Fatal(err) + } - senderCurrent, 
err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{}) - if err != nil { - t.Fatal(err) - } - defer senderCurrent.Close() + senderCurrent, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{}) + if err != nil { + t.Fatal(err) + } + defer senderCurrent.Close() - if senderCurrent.SupportsHave() != tc.expSupportsHave { - t.Fatal("Expected sender HAVE message support", tc.proto, tc.expSupportsHave) - } + if senderCurrent.SupportsHave() != tc.expSupportsHave { + t.Fatal("Expected sender HAVE message support", tc.proto, tc.expSupportsHave) + } + }) } } diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 89f3d68f0..fbd1fa41a 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -28,7 +28,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { expectedStr := "received async" - responder.SetDelegate(lambda(func( + responder.Start(lambda(func( ctx context.Context, fromWaiter peer.ID, msgFromWaiter bsmsg.BitSwapMessage) { @@ -40,8 +40,9 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { t.Error(err) } })) + t.Cleanup(responder.Stop) - waiter.SetDelegate(lambda(func( + waiter.Start(lambda(func( ctx context.Context, fromResponder peer.ID, msgFromResponder bsmsg.BitSwapMessage) { @@ -59,6 +60,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { t.Fatal("Message not received from the responder") } })) + t.Cleanup(waiter.Stop) messageSentAsync := bsmsg.New(true) messageSentAsync.AddBlock(blocks.NewBlock([]byte("data"))) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 66f5e8216..b5405841b 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -300,10 +300,13 @@ func (nc *networkClient) Provide(ctx context.Context, k cid.Cid) error { return nc.routing.Provide(ctx, k, true) } -func (nc *networkClient) SetDelegate(r bsnet.Receiver) { +func (nc *networkClient) Start(r bsnet.Receiver) { nc.Receiver = r } 
+func (nc *networkClient) Stop() { +} + func (nc *networkClient) ConnectTo(_ context.Context, p peer.ID) error { nc.network.mu.Lock() otherClient, ok := nc.network.clients[p] From 2fa11e5e5a35abc2dc6797d09e758f61ba2a8a43 Mon Sep 17 00:00:00 2001 From: Gus Eggert Date: Mon, 13 Jun 2022 22:19:33 -0400 Subject: [PATCH 1019/1035] feat: add basic tracing (#562) This adds tracing spans to the costly Bitswap entry points. It doesn't instrument the bitswap internals, which will take some time. In go-ipfs, this will at least let us know the contribution of Bitswap to the overall request handling time. This also plumbs contexts through internally so that they reach the content routing APIs, so that traces are propagated through and we can start instrumenting e.g. the DHT. This commit was moved from ipfs/go-bitswap@b18a91d6023b83821c72253bbe5e37190db64d63 --- bitswap/bitswap.go | 15 +++++++++++++-- .../blockpresencemanager_test.go | 3 +-- bitswap/internal/getter/getter.go | 6 ++++++ .../providerquerymanager.go | 19 +++++++++++++------ bitswap/internal/session/session.go | 10 ++++++++-- .../internal/sessionmanager/sessionmanager.go | 7 +++++++ bitswap/internal/tracing.go | 13 +++++++++++++ 7 files changed, 61 insertions(+), 12 deletions(-) create mode 100644 bitswap/internal/tracing.go diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 100ce8599..cfb138cfe 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -11,8 +11,11 @@ import ( "time" delay "github.com/ipfs/go-ipfs-delay" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" deciface "github.com/ipfs/go-bitswap/decision" + "github.com/ipfs/go-bitswap/internal" bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" "github.com/ipfs/go-bitswap/internal/decision" "github.com/ipfs/go-bitswap/internal/defaults" @@ -425,8 +428,10 @@ type counters struct { // GetBlock attempts to retrieve a particular block from peers within the // deadline enforced by the context. 
-func (bs *Bitswap) GetBlock(parent context.Context, k cid.Cid) (blocks.Block, error) { - return bsgetter.SyncGetBlock(parent, k, bs.GetBlocks) +func (bs *Bitswap) GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) { + ctx, span := internal.StartSpan(ctx, "GetBlock", trace.WithAttributes(attribute.String("Key", k.String()))) + defer span.End() + return bsgetter.SyncGetBlock(ctx, k, bs.GetBlocks) } // WantlistForPeer returns the currently understood list of blocks requested by a @@ -453,6 +458,8 @@ func (bs *Bitswap) LedgerForPeer(p peer.ID) *decision.Receipt { // resources, provide a context with a reasonably short deadline (ie. not one // that lasts throughout the lifetime of the server) func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { + ctx, span := internal.StartSpan(ctx, "GetBlocks", trace.WithAttributes(attribute.Int("NumKeys", len(keys)))) + defer span.End() session := bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay) return session.GetBlocks(ctx, keys) } @@ -460,6 +467,8 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks // HasBlock announces the existence of a block to this bitswap service. The // service will potentially notify its peers. func (bs *Bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { + ctx, span := internal.StartSpan(ctx, "GetBlocks", trace.WithAttributes(attribute.String("Block", blk.Cid().String()))) + defer span.End() return bs.receiveBlocksFrom(ctx, "", []blocks.Block{blk}, nil, nil) } @@ -696,5 +705,7 @@ func (bs *Bitswap) IsOnline() bool { // be more efficient in its requests to peers. If you are using a session // from go-blockservice, it will create a bitswap session automatically. 
func (bs *Bitswap) NewSession(ctx context.Context) exchange.Fetcher { + ctx, span := internal.StartSpan(ctx, "NewSession") + defer span.End() return bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay) } diff --git a/bitswap/internal/blockpresencemanager/blockpresencemanager_test.go b/bitswap/internal/blockpresencemanager/blockpresencemanager_test.go index 0d65c457e..66f489dfd 100644 --- a/bitswap/internal/blockpresencemanager/blockpresencemanager_test.go +++ b/bitswap/internal/blockpresencemanager/blockpresencemanager_test.go @@ -1,7 +1,6 @@ package blockpresencemanager import ( - "fmt" "testing" "github.com/ipfs/go-bitswap/internal/testutil" @@ -233,7 +232,7 @@ func TestAllPeersDoNotHaveBlock(t *testing.T) { bpm.AllPeersDoNotHaveBlock(tc.peers, tc.ks), tc.exp, ) { - t.Fatal(fmt.Sprintf("test case %d failed: expected matching keys", i)) + t.Fatalf("test case %d failed: expected matching keys", i) } } } diff --git a/bitswap/internal/getter/getter.go b/bitswap/internal/getter/getter.go index 3f3f4a0eb..c5c1951b8 100644 --- a/bitswap/internal/getter/getter.go +++ b/bitswap/internal/getter/getter.go @@ -4,6 +4,7 @@ import ( "context" "errors" + "github.com/ipfs/go-bitswap/internal" notifications "github.com/ipfs/go-bitswap/internal/notifications" logging "github.com/ipfs/go-log" @@ -22,6 +23,9 @@ type GetBlocksFunc func(context.Context, []cid.Cid) (<-chan blocks.Block, error) // blocks that returns a channel, and uses that function to return the // block syncronously. func SyncGetBlock(p context.Context, k cid.Cid, gb GetBlocksFunc) (blocks.Block, error) { + p, span := internal.StartSpan(p, "Getter.SyncGetBlock") + defer span.End() + if !k.Defined() { log.Error("undefined cid in GetBlock") return nil, ipld.ErrNotFound{Cid: k} @@ -65,6 +69,8 @@ type WantFunc func(context.Context, []cid.Cid) // incoming blocks. 
func AsyncGetBlocks(ctx context.Context, sessctx context.Context, keys []cid.Cid, notif notifications.PubSub, want WantFunc, cwants func([]cid.Cid)) (<-chan blocks.Block, error) { + ctx, span := internal.StartSpan(ctx, "Getter.AsyncGetBlocks") + defer span.End() // If there are no keys supplied, just return a closed channel if len(keys) == 0 { diff --git a/bitswap/internal/providerquerymanager/providerquerymanager.go b/bitswap/internal/providerquerymanager/providerquerymanager.go index d47ffdb5a..b3d29dea1 100644 --- a/bitswap/internal/providerquerymanager/providerquerymanager.go +++ b/bitswap/internal/providerquerymanager/providerquerymanager.go @@ -44,15 +44,18 @@ type providerQueryMessage interface { } type receivedProviderMessage struct { - k cid.Cid - p peer.ID + ctx context.Context + k cid.Cid + p peer.ID } type finishedProviderQueryMessage struct { - k cid.Cid + ctx context.Context + k cid.Cid } type newProvideQueryMessage struct { + ctx context.Context k cid.Cid inProgressRequestChan chan<- inProgressRequest } @@ -120,6 +123,7 @@ func (pqm *ProviderQueryManager) FindProvidersAsync(sessionCtx context.Context, select { case pqm.providerQueryMessages <- &newProvideQueryMessage{ + ctx: sessionCtx, k: k, inProgressRequestChan: inProgressRequestChan, }: @@ -244,8 +248,9 @@ func (pqm *ProviderQueryManager) findProviderWorker() { } select { case pqm.providerQueryMessages <- &receivedProviderMessage{ - k: k, - p: p, + ctx: findProviderCtx, + k: k, + p: p, }: case <-pqm.ctx.Done(): return @@ -256,7 +261,8 @@ func (pqm *ProviderQueryManager) findProviderWorker() { cancel() select { case pqm.providerQueryMessages <- &finishedProviderQueryMessage{ - k: k, + ctx: findProviderCtx, + k: k, }: case <-pqm.ctx.Done(): } @@ -372,6 +378,7 @@ func (npqm *newProvideQueryMessage) debugMessage() string { func (npqm *newProvideQueryMessage) handle(pqm *ProviderQueryManager) { requestStatus, ok := pqm.inProgressRequestStatuses[npqm.k] if !ok { + ctx, cancelFn := 
context.WithCancel(pqm.ctx) requestStatus = &inProgressRequestStatus{ listeners: make(map[chan peer.ID]struct{}), diff --git a/bitswap/internal/session/session.go b/bitswap/internal/session/session.go index f2a4d2e46..fa3c87b97 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/internal/session/session.go @@ -4,6 +4,7 @@ import ( "context" "time" + "github.com/ipfs/go-bitswap/internal" bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" bsgetter "github.com/ipfs/go-bitswap/internal/getter" notifications "github.com/ipfs/go-bitswap/internal/notifications" @@ -228,14 +229,19 @@ func (s *Session) logReceiveFrom(from peer.ID, interestedKs []cid.Cid, haves []c } // GetBlock fetches a single block. -func (s *Session) GetBlock(parent context.Context, k cid.Cid) (blocks.Block, error) { - return bsgetter.SyncGetBlock(parent, k, s.GetBlocks) +func (s *Session) GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) { + ctx, span := internal.StartSpan(ctx, "Session.GetBlock") + defer span.End() + return bsgetter.SyncGetBlock(ctx, k, s.GetBlocks) } // GetBlocks fetches a set of blocks within the context of this session and // returns a channel that found blocks will be returned on. No order is // guaranteed on the returned blocks. 
func (s *Session) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { + ctx, span := internal.StartSpan(ctx, "Session.GetBlocks") + defer span.End() + ctx = logging.ContextWithLoggable(ctx, s.uuid) return bsgetter.AsyncGetBlocks(ctx, s.ctx, keys, s.notif, diff --git a/bitswap/internal/sessionmanager/sessionmanager.go b/bitswap/internal/sessionmanager/sessionmanager.go index 42b209387..7a48e14db 100644 --- a/bitswap/internal/sessionmanager/sessionmanager.go +++ b/bitswap/internal/sessionmanager/sessionmanager.go @@ -2,12 +2,16 @@ package sessionmanager import ( "context" + "strconv" "sync" "time" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "github.com/ipfs/go-bitswap/internal" bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" notifications "github.com/ipfs/go-bitswap/internal/notifications" bssession "github.com/ipfs/go-bitswap/internal/session" @@ -87,6 +91,9 @@ func (sm *SessionManager) NewSession(ctx context.Context, rebroadcastDelay delay.D) exchange.Fetcher { id := sm.GetNextSessionID() + ctx, span := internal.StartSpan(ctx, "SessionManager.NewSession", trace.WithAttributes(attribute.String("ID", strconv.FormatUint(id, 10)))) + defer span.End() + pm := sm.peerManagerFactory(ctx, id) session := sm.sessionFactory(ctx, sm, id, pm, sm.sessionInterestManager, sm.peerManager, sm.blockPresenceManager, sm.notif, provSearchDelay, rebroadcastDelay, sm.self) diff --git a/bitswap/internal/tracing.go b/bitswap/internal/tracing.go new file mode 100644 index 000000000..aa1f7992f --- /dev/null +++ b/bitswap/internal/tracing.go @@ -0,0 +1,13 @@ +package internal + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/trace" +) + +func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + return otel.Tracer("go-bitswap").Start(ctx, 
fmt.Sprintf("Bitswap.%s", name), opts...) +} From dbc09b82f7c709cdacbb3903cbaa2068f0833664 Mon Sep 17 00:00:00 2001 From: Marco Munizaga Date: Fri, 1 Jul 2022 15:24:31 -0700 Subject: [PATCH 1020/1035] Remove dependency on travis package from go-libp2p-testing This commit was moved from ipfs/go-bitswap@a02a3be6dfee010d50263201251d1a2601f5686c --- bitswap/bitswap_test.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 6e397a17d..048d7e6a1 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "os" "sync" "testing" "time" @@ -25,10 +26,14 @@ import ( ipld "github.com/ipfs/go-ipld-format" peer "github.com/libp2p/go-libp2p-core/peer" p2ptestutil "github.com/libp2p/go-libp2p-netutil" - travis "github.com/libp2p/go-libp2p-testing/ci/travis" tu "github.com/libp2p/go-libp2p-testing/etc" ) +func isCI() bool { + // https://github.blog/changelog/2020-04-15-github-actions-sets-the-ci-environment-variable-to-true/ + return os.Getenv("CI") != "" +} + // FIXME the tests are really sensitive to the network delay. fix them to work // well under varying conditions const kNetworkDelay = 0 * time.Millisecond @@ -248,7 +253,7 @@ func TestLargeSwarm(t *testing.T) { // when running with the race detector, 500 instances launches // well over 8k goroutines. This hits a race detector limit. 
numInstances = 20 - } else if travis.IsRunning() { + } else if isCI() { numInstances = 200 } else { t.Parallel() @@ -261,7 +266,7 @@ func TestLargeFile(t *testing.T) { t.SkipNow() } - if !travis.IsRunning() { + if !isCI() { t.Parallel() } From 7c7f1a84662706ce65aa56361f1d0cb6f807493e Mon Sep 17 00:00:00 2001 From: GitHub Date: Thu, 21 Jul 2022 09:56:02 +0000 Subject: [PATCH 1021/1035] chore: Update .github/workflows/stale.yml [skip ci] This commit was moved from ipfs/go-bitswap@5ffb3ec4ecdfd5232905491784bad7eaf36c57af From 3ccffd3cc6e8cd903f5048dbb9b792e1995a3778 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20Mur=C3=A9?= Date: Fri, 8 Jul 2022 18:28:39 +0200 Subject: [PATCH 1022/1035] feat: don't add blocks to the datastore This leave the responsibility and choice to do so to the caller, typically go-blockservice. This has several benefit: - untangle the code - allow to use an exchange as pure block retrieval - avoid double add Close https://github.com/ipfs/kubo/issues/7956 This commit was moved from ipfs/go-bitswap@a052ec947ac914f2a6dbb4ab41ef274b4580c6d6 --- bitswap/bitswap.go | 111 +++++++++--------- bitswap/bitswap_test.go | 94 +++++---------- bitswap/bitswap_with_sessions_test.go | 24 +--- bitswap/internal/decision/engine.go | 30 ++--- bitswap/internal/decision/engine_test.go | 10 +- .../internal/notifications/notifications.go | 8 +- 6 files changed, 113 insertions(+), 164 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index cfb138cfe..8c549ede3 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -6,7 +6,6 @@ import ( "context" "errors" "fmt" - "sync" "time" @@ -464,72 +463,82 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks return session.GetBlocks(ctx, keys) } -// HasBlock announces the existence of a block to this bitswap service. The +// NotifyNewBlocks announces the existence of blocks to this bitswap service. The // service will potentially notify its peers. 
-func (bs *Bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { - ctx, span := internal.StartSpan(ctx, "GetBlocks", trace.WithAttributes(attribute.String("Block", blk.Cid().String()))) +// Bitswap itself doesn't store new blocks. It's the caller responsibility to ensure +// that those blocks are available in the blockstore before calling this function. +func (bs *Bitswap) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error { + ctx, span := internal.StartSpan(ctx, "NotifyNewBlocks") defer span.End() - return bs.receiveBlocksFrom(ctx, "", []blocks.Block{blk}, nil, nil) -} -// TODO: Some of this stuff really only needs to be done when adding a block -// from the user, not when receiving it from the network. -// In case you run `git blame` on this comment, I'll save you some time: ask -// @whyrusleeping, I don't know the answers you seek. -func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []blocks.Block, haves []cid.Cid, dontHaves []cid.Cid) error { select { case <-bs.process.Closing(): return errors.New("bitswap is closed") default: } - wanted := blks + blkCids := make([]cid.Cid, len(blks)) + for i, blk := range blks { + blkCids[i] = blk.Cid() + } + + // Send all block keys (including duplicates) to any sessions that want them. + // (The duplicates are needed by sessions for accounting purposes) + bs.sm.ReceiveFrom(ctx, "", blkCids, nil, nil) + + // Send wanted blocks to decision engine + bs.engine.NotifyNewBlocks(blks) - // If blocks came from the network - if from != "" { - var notWanted []blocks.Block - wanted, notWanted = bs.sim.SplitWantedUnwanted(blks) - for _, b := range notWanted { - log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), from) + // Publish the block to any Bitswap clients that had requested blocks. + // (the sessions use this pubsub mechanism to inform clients of incoming + // blocks) + bs.notif.Publish(blks...) 
+ + // If the reprovider is enabled, send block to reprovider + if bs.provideEnabled { + for _, blk := range blks { + select { + case bs.newBlocks <- blk.Cid(): + // send block off to be reprovided + case <-bs.process.Closing(): + return bs.process.Close() + } } } - // Put wanted blocks into blockstore - if len(wanted) > 0 { - err := bs.blockstore.PutMany(ctx, wanted) - if err != nil { - log.Errorf("Error writing %d blocks to datastore: %s", len(wanted), err) - return err - } + return nil +} + +// receiveBlocksFrom process blocks received from the network +func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []blocks.Block, haves []cid.Cid, dontHaves []cid.Cid) error { + select { + case <-bs.process.Closing(): + return errors.New("bitswap is closed") + default: } - // NOTE: There exists the possiblity for a race condition here. If a user - // creates a node, then adds it to the dagservice while another goroutine - // is waiting on a GetBlock for that object, they will receive a reference - // to the same node. We should address this soon, but i'm not going to do - // it now as it requires more thought and isnt causing immediate problems. + wanted, notWanted := bs.sim.SplitWantedUnwanted(blks) + for _, b := range notWanted { + log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), from) + } allKs := make([]cid.Cid, 0, len(blks)) for _, b := range blks { allKs = append(allKs, b.Cid()) } - // If the message came from the network - if from != "" { - // Inform the PeerManager so that we can calculate per-peer latency - combined := make([]cid.Cid, 0, len(allKs)+len(haves)+len(dontHaves)) - combined = append(combined, allKs...) - combined = append(combined, haves...) - combined = append(combined, dontHaves...) - bs.pm.ResponseReceived(from, combined) - } + // Inform the PeerManager so that we can calculate per-peer latency + combined := make([]cid.Cid, 0, len(allKs)+len(haves)+len(dontHaves)) + combined = append(combined, allKs...) 
+ combined = append(combined, haves...) + combined = append(combined, dontHaves...) + bs.pm.ResponseReceived(from, combined) - // Send all block keys (including duplicates) to any sessions that want them. - // (The duplicates are needed by sessions for accounting purposes) + // Send all block keys (including duplicates) to any sessions that want them for accounting purpose. bs.sm.ReceiveFrom(ctx, from, allKs, haves, dontHaves) // Send wanted blocks to decision engine - bs.engine.ReceiveFrom(from, wanted) + bs.engine.ReceivedBlocks(from, wanted) // Publish the block to any Bitswap clients that had requested blocks. // (the sessions use this pubsub mechanism to inform clients of incoming @@ -538,22 +547,8 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []b bs.notif.Publish(b) } - // If the reprovider is enabled, send wanted blocks to reprovider - if bs.provideEnabled { - for _, blk := range wanted { - select { - case bs.newBlocks <- blk.Cid(): - // send block off to be reprovided - case <-bs.process.Closing(): - return bs.process.Close() - } - } - } - - if from != "" { - for _, b := range wanted { - log.Debugw("Bitswap.GetBlockRequest.End", "cid", b.Cid()) - } + for _, b := range wanted { + log.Debugw("Bitswap.GetBlockRequest.End", "cid", b.Cid()) } return nil diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 048d7e6a1..eae7fa750 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -42,6 +42,18 @@ func getVirtualNetwork() tn.Network { return tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) } +func addBlock(t *testing.T, ctx context.Context, inst testinstance.Instance, blk blocks.Block) { + t.Helper() + err := inst.Blockstore().Put(ctx, blk) + if err != nil { + t.Fatal(err) + } + err = inst.Exchange.NotifyNewBlocks(ctx, blk) + if err != nil { + t.Fatal(err) + } +} + func TestClose(t *testing.T) { vnet := getVirtualNetwork() ig := testinstance.NewTestInstanceGenerator(vnet, nil, 
nil) @@ -95,9 +107,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { hasBlock := peers[0] defer hasBlock.Exchange.Close() - if err := hasBlock.Exchange.HasBlock(context.Background(), block); err != nil { - t.Fatal(err) - } + addBlock(t, context.Background(), hasBlock, block) wantsBlock := peers[1] defer wantsBlock.Exchange.Close() @@ -128,9 +138,7 @@ func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { wantsBlock := ig.Next() defer wantsBlock.Exchange.Close() - if err := hasBlock.Exchange.HasBlock(context.Background(), block); err != nil { - t.Fatal(err) - } + addBlock(t, context.Background(), hasBlock, block) ctx, cancel := context.WithTimeout(context.Background(), 60*time.Millisecond) defer cancel() @@ -163,9 +171,7 @@ func TestUnwantedBlockNotAdded(t *testing.T) { hasBlock := peers[0] defer hasBlock.Exchange.Close() - if err := hasBlock.Exchange.HasBlock(context.Background(), block); err != nil { - t.Fatal(err) - } + addBlock(t, context.Background(), hasBlock, block) doesNotWantBlock := peers[1] defer doesNotWantBlock.Exchange.Close() @@ -232,15 +238,6 @@ func TestPendingBlockAdded(t *testing.T) { if !blkrecvd.Cid().Equals(lastBlock.Cid()) { t.Fatal("received wrong block") } - - // Make sure Bitswap adds the block to the blockstore - blockInStore, err := instance.Blockstore().Has(context.Background(), lastBlock.Cid()) - if err != nil { - t.Fatal(err) - } - if !blockInStore { - t.Fatal("Block was not added to block store") - } } func TestLargeSwarm(t *testing.T) { @@ -307,10 +304,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { first := instances[0] for _, b := range blocks { blkeys = append(blkeys, b.Cid()) - err := first.Exchange.HasBlock(ctx, b) - if err != nil { - t.Fatal(err) - } + addBlock(t, ctx, first, b) } t.Log("Distribute!") @@ -341,16 +335,6 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { t.Fatal(err) } } - - t.Log("Verify!") - - for _, inst := range instances { - for _, b 
:= range blocks { - if _, err := inst.Blockstore().Get(ctx, b.Cid()); err != nil { - t.Fatal(err) - } - } - } } // TODO simplify this test. get to the _essence_! @@ -383,10 +367,7 @@ func TestSendToWantingPeer(t *testing.T) { } // peerB announces to the network that he has block alpha - err = peerB.Exchange.HasBlock(ctx, alpha) - if err != nil { - t.Fatal(err) - } + addBlock(t, ctx, peerB, alpha) // At some point, peerA should get alpha (or timeout) blkrecvd, ok := <-alphaPromise @@ -445,10 +426,7 @@ func TestBasicBitswap(t *testing.T) { blocks := bg.Blocks(1) // First peer has block - err := instances[0].Exchange.HasBlock(context.Background(), blocks[0]) - if err != nil { - t.Fatal(err) - } + addBlock(t, context.Background(), instances[0], blocks[0]) ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() @@ -545,10 +523,7 @@ func TestDoubleGet(t *testing.T) { t.Fatal("expected channel to be closed") } - err = instances[0].Exchange.HasBlock(context.Background(), blocks[0]) - if err != nil { - t.Fatal(err) - } + addBlock(t, context.Background(), instances[0], blocks[0]) select { case blk, ok := <-blkch2: @@ -708,10 +683,7 @@ func TestBitswapLedgerOneWay(t *testing.T) { instances := ig.Instances(2) blocks := bg.Blocks(1) - err := instances[0].Exchange.HasBlock(context.Background(), blocks[0]) - if err != nil { - t.Fatal(err) - } + addBlock(t, context.Background(), instances[0], blocks[0]) ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() @@ -760,19 +732,12 @@ func TestBitswapLedgerTwoWay(t *testing.T) { instances := ig.Instances(2) blocks := bg.Blocks(2) - err := instances[0].Exchange.HasBlock(context.Background(), blocks[0]) - if err != nil { - t.Fatal(err) - } - - err = instances[1].Exchange.HasBlock(context.Background(), blocks[1]) - if err != nil { - t.Fatal(err) - } + addBlock(t, context.Background(), instances[0], blocks[0]) + addBlock(t, context.Background(), instances[1], blocks[1]) ctx, 
cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - _, err = instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) + _, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) if err != nil { t.Fatal(err) } @@ -911,17 +876,14 @@ func TestTracer(t *testing.T) { bitswap.WithTracer(wiretap)(instances[0].Exchange) // First peer has block - err := instances[0].Exchange.HasBlock(context.Background(), blocks[0]) - if err != nil { - t.Fatal(err) - } + addBlock(t, context.Background(), instances[0], blocks[0]) ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() // Second peer broadcasts want for block CID // (Received by first and third peers) - _, err = instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) + _, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) if err != nil { t.Fatal(err) } @@ -995,10 +957,8 @@ func TestTracer(t *testing.T) { // After disabling WireTap, no new messages are logged bitswap.WithTracer(nil)(instances[0].Exchange) - err = instances[0].Exchange.HasBlock(context.Background(), blocks[1]) - if err != nil { - t.Fatal(err) - } + addBlock(t, context.Background(), instances[0], blocks[1]) + _, err = instances[1].Exchange.GetBlock(ctx, blocks[1].Cid()) if err != nil { t.Fatal(err) diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/bitswap_with_sessions_test.go index 40eed0ff2..7532a908c 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/bitswap_with_sessions_test.go @@ -187,9 +187,7 @@ func TestFetchNotConnected(t *testing.T) { // Provide 10 blocks on Peer A blks := bgen.Blocks(10) for _, block := range blks { - if err := other.Exchange.HasBlock(ctx, block); err != nil { - t.Fatal(err) - } + addBlock(t, ctx, other, block) } var cids []cid.Cid @@ -243,9 +241,7 @@ func TestFetchAfterDisconnect(t *testing.T) { firstBlks := blks[:5] for _, block := range firstBlks { - if err := peerA.Exchange.HasBlock(ctx, block); err != nil { - t.Fatal(err) - } + 
addBlock(t, ctx, peerA, block) } // Request all blocks with Peer B @@ -279,9 +275,7 @@ func TestFetchAfterDisconnect(t *testing.T) { // Provide remaining blocks lastBlks := blks[5:] for _, block := range lastBlks { - if err := peerA.Exchange.HasBlock(ctx, block); err != nil { - t.Fatal(err) - } + addBlock(t, ctx, peerA, block) } // Peer B should call FindProviders() and find Peer A @@ -334,9 +328,7 @@ func TestInterestCacheOverflow(t *testing.T) { // wait to ensure that all the above cids were added to the sessions cache time.Sleep(time.Millisecond * 50) - if err := b.Exchange.HasBlock(ctx, blks[0]); err != nil { - t.Fatal(err) - } + addBlock(t, ctx, b, blks[0]) select { case blk, ok := <-zeroch: @@ -381,9 +373,7 @@ func TestPutAfterSessionCacheEvict(t *testing.T) { // wait to ensure that all the above cids were added to the sessions cache time.Sleep(time.Millisecond * 50) - if err := a.Exchange.HasBlock(ctx, blks[17]); err != nil { - t.Fatal(err) - } + addBlock(t, ctx, a, blks[17]) select { case <-blkch: @@ -423,9 +413,7 @@ func TestMultipleSessions(t *testing.T) { } time.Sleep(time.Millisecond * 10) - if err := b.Exchange.HasBlock(ctx, blk); err != nil { - t.Fatal(err) - } + addBlock(t, ctx, b, blk) select { case <-blkch2: diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index c8c330975..b38777574 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -769,27 +769,29 @@ func (e *Engine) splitWantsDenials(p peer.ID, allWants []bsmsg.Entry) ([]bsmsg.E return wants, denied } -// ReceiveFrom is called when new blocks are received and added to the block -// store, meaning there may be peers who want those blocks, so we should send -// the blocks to them. -// +// ReceivedBlocks is called when new blocks are received from the network. // This function also updates the receive side of the ledger. 
-func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block) { +func (e *Engine) ReceivedBlocks(from peer.ID, blks []blocks.Block) { if len(blks) == 0 { return } - if from != "" { - l := e.findOrCreate(from) - l.lk.Lock() + l := e.findOrCreate(from) - // Record how many bytes were received in the ledger - for _, blk := range blks { - log.Debugw("Bitswap engine <- block", "local", e.self, "from", from, "cid", blk.Cid(), "size", len(blk.RawData())) - e.scoreLedger.AddToReceivedBytes(l.Partner, len(blk.RawData())) - } + // Record how many bytes were received in the ledger + l.lk.Lock() + for _, blk := range blks { + log.Debugw("Bitswap engine <- block", "local", e.self, "from", from, "cid", blk.Cid(), "size", len(blk.RawData())) + e.scoreLedger.AddToReceivedBytes(l.Partner, len(blk.RawData())) + } + l.lk.Unlock() +} - l.lk.Unlock() +// NotifyNewBlocks is called when new blocks becomes available locally, and in particular when the caller of bitswap +// decide to store those blocks and make them available on the network. 
+func (e *Engine) NotifyNewBlocks(blks []blocks.Block) { + if len(blks) == 0 { + return } // Get the size of each block diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index c4dc53486..ca3c7abd8 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -104,7 +104,7 @@ func newTestEngineWithSampling(ctx context.Context, idStr string, peerSampleInte e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) return engineSet{ Peer: peer.ID(idStr), - //Strategy: New(true), + // Strategy: New(true), PeerTagger: fpt, Blockstore: bs, Engine: e, @@ -126,7 +126,7 @@ func TestConsistentAccounting(t *testing.T) { sender.Engine.MessageSent(receiver.Peer, m) receiver.Engine.MessageReceived(ctx, sender.Peer, m) - receiver.Engine.ReceiveFrom(sender.Peer, m.Blocks()) + receiver.Engine.ReceivedBlocks(sender.Peer, m.Blocks()) } // Ensure sender records the change @@ -936,10 +936,11 @@ func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { t.Fatal("expected no envelope yet") } + e.ReceivedBlocks(otherPeer, []blocks.Block{blks[0], blks[2]}) if err := bs.PutMany(context.Background(), []blocks.Block{blks[0], blks[2]}); err != nil { t.Fatal(err) } - e.ReceiveFrom(otherPeer, []blocks.Block{blks[0], blks[2]}) + e.NotifyNewBlocks([]blocks.Block{blks[0], blks[2]}) _, env = getNextEnvelope(e, next, 5*time.Millisecond) if env == nil { t.Fatal("expected envelope") @@ -1000,10 +1001,11 @@ func TestSendDontHave(t *testing.T) { } // Receive all the blocks + e.ReceivedBlocks(otherPeer, []blocks.Block{blks[0], blks[2]}) if err := bs.PutMany(context.Background(), blks); err != nil { t.Fatal(err) } - e.ReceiveFrom(otherPeer, blks) + e.NotifyNewBlocks(blks) // Envelope should contain 2 HAVEs / 2 blocks _, env = getNextEnvelope(e, next, 10*time.Millisecond) diff --git a/bitswap/internal/notifications/notifications.go b/bitswap/internal/notifications/notifications.go index 
7defea739..ed4b79f57 100644 --- a/bitswap/internal/notifications/notifications.go +++ b/bitswap/internal/notifications/notifications.go @@ -15,7 +15,7 @@ const bufferSize = 16 // for cids. It's used internally by bitswap to decouple receiving blocks // and actually providing them back to the GetBlocks caller. type PubSub interface { - Publish(block blocks.Block) + Publish(blocks ...blocks.Block) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Block Shutdown() } @@ -35,7 +35,7 @@ type impl struct { closed chan struct{} } -func (ps *impl) Publish(block blocks.Block) { +func (ps *impl) Publish(blocks ...blocks.Block) { ps.lk.RLock() defer ps.lk.RUnlock() select { @@ -44,7 +44,9 @@ func (ps *impl) Publish(block blocks.Block) { default: } - ps.wrapped.Pub(block, block.Cid().KeyString()) + for _, block := range blocks { + ps.wrapped.Pub(block, block.Cid().KeyString()) + } } func (ps *impl) Shutdown() { From a9ef4b5f35150d1bad70bb93e933dd7d1f540e5d Mon Sep 17 00:00:00 2001 From: Jorropo Date: Thu, 28 Jul 2022 04:35:53 +0200 Subject: [PATCH 1023/1035] chore: bump deps & cleanup dont add This commit was moved from ipfs/go-bitswap@9bbccf862bde17d584ef658a01cf597afe573016 --- bitswap/internal/decision/engine.go | 2 +- bitswap/internal/decision/engine_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index b38777574..0bd8d7f4a 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -780,11 +780,11 @@ func (e *Engine) ReceivedBlocks(from peer.ID, blks []blocks.Block) { // Record how many bytes were received in the ledger l.lk.Lock() + defer l.lk.Unlock() for _, blk := range blks { log.Debugw("Bitswap engine <- block", "local", e.self, "from", from, "cid", blk.Cid(), "size", len(blk.RawData())) e.scoreLedger.AddToReceivedBytes(l.Partner, len(blk.RawData())) } - l.lk.Unlock() } // NotifyNewBlocks is called when new blocks becomes 
available locally, and in particular when the caller of bitswap diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index ca3c7abd8..f09bc3b5e 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -104,7 +104,7 @@ func newTestEngineWithSampling(ctx context.Context, idStr string, peerSampleInte e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) return engineSet{ Peer: peer.ID(idStr), - // Strategy: New(true), + //Strategy: New(true), PeerTagger: fpt, Blockstore: bs, Engine: e, From dea23a1433e2d6c8bc1de9da54924f593640e08a Mon Sep 17 00:00:00 2001 From: Gus Eggert Date: Fri, 5 Aug 2022 10:16:37 -0400 Subject: [PATCH 1024/1035] chore: remove goprocess from blockstoremanager This commit was moved from ipfs/go-bitswap@4fcd29137eaf8983d5791de0b12bbbd01fb00d08 --- bitswap/bitswap.go | 1 - .../internal/decision/blockstoremanager.go | 32 +++++++++++-------- .../decision/blockstoremanager_test.go | 26 +++++++-------- bitswap/internal/decision/engine.go | 19 +++++++---- bitswap/internal/decision/engine_test.go | 1 - 5 files changed, 42 insertions(+), 37 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 8c549ede3..7a032ec96 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -286,7 +286,6 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, // Set up decision engine bs.engine = decision.NewEngine( - ctx, bstore, bs.engineBstoreWorkerCount, bs.engineTaskWorkerCount, diff --git a/bitswap/internal/decision/blockstoremanager.go b/bitswap/internal/decision/blockstoremanager.go index 80ee98a0a..5bc456a96 100644 --- a/bitswap/internal/decision/blockstoremanager.go +++ b/bitswap/internal/decision/blockstoremanager.go @@ -10,7 +10,6 @@ import ( bstore "github.com/ipfs/go-ipfs-blockstore" ipld "github.com/ipfs/go-ipld-format" "github.com/ipfs/go-metrics-interface" - process "github.com/jbenet/goprocess" ) // blockstoreManager 
maintains a pool of workers that make requests to the blockstore. @@ -18,15 +17,17 @@ type blockstoreManager struct { bs bstore.Blockstore workerCount int jobs chan func() - px process.Process pendingGauge metrics.Gauge activeGauge metrics.Gauge + + workerWG sync.WaitGroup + stopChan chan struct{} + stopOnce sync.Once } // newBlockstoreManager creates a new blockstoreManager with the given context // and number of workers func newBlockstoreManager( - ctx context.Context, bs bstore.Blockstore, workerCount int, pendingGauge metrics.Gauge, @@ -36,26 +37,31 @@ func newBlockstoreManager( bs: bs, workerCount: workerCount, jobs: make(chan func()), - px: process.WithTeardown(func() error { return nil }), pendingGauge: pendingGauge, activeGauge: activeGauge, + stopChan: make(chan struct{}), } } -func (bsm *blockstoreManager) start(px process.Process) { - px.AddChild(bsm.px) - // Start up workers +func (bsm *blockstoreManager) start() { + bsm.workerWG.Add(bsm.workerCount) for i := 0; i < bsm.workerCount; i++ { - bsm.px.Go(func(px process.Process) { - bsm.worker(px) - }) + go bsm.worker() } } -func (bsm *blockstoreManager) worker(px process.Process) { +func (bsm *blockstoreManager) stop() { + bsm.stopOnce.Do(func() { + close(bsm.stopChan) + }) + bsm.workerWG.Wait() +} + +func (bsm *blockstoreManager) worker() { + defer bsm.workerWG.Done() for { select { - case <-px.Closing(): + case <-bsm.stopChan: return case job := <-bsm.jobs: bsm.pendingGauge.Dec() @@ -70,7 +76,7 @@ func (bsm *blockstoreManager) addJob(ctx context.Context, job func()) error { select { case <-ctx.Done(): return ctx.Err() - case <-bsm.px.Closing(): + case <-bsm.stopChan: return fmt.Errorf("shutting down") case bsm.jobs <- job: bsm.pendingGauge.Inc() diff --git a/bitswap/internal/decision/blockstoremanager_test.go b/bitswap/internal/decision/blockstoremanager_test.go index fa026efb9..d1c150278 100644 --- a/bitswap/internal/decision/blockstoremanager_test.go +++ 
b/bitswap/internal/decision/blockstoremanager_test.go @@ -17,17 +17,20 @@ import ( ds_sync "github.com/ipfs/go-datastore/sync" blockstore "github.com/ipfs/go-ipfs-blockstore" delay "github.com/ipfs/go-ipfs-delay" - process "github.com/jbenet/goprocess" ) func newBlockstoreManagerForTesting( + t *testing.T, ctx context.Context, bs blockstore.Blockstore, workerCount int, ) *blockstoreManager { testPendingBlocksGauge := metrics.NewCtx(ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() testActiveBlocksGauge := metrics.NewCtx(ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() - return newBlockstoreManager(ctx, bs, workerCount, testPendingBlocksGauge, testActiveBlocksGauge) + bsm := newBlockstoreManager(bs, workerCount, testPendingBlocksGauge, testActiveBlocksGauge) + bsm.start() + t.Cleanup(bsm.stop) + return bsm } func TestBlockstoreManagerNotFoundKey(t *testing.T) { @@ -36,8 +39,7 @@ func TestBlockstoreManagerNotFoundKey(t *testing.T) { dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) - bsm := newBlockstoreManagerForTesting(ctx, bstore, 5) - bsm.start(process.WithTeardown(func() error { return nil })) + bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 5) cids := testutil.GenerateCids(4) sizes, err := bsm.getBlockSizes(ctx, cids) @@ -75,8 +77,7 @@ func TestBlockstoreManager(t *testing.T) { dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) - bsm := newBlockstoreManagerForTesting(ctx, bstore, 5) - bsm.start(process.WithTeardown(func() error { return nil })) + bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 5) exp := make(map[cid.Cid]blocks.Block) var blks []blocks.Block @@ -159,8 +160,7 @@ func TestBlockstoreManagerConcurrency(t *testing.T) { bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) workerCount := 5 - bsm := 
newBlockstoreManagerForTesting(ctx, bstore, workerCount) - bsm.start(process.WithTeardown(func() error { return nil })) + bsm := newBlockstoreManagerForTesting(t, ctx, bstore, workerCount) blkSize := int64(8 * 1024) blks := testutil.GenerateBlocksOfSize(32, blkSize) @@ -201,9 +201,7 @@ func TestBlockstoreManagerClose(t *testing.T) { dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) - bsm := newBlockstoreManagerForTesting(ctx, bstore, 3) - px := process.WithTeardown(func() error { return nil }) - bsm.start(px) + bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 3) blks := testutil.GenerateBlocksOfSize(10, 1024) var ks []cid.Cid @@ -216,7 +214,7 @@ func TestBlockstoreManagerClose(t *testing.T) { t.Fatal(err) } - go px.Close() + bsm.stop() time.Sleep(5 * time.Millisecond) @@ -241,9 +239,7 @@ func TestBlockstoreManagerCtxDone(t *testing.T) { bstore := blockstore.NewBlockstore(dstore) ctx := context.Background() - bsm := newBlockstoreManagerForTesting(ctx, bstore, 3) - proc := process.WithTeardown(func() error { return nil }) - bsm.start(proc) + bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 3) blks := testutil.GenerateBlocksOfSize(100, 128) var ks []cid.Cid diff --git a/bitswap/internal/decision/engine.go b/bitswap/internal/decision/engine.go index 0bd8d7f4a..27809a4c8 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/internal/decision/engine.go @@ -256,7 +256,6 @@ func wrapTaskComparator(tc TaskComparator) peertask.QueueTaskComparator { // maxOutstandingBytesPerPeer hints to the peer task queue not to give a peer more tasks if it has some maximum // work already outstanding. 
func NewEngine( - ctx context.Context, bs bstore.Blockstore, bstoreWorkerCount, engineTaskWorkerCount, maxOutstandingBytesPerPeer int, @@ -270,7 +269,6 @@ func NewEngine( opts ...Option, ) *Engine { return newEngine( - ctx, bs, bstoreWorkerCount, engineTaskWorkerCount, @@ -288,7 +286,6 @@ func NewEngine( } func newEngine( - ctx context.Context, bs bstore.Blockstore, bstoreWorkerCount, engineTaskWorkerCount, maxOutstandingBytesPerPeer int, @@ -310,7 +307,7 @@ func newEngine( e := &Engine{ ledgerMap: make(map[peer.ID]*ledger), scoreLedger: scoreLedger, - bsm: newBlockstoreManager(ctx, bs, bstoreWorkerCount, pendingBlocksGauge, activeBlocksGauge), + bsm: newBlockstoreManager(bs, bstoreWorkerCount, pendingBlocksGauge, activeBlocksGauge), peerTagger: peerTagger, outbox: make(chan (<-chan *Envelope), outboxChanBuffer), workSignal: make(chan struct{}, 1), @@ -391,20 +388,28 @@ func (e *Engine) startScoreLedger(px process.Process) { }) } +func (e *Engine) startBlockstoreManager(px process.Process) { + e.bsm.start() + px.Go(func(ppx process.Process) { + <-ppx.Closing() + e.bsm.stop() + }) +} + // Start up workers to handle requests from other nodes for the data on this node func (e *Engine) StartWorkers(ctx context.Context, px process.Process) { - // Start up blockstore manager - e.bsm.start(px) + e.startBlockstoreManager(px) e.startScoreLedger(px) e.taskWorkerLock.Lock() defer e.taskWorkerLock.Unlock() for i := 0; i < e.taskWorkerCount; i++ { - px.Go(func(px process.Process) { + px.Go(func(_ process.Process) { e.taskWorker(ctx) }) } + } func (e *Engine) onPeerAdded(p peer.ID) { diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/internal/decision/engine_test.go index f09bc3b5e..79b80cb52 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/internal/decision/engine_test.go @@ -201,7 +201,6 @@ func newEngineForTesting( testPendingBlocksGauge := metrics.NewCtx(ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() 
testActiveBlocksGauge := metrics.NewCtx(ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() return newEngine( - ctx, bs, bstoreWorkerCount, engineTaskWorkerCount, From d15b5f1866736bf8cec5b221ea461ebc8811692c Mon Sep 17 00:00:00 2001 From: Jorropo Date: Mon, 20 Jun 2022 14:38:32 +0200 Subject: [PATCH 1025/1035] refactor: split client and server and all sideeffects that this incurs This commit was moved from ipfs/go-bitswap@8a75bc2c47a5f09bc0618a577af3fb9c409033aa --- bitswap/benchmarks_test.go | 7 +- bitswap/bitswap.go | 705 ------------------ bitswap/bitswap_test.go | 50 +- .../bitswap_with_sessions_test.go | 32 +- bitswap/client/client.go | 481 ++++++++++++ bitswap/{ => client}/docs/go-bitswap.png | Bin bitswap/{ => client}/docs/go-bitswap.puml | 0 .../{ => client}/docs/how-bitswap-works.md | 0 .../blockpresencemanager.go | 0 .../blockpresencemanager_test.go | 0 .../{ => client}/internal/getter/getter.go | 4 +- .../messagequeue/donthavetimeoutmgr.go | 0 .../messagequeue/donthavetimeoutmgr_test.go | 0 .../internal/messagequeue/messagequeue.go | 2 +- .../messagequeue/messagequeue_test.go | 0 .../internal/notifications/notifications.go | 0 .../notifications/notifications_test.go | 0 .../internal/peermanager/peermanager.go | 0 .../internal/peermanager/peermanager_test.go | 0 .../internal/peermanager/peerwantmanager.go | 0 .../peermanager/peerwantmanager_test.go | 0 .../providerquerymanager.go | 0 .../providerquerymanager_test.go | 0 .../{ => client}/internal/session/cidqueue.go | 0 .../internal/session/peerresponsetracker.go | 0 .../session/peerresponsetracker_test.go | 0 .../internal/session/sentwantblockstracker.go | 0 .../session/sentwantblockstracker_test.go | 0 .../{ => client}/internal/session/session.go | 20 +- .../internal/session/session_test.go | 10 +- .../internal/session/sessionwants.go | 0 .../internal/session/sessionwants_test.go | 0 .../internal/session/sessionwantsender.go | 4 +- .../session/sessionwantsender_test.go | 6 +- 
.../internal/session/wantinfo_test.go | 0 .../sessioninterestmanager.go | 0 .../sessioninterestmanager_test.go | 0 .../internal/sessionmanager/sessionmanager.go | 10 +- .../sessionmanager/sessionmanager_test.go | 10 +- .../sessionpeermanager/sessionpeermanager.go | 0 .../sessionpeermanager_test.go | 0 bitswap/client/internal/tracing.go | 13 + bitswap/{ => client}/stat.go | 24 +- .../{ => client}/testinstance/testinstance.go | 4 +- bitswap/{ => client}/wantlist/wantlist.go | 0 .../{ => client}/wantlist/wantlist_test.go | 0 bitswap/decision/decision.go | 12 - bitswap/internal/testutil/testutil.go | 2 +- bitswap/message/message.go | 2 +- bitswap/message/message_test.go | 2 +- bitswap/metrics/gen.go | 111 +++ bitswap/network/connecteventmanager.go | 27 +- bitswap/network/interface.go | 2 +- bitswap/network/ipfs_impl.go | 24 +- bitswap/options.go | 88 +++ bitswap/polyfill.go | 174 +++++ bitswap/sendOnlyTracer.go | 20 + bitswap/server/forward.go | 13 + .../internal/decision/blockstoremanager.go | 13 +- .../decision/blockstoremanager_test.go | 0 .../{ => server}/internal/decision/engine.go | 99 ++- .../internal/decision/engine_test.go | 42 +- .../{ => server}/internal/decision/ewma.go | 0 .../{ => server}/internal/decision/ledger.go | 2 +- .../internal/decision/peer_ledger.go | 0 .../internal/decision/scoreledger.go | 0 .../internal/decision/taskmerger.go | 0 .../internal/decision/taskmerger_test.go | 0 bitswap/server/server.go | 531 +++++++++++++ bitswap/testnet/virtual.go | 37 +- bitswap/{ => tracer}/tracer.go | 9 +- bitswap/workers.go | 228 ------ 72 files changed, 1675 insertions(+), 1145 deletions(-) delete mode 100644 bitswap/bitswap.go rename bitswap/{ => client}/bitswap_with_sessions_test.go (92%) create mode 100644 bitswap/client/client.go rename bitswap/{ => client}/docs/go-bitswap.png (100%) rename bitswap/{ => client}/docs/go-bitswap.puml (100%) rename bitswap/{ => client}/docs/how-bitswap-works.md (100%) rename bitswap/{ => 
client}/internal/blockpresencemanager/blockpresencemanager.go (100%) rename bitswap/{ => client}/internal/blockpresencemanager/blockpresencemanager_test.go (100%) rename bitswap/{ => client}/internal/getter/getter.go (96%) rename bitswap/{ => client}/internal/messagequeue/donthavetimeoutmgr.go (100%) rename bitswap/{ => client}/internal/messagequeue/donthavetimeoutmgr_test.go (100%) rename bitswap/{ => client}/internal/messagequeue/messagequeue.go (99%) rename bitswap/{ => client}/internal/messagequeue/messagequeue_test.go (100%) rename bitswap/{ => client}/internal/notifications/notifications.go (100%) rename bitswap/{ => client}/internal/notifications/notifications_test.go (100%) rename bitswap/{ => client}/internal/peermanager/peermanager.go (100%) rename bitswap/{ => client}/internal/peermanager/peermanager_test.go (100%) rename bitswap/{ => client}/internal/peermanager/peerwantmanager.go (100%) rename bitswap/{ => client}/internal/peermanager/peerwantmanager_test.go (100%) rename bitswap/{ => client}/internal/providerquerymanager/providerquerymanager.go (100%) rename bitswap/{ => client}/internal/providerquerymanager/providerquerymanager_test.go (100%) rename bitswap/{ => client}/internal/session/cidqueue.go (100%) rename bitswap/{ => client}/internal/session/peerresponsetracker.go (100%) rename bitswap/{ => client}/internal/session/peerresponsetracker_test.go (100%) rename bitswap/{ => client}/internal/session/sentwantblockstracker.go (100%) rename bitswap/{ => client}/internal/session/sentwantblockstracker_test.go (100%) rename bitswap/{ => client}/internal/session/session.go (96%) rename bitswap/{ => client}/internal/session/session_test.go (97%) rename bitswap/{ => client}/internal/session/sessionwants.go (100%) rename bitswap/{ => client}/internal/session/sessionwants_test.go (100%) rename bitswap/{ => client}/internal/session/sessionwantsender.go (99%) rename bitswap/{ => client}/internal/session/sessionwantsender_test.go (99%) rename bitswap/{ => 
client}/internal/session/wantinfo_test.go (100%) rename bitswap/{ => client}/internal/sessioninterestmanager/sessioninterestmanager.go (100%) rename bitswap/{ => client}/internal/sessioninterestmanager/sessioninterestmanager_test.go (100%) rename bitswap/{ => client}/internal/sessionmanager/sessionmanager.go (94%) rename bitswap/{ => client}/internal/sessionmanager/sessionmanager_test.go (95%) rename bitswap/{ => client}/internal/sessionpeermanager/sessionpeermanager.go (100%) rename bitswap/{ => client}/internal/sessionpeermanager/sessionpeermanager_test.go (100%) create mode 100644 bitswap/client/internal/tracing.go rename bitswap/{ => client}/stat.go (59%) rename bitswap/{ => client}/testinstance/testinstance.go (97%) rename bitswap/{ => client}/wantlist/wantlist.go (100%) rename bitswap/{ => client}/wantlist/wantlist_test.go (100%) delete mode 100644 bitswap/decision/decision.go create mode 100644 bitswap/metrics/gen.go create mode 100644 bitswap/options.go create mode 100644 bitswap/polyfill.go create mode 100644 bitswap/sendOnlyTracer.go create mode 100644 bitswap/server/forward.go rename bitswap/{ => server}/internal/decision/blockstoremanager.go (96%) rename bitswap/{ => server}/internal/decision/blockstoremanager_test.go (100%) rename bitswap/{ => server}/internal/decision/engine.go (92%) rename bitswap/{ => server}/internal/decision/engine_test.go (93%) rename bitswap/{ => server}/internal/decision/ewma.go (100%) rename bitswap/{ => server}/internal/decision/ledger.go (94%) rename bitswap/{ => server}/internal/decision/peer_ledger.go (100%) rename bitswap/{ => server}/internal/decision/scoreledger.go (100%) rename bitswap/{ => server}/internal/decision/taskmerger.go (100%) rename bitswap/{ => server}/internal/decision/taskmerger_test.go (100%) create mode 100644 bitswap/server/server.go rename bitswap/{ => tracer}/tracer.go (72%) delete mode 100644 bitswap/workers.go diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index 
ca92820f3..ea6767713 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -17,10 +17,9 @@ import ( blocks "github.com/ipfs/go-block-format" protocol "github.com/libp2p/go-libp2p-core/protocol" - bitswap "github.com/ipfs/go-bitswap" - bssession "github.com/ipfs/go-bitswap/internal/session" + "github.com/ipfs/go-bitswap" + testinstance "github.com/ipfs/go-bitswap/client/testinstance" bsnet "github.com/ipfs/go-bitswap/network" - testinstance "github.com/ipfs/go-bitswap/testinstance" tn "github.com/ipfs/go-bitswap/testnet" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" @@ -498,7 +497,7 @@ func onePeerPerBlock(b *testing.B, provs []testinstance.Instance, blks []blocks. } func oneAtATime(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { - ses := bs.NewSession(context.Background()).(*bssession.Session) + ses := bs.NewSession(context.Background()) for _, c := range ks { _, err := ses.GetBlock(context.Background(), c) if err != nil { diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go deleted file mode 100644 index 7a032ec96..000000000 --- a/bitswap/bitswap.go +++ /dev/null @@ -1,705 +0,0 @@ -// Package bitswap implements the IPFS exchange interface with the BitSwap -// bilateral exchange protocol. 
-package bitswap - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - delay "github.com/ipfs/go-ipfs-delay" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - - deciface "github.com/ipfs/go-bitswap/decision" - "github.com/ipfs/go-bitswap/internal" - bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" - "github.com/ipfs/go-bitswap/internal/decision" - "github.com/ipfs/go-bitswap/internal/defaults" - bsgetter "github.com/ipfs/go-bitswap/internal/getter" - bsmq "github.com/ipfs/go-bitswap/internal/messagequeue" - "github.com/ipfs/go-bitswap/internal/notifications" - bspm "github.com/ipfs/go-bitswap/internal/peermanager" - bspqm "github.com/ipfs/go-bitswap/internal/providerquerymanager" - bssession "github.com/ipfs/go-bitswap/internal/session" - bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" - bssm "github.com/ipfs/go-bitswap/internal/sessionmanager" - bsspm "github.com/ipfs/go-bitswap/internal/sessionpeermanager" - bsmsg "github.com/ipfs/go-bitswap/message" - bsnet "github.com/ipfs/go-bitswap/network" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - exchange "github.com/ipfs/go-ipfs-exchange-interface" - logging "github.com/ipfs/go-log" - "github.com/ipfs/go-metrics-interface" - process "github.com/jbenet/goprocess" - procctx "github.com/jbenet/goprocess/context" - "github.com/libp2p/go-libp2p-core/peer" -) - -var log = logging.Logger("bitswap") -var sflog = log.Desugar() - -var _ exchange.SessionExchange = (*Bitswap)(nil) - -var ( - // HasBlockBufferSize is the buffer size of the channel for new blocks - // that need to be provided. They should get pulled over by the - // provideCollector even before they are actually provided. - // TODO: Does this need to be this large givent that? 
- HasBlockBufferSize = 256 - provideKeysBufferSize = 2048 - provideWorkerMax = 6 - - // the 1<<18+15 is to observe old file chunks that are 1<<18 + 14 in size - metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} - - timeMetricsBuckets = []float64{1, 10, 30, 60, 90, 120, 600} -) - -// Option defines the functional option type that can be used to configure -// bitswap instances -type Option func(*Bitswap) - -// ProvideEnabled is an option for enabling/disabling provide announcements -func ProvideEnabled(enabled bool) Option { - return func(bs *Bitswap) { - bs.provideEnabled = enabled - } -} - -// ProviderSearchDelay overwrites the global provider search delay -func ProviderSearchDelay(newProvSearchDelay time.Duration) Option { - return func(bs *Bitswap) { - bs.provSearchDelay = newProvSearchDelay - } -} - -// RebroadcastDelay overwrites the global provider rebroadcast delay -func RebroadcastDelay(newRebroadcastDelay delay.D) Option { - return func(bs *Bitswap) { - bs.rebroadcastDelay = newRebroadcastDelay - } -} - -// EngineBlockstoreWorkerCount sets the number of worker threads used for -// blockstore operations in the decision engine -func EngineBlockstoreWorkerCount(count int) Option { - if count <= 0 { - panic(fmt.Sprintf("Engine blockstore worker count is %d but must be > 0", count)) - } - return func(bs *Bitswap) { - bs.engineBstoreWorkerCount = count - } -} - -// EngineTaskWorkerCount sets the number of worker threads used inside the engine -func EngineTaskWorkerCount(count int) Option { - if count <= 0 { - panic(fmt.Sprintf("Engine task worker count is %d but must be > 0", count)) - } - return func(bs *Bitswap) { - bs.engineTaskWorkerCount = count - } -} - -func TaskWorkerCount(count int) Option { - if count <= 0 { - panic(fmt.Sprintf("task worker count is %d but must be > 0", count)) - } - return func(bs *Bitswap) { - bs.taskWorkerCount = count - } -} - -// MaxOutstandingBytesPerPeer describes approximately how much work we 
are will to have outstanding to a peer at any -// given time. Setting it to 0 will disable any limiting. -func MaxOutstandingBytesPerPeer(count int) Option { - if count < 0 { - panic(fmt.Sprintf("max outstanding bytes per peer is %d but must be >= 0", count)) - } - return func(bs *Bitswap) { - bs.engineMaxOutstandingBytesPerPeer = count - } -} - -// SetSendDontHaves indicates what to do when the engine receives a want-block -// for a block that is not in the blockstore. Either -// - Send a DONT_HAVE message -// - Simply don't respond -// This option is only used for testing. -func SetSendDontHaves(send bool) Option { - return func(bs *Bitswap) { - bs.engineSetSendDontHaves = send - } -} - -// Configures the engine to use the given score decision logic. -func WithScoreLedger(scoreLedger deciface.ScoreLedger) Option { - return func(bs *Bitswap) { - bs.engineScoreLedger = scoreLedger - } -} - -func SetSimulateDontHavesOnTimeout(send bool) Option { - return func(bs *Bitswap) { - bs.simulateDontHavesOnTimeout = send - } -} - -func WithTargetMessageSize(tms int) Option { - return func(bs *Bitswap) { - bs.engineTargetMessageSize = tms - } -} - -func WithPeerBlockRequestFilter(pbrf PeerBlockRequestFilter) Option { - return func(bs *Bitswap) { - bs.peerBlockRequestFilter = pbrf - } -} - -type TaskInfo = decision.TaskInfo -type TaskComparator = decision.TaskComparator -type PeerBlockRequestFilter = decision.PeerBlockRequestFilter - -// WithTaskComparator configures custom task prioritization logic. -func WithTaskComparator(comparator TaskComparator) Option { - return func(bs *Bitswap) { - bs.taskComparator = comparator - } -} - -// New initializes a BitSwap instance that communicates over the provided -// BitSwapNetwork. This function registers the returned instance as the network -// delegate. Runs until context is cancelled or bitswap.Close is called. 
-func New(parent context.Context, network bsnet.BitSwapNetwork, - bstore blockstore.Blockstore, options ...Option) exchange.Interface { - - // important to use provided parent context (since it may include important - // loggable data). It's probably not a good idea to allow bitswap to be - // coupled to the concerns of the ipfs daemon in this way. - // - // FIXME(btc) Now that bitswap manages itself using a process, it probably - // shouldn't accept a context anymore. Clients should probably use Close() - // exclusively. We should probably find another way to share logging data - ctx, cancelFunc := context.WithCancel(parent) - ctx = metrics.CtxSubScope(ctx, "bitswap") - dupHist := metrics.NewCtx(ctx, "recv_dup_blocks_bytes", "Summary of duplicate"+ - " data blocks recived").Histogram(metricsBuckets) - allHist := metrics.NewCtx(ctx, "recv_all_blocks_bytes", "Summary of all"+ - " data blocks recived").Histogram(metricsBuckets) - - sentHistogram := metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by"+ - " this bitswap").Histogram(metricsBuckets) - - sendTimeHistogram := metrics.NewCtx(ctx, "send_times", "Histogram of how long it takes to send messages"+ - " in this bitswap").Histogram(timeMetricsBuckets) - - pendingEngineGauge := metrics.NewCtx(ctx, "pending_tasks", "Total number of pending tasks").Gauge() - - activeEngineGauge := metrics.NewCtx(ctx, "active_tasks", "Total number of active tasks").Gauge() - - pendingBlocksGauge := metrics.NewCtx(ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() - - activeBlocksGauge := metrics.NewCtx(ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() - - px := process.WithTeardown(func() error { - return nil - }) - - // onDontHaveTimeout is called when a want-block is sent to a peer that - // has an old version of Bitswap that doesn't support DONT_HAVE messages, - // or when no response is received within a timeout. 
- var sm *bssm.SessionManager - var bs *Bitswap - onDontHaveTimeout := func(p peer.ID, dontHaves []cid.Cid) { - // Simulate a message arriving with DONT_HAVEs - if bs.simulateDontHavesOnTimeout { - sm.ReceiveFrom(ctx, p, nil, nil, dontHaves) - } - } - peerQueueFactory := func(ctx context.Context, p peer.ID) bspm.PeerQueue { - return bsmq.New(ctx, p, network, onDontHaveTimeout) - } - - sim := bssim.New() - bpm := bsbpm.New() - pm := bspm.New(ctx, peerQueueFactory, network.Self()) - pqm := bspqm.New(ctx, network) - - sessionFactory := func( - sessctx context.Context, - sessmgr bssession.SessionManager, - id uint64, - spm bssession.SessionPeerManager, - sim *bssim.SessionInterestManager, - pm bssession.PeerManager, - bpm *bsbpm.BlockPresenceManager, - notif notifications.PubSub, - provSearchDelay time.Duration, - rebroadcastDelay delay.D, - self peer.ID) bssm.Session { - return bssession.New(sessctx, sessmgr, id, spm, pqm, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) - } - sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.SessionPeerManager { - return bsspm.New(id, network.ConnectionManager()) - } - notif := notifications.New() - sm = bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) - - bs = &Bitswap{ - blockstore: bstore, - network: network, - process: px, - newBlocks: make(chan cid.Cid, HasBlockBufferSize), - provideKeys: make(chan cid.Cid, provideKeysBufferSize), - pm: pm, - pqm: pqm, - sm: sm, - sim: sim, - notif: notif, - counters: new(counters), - dupMetric: dupHist, - allMetric: allHist, - sentHistogram: sentHistogram, - sendTimeHistogram: sendTimeHistogram, - provideEnabled: true, - provSearchDelay: defaults.ProvSearchDelay, - rebroadcastDelay: delay.Fixed(time.Minute), - engineBstoreWorkerCount: defaults.BitswapEngineBlockstoreWorkerCount, - engineTaskWorkerCount: defaults.BitswapEngineTaskWorkerCount, - taskWorkerCount: defaults.BitswapTaskWorkerCount, - 
engineMaxOutstandingBytesPerPeer: defaults.BitswapMaxOutstandingBytesPerPeer, - engineTargetMessageSize: defaults.BitswapEngineTargetMessageSize, - engineSetSendDontHaves: true, - simulateDontHavesOnTimeout: true, - } - - // apply functional options before starting and running bitswap - for _, option := range options { - option(bs) - } - - // Set up decision engine - bs.engine = decision.NewEngine( - bstore, - bs.engineBstoreWorkerCount, - bs.engineTaskWorkerCount, - bs.engineMaxOutstandingBytesPerPeer, - network.ConnectionManager(), - network.Self(), - bs.engineScoreLedger, - pendingEngineGauge, - activeEngineGauge, - pendingBlocksGauge, - activeBlocksGauge, - decision.WithTaskComparator(bs.taskComparator), - decision.WithTargetMessageSize(bs.engineTargetMessageSize), - decision.WithPeerBlockRequestFilter(bs.peerBlockRequestFilter), - ) - bs.engine.SetSendDontHaves(bs.engineSetSendDontHaves) - - bs.pqm.Startup() - network.Start(bs) - - // Start up bitswaps async worker routines - bs.startWorkers(ctx, px) - bs.engine.StartWorkers(ctx, px) - - // bind the context and process. - // do it over here to avoid closing before all setup is done. - go func() { - <-px.Closing() // process closes first - sm.Shutdown() - cancelFunc() - notif.Shutdown() - network.Stop() - }() - procctx.CloseAfterContext(px, ctx) // parent cancelled first - - return bs -} - -// Bitswap instances implement the bitswap protocol. 
-type Bitswap struct { - pm *bspm.PeerManager - - // the provider query manager manages requests to find providers - pqm *bspqm.ProviderQueryManager - - // the engine is the bit of logic that decides who to send which blocks to - engine *decision.Engine - - // network delivers messages on behalf of the session - network bsnet.BitSwapNetwork - - // blockstore is the local database - // NB: ensure threadsafety - blockstore blockstore.Blockstore - - // manages channels of outgoing blocks for sessions - notif notifications.PubSub - - // newBlocks is a channel for newly added blocks to be provided to the - // network. blocks pushed down this channel get buffered and fed to the - // provideKeys channel later on to avoid too much network activity - newBlocks chan cid.Cid - // provideKeys directly feeds provide workers - provideKeys chan cid.Cid - - process process.Process - - // Counters for various statistics - counterLk sync.Mutex - counters *counters - - // Metrics interface metrics - dupMetric metrics.Histogram - allMetric metrics.Histogram - sentHistogram metrics.Histogram - sendTimeHistogram metrics.Histogram - - // External statistics interface - tracer Tracer - - // the SessionManager routes requests to interested sessions - sm *bssm.SessionManager - - // the SessionInterestManager keeps track of which sessions are interested - // in which CIDs - sim *bssim.SessionInterestManager - - // whether or not to make provide announcements - provideEnabled bool - - // how long to wait before looking for providers in a session - provSearchDelay time.Duration - - // how often to rebroadcast providing requests to find more optimized providers - rebroadcastDelay delay.D - - // how many worker threads to start for decision engine blockstore worker - engineBstoreWorkerCount int - - // how many worker threads to start for decision engine task worker - engineTaskWorkerCount int - - // the total number of simultaneous threads sending outgoing messages - taskWorkerCount int - - // 
the total amount of bytes that a peer should have outstanding, it is utilized by the decision engine - engineMaxOutstandingBytesPerPeer int - - // the score ledger used by the decision engine - engineScoreLedger deciface.ScoreLedger - - // target message size setting for engines peer task queue - engineTargetMessageSize int - - // indicates what to do when the engine receives a want-block for a block that - // is not in the blockstore. Either send DONT_HAVE or do nothing. - // This is used to simulate older versions of bitswap that did nothing instead of sending back a DONT_HAVE. - engineSetSendDontHaves bool - - // whether we should actually simulate dont haves on request timeout - simulateDontHavesOnTimeout bool - - taskComparator TaskComparator - - // an optional feature to accept / deny requests for blocks - peerBlockRequestFilter PeerBlockRequestFilter -} - -type counters struct { - blocksRecvd uint64 - dupBlocksRecvd uint64 - dupDataRecvd uint64 - blocksSent uint64 - dataSent uint64 - dataRecvd uint64 - messagesRecvd uint64 -} - -// GetBlock attempts to retrieve a particular block from peers within the -// deadline enforced by the context. -func (bs *Bitswap) GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) { - ctx, span := internal.StartSpan(ctx, "GetBlock", trace.WithAttributes(attribute.String("Key", k.String()))) - defer span.End() - return bsgetter.SyncGetBlock(ctx, k, bs.GetBlocks) -} - -// WantlistForPeer returns the currently understood list of blocks requested by a -// given peer. -func (bs *Bitswap) WantlistForPeer(p peer.ID) []cid.Cid { - var out []cid.Cid - for _, e := range bs.engine.WantlistForPeer(p) { - out = append(out, e.Cid) - } - return out -} - -// LedgerForPeer returns aggregated data about blocks swapped and communication -// with a given peer. 
-func (bs *Bitswap) LedgerForPeer(p peer.ID) *decision.Receipt { - return bs.engine.LedgerForPeer(p) -} - -// GetBlocks returns a channel where the caller may receive blocks that -// correspond to the provided |keys|. Returns an error if BitSwap is unable to -// begin this request within the deadline enforced by the context. -// -// NB: Your request remains open until the context expires. To conserve -// resources, provide a context with a reasonably short deadline (ie. not one -// that lasts throughout the lifetime of the server) -func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { - ctx, span := internal.StartSpan(ctx, "GetBlocks", trace.WithAttributes(attribute.Int("NumKeys", len(keys)))) - defer span.End() - session := bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay) - return session.GetBlocks(ctx, keys) -} - -// NotifyNewBlocks announces the existence of blocks to this bitswap service. The -// service will potentially notify its peers. -// Bitswap itself doesn't store new blocks. It's the caller responsibility to ensure -// that those blocks are available in the blockstore before calling this function. -func (bs *Bitswap) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error { - ctx, span := internal.StartSpan(ctx, "NotifyNewBlocks") - defer span.End() - - select { - case <-bs.process.Closing(): - return errors.New("bitswap is closed") - default: - } - - blkCids := make([]cid.Cid, len(blks)) - for i, blk := range blks { - blkCids[i] = blk.Cid() - } - - // Send all block keys (including duplicates) to any sessions that want them. - // (The duplicates are needed by sessions for accounting purposes) - bs.sm.ReceiveFrom(ctx, "", blkCids, nil, nil) - - // Send wanted blocks to decision engine - bs.engine.NotifyNewBlocks(blks) - - // Publish the block to any Bitswap clients that had requested blocks. 
- // (the sessions use this pubsub mechanism to inform clients of incoming - // blocks) - bs.notif.Publish(blks...) - - // If the reprovider is enabled, send block to reprovider - if bs.provideEnabled { - for _, blk := range blks { - select { - case bs.newBlocks <- blk.Cid(): - // send block off to be reprovided - case <-bs.process.Closing(): - return bs.process.Close() - } - } - } - - return nil -} - -// receiveBlocksFrom process blocks received from the network -func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []blocks.Block, haves []cid.Cid, dontHaves []cid.Cid) error { - select { - case <-bs.process.Closing(): - return errors.New("bitswap is closed") - default: - } - - wanted, notWanted := bs.sim.SplitWantedUnwanted(blks) - for _, b := range notWanted { - log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), from) - } - - allKs := make([]cid.Cid, 0, len(blks)) - for _, b := range blks { - allKs = append(allKs, b.Cid()) - } - - // Inform the PeerManager so that we can calculate per-peer latency - combined := make([]cid.Cid, 0, len(allKs)+len(haves)+len(dontHaves)) - combined = append(combined, allKs...) - combined = append(combined, haves...) - combined = append(combined, dontHaves...) - bs.pm.ResponseReceived(from, combined) - - // Send all block keys (including duplicates) to any sessions that want them for accounting purpose. - bs.sm.ReceiveFrom(ctx, from, allKs, haves, dontHaves) - - // Send wanted blocks to decision engine - bs.engine.ReceivedBlocks(from, wanted) - - // Publish the block to any Bitswap clients that had requested blocks. - // (the sessions use this pubsub mechanism to inform clients of incoming - // blocks) - for _, b := range wanted { - bs.notif.Publish(b) - } - - for _, b := range wanted { - log.Debugw("Bitswap.GetBlockRequest.End", "cid", b.Cid()) - } - - return nil -} - -// ReceiveMessage is called by the network interface when a new message is -// received. 
-func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { - bs.counterLk.Lock() - bs.counters.messagesRecvd++ - bs.counterLk.Unlock() - - // This call records changes to wantlists, blocks received, - // and number of bytes transfered. - bs.engine.MessageReceived(ctx, p, incoming) - // TODO: this is bad, and could be easily abused. - // Should only track *useful* messages in ledger - - if bs.tracer != nil { - bs.tracer.MessageReceived(p, incoming) - } - - iblocks := incoming.Blocks() - - if len(iblocks) > 0 { - bs.updateReceiveCounters(iblocks) - for _, b := range iblocks { - log.Debugf("[recv] block; cid=%s, peer=%s", b.Cid(), p) - } - } - - haves := incoming.Haves() - dontHaves := incoming.DontHaves() - if len(iblocks) > 0 || len(haves) > 0 || len(dontHaves) > 0 { - // Process blocks - err := bs.receiveBlocksFrom(ctx, p, iblocks, haves, dontHaves) - if err != nil { - log.Warnf("ReceiveMessage recvBlockFrom error: %s", err) - return - } - } -} - -func (bs *Bitswap) updateReceiveCounters(blocks []blocks.Block) { - // Check which blocks are in the datastore - // (Note: any errors from the blockstore are simply logged out in - // blockstoreHas()) - blocksHas := bs.blockstoreHas(blocks) - - bs.counterLk.Lock() - defer bs.counterLk.Unlock() - - // Do some accounting for each block - for i, b := range blocks { - has := blocksHas[i] - - blkLen := len(b.RawData()) - bs.allMetric.Observe(float64(blkLen)) - if has { - bs.dupMetric.Observe(float64(blkLen)) - } - - c := bs.counters - - c.blocksRecvd++ - c.dataRecvd += uint64(blkLen) - if has { - c.dupBlocksRecvd++ - c.dupDataRecvd += uint64(blkLen) - } - } -} - -func (bs *Bitswap) blockstoreHas(blks []blocks.Block) []bool { - res := make([]bool, len(blks)) - - wg := sync.WaitGroup{} - for i, block := range blks { - wg.Add(1) - go func(i int, b blocks.Block) { - defer wg.Done() - - has, err := bs.blockstore.Has(context.TODO(), b.Cid()) - if err != nil { - log.Infof("blockstore.Has 
error: %s", err) - has = false - } - - res[i] = has - }(i, block) - } - wg.Wait() - - return res -} - -// PeerConnected is called by the network interface -// when a peer initiates a new connection to bitswap. -func (bs *Bitswap) PeerConnected(p peer.ID) { - bs.pm.Connected(p) - bs.engine.PeerConnected(p) -} - -// PeerDisconnected is called by the network interface when a peer -// closes a connection -func (bs *Bitswap) PeerDisconnected(p peer.ID) { - bs.pm.Disconnected(p) - bs.engine.PeerDisconnected(p) -} - -// ReceiveError is called by the network interface when an error happens -// at the network layer. Currently just logs error. -func (bs *Bitswap) ReceiveError(err error) { - log.Infof("Bitswap ReceiveError: %s", err) - // TODO log the network error - // TODO bubble the network error up to the parent context/error logger -} - -// Close is called to shutdown Bitswap -func (bs *Bitswap) Close() error { - return bs.process.Close() -} - -// GetWantlist returns the current local wantlist (both want-blocks and -// want-haves). -func (bs *Bitswap) GetWantlist() []cid.Cid { - return bs.pm.CurrentWants() -} - -// GetWantBlocks returns the current list of want-blocks. -func (bs *Bitswap) GetWantBlocks() []cid.Cid { - return bs.pm.CurrentWantBlocks() -} - -// GetWanthaves returns the current list of want-haves. -func (bs *Bitswap) GetWantHaves() []cid.Cid { - return bs.pm.CurrentWantHaves() -} - -// IsOnline is needed to match go-ipfs-exchange-interface -func (bs *Bitswap) IsOnline() bool { - return true -} - -// NewSession generates a new Bitswap session. You should use this, rather -// that calling Bitswap.GetBlocks, any time you intend to do several related -// block requests in a row. The session returned will have it's own GetBlocks -// method, but the session will use the fact that the requests are related to -// be more efficient in its requests to peers. If you are using a session -// from go-blockservice, it will create a bitswap session automatically. 
-func (bs *Bitswap) NewSession(ctx context.Context) exchange.Fetcher { - ctx, span := internal.StartSpan(ctx, "NewSession") - defer span.End() - return bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay) -} diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index eae7fa750..7c32c6469 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -9,14 +9,13 @@ import ( "testing" "time" - bitswap "github.com/ipfs/go-bitswap" - deciface "github.com/ipfs/go-bitswap/decision" - decision "github.com/ipfs/go-bitswap/internal/decision" - bssession "github.com/ipfs/go-bitswap/internal/session" + "github.com/ipfs/go-bitswap" + testinstance "github.com/ipfs/go-bitswap/client/testinstance" bsmsg "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" - testinstance "github.com/ipfs/go-bitswap/testinstance" + "github.com/ipfs/go-bitswap/server" tn "github.com/ipfs/go-bitswap/testnet" + "github.com/ipfs/go-bitswap/tracer" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" detectrace "github.com/ipfs/go-detect-race" @@ -34,14 +33,6 @@ func isCI() bool { return os.Getenv("CI") != "" } -// FIXME the tests are really sensitive to the network delay. fix them to work -// well under varying conditions -const kNetworkDelay = 0 * time.Millisecond - -func getVirtualNetwork() tn.Network { - return tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) -} - func addBlock(t *testing.T, ctx context.Context, inst testinstance.Instance, blk blocks.Block) { t.Helper() err := inst.Blockstore().Put(ctx, blk) @@ -54,8 +45,12 @@ func addBlock(t *testing.T, ctx context.Context, inst testinstance.Instance, blk } } +// FIXME the tests are really sensitive to the network delay. 
fix them to work +// well under varying conditions +const kNetworkDelay = 0 * time.Millisecond + func TestClose(t *testing.T) { - vnet := getVirtualNetwork() + vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) defer ig.Close() bgen := blocksutil.NewBlockGenerator() @@ -143,7 +138,7 @@ func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 60*time.Millisecond) defer cancel() - ns := wantsBlock.Exchange.NewSession(ctx).(*bssession.Session) + ns := wantsBlock.Exchange.NewSession(ctx) received, err := ns.GetBlock(ctx, block.Cid()) if received != nil { @@ -191,7 +186,8 @@ func TestUnwantedBlockNotAdded(t *testing.T) { // blockstore in the following scenario: // - the want for the block has been requested by the client // - the want for the block has not yet been sent out to a peer -// (because the live request queue is full) +// +// (because the live request queue is full) func TestPendingBlockAdded(t *testing.T) { ctx := context.Background() net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) @@ -627,7 +623,7 @@ func TestWantlistCleanup(t *testing.T) { } } -func assertLedgerMatch(ra, rb *decision.Receipt) error { +func assertLedgerMatch(ra, rb *server.Receipt) error { if ra.Sent != rb.Recv { return fmt.Errorf("mismatch in ledgers (exchanged bytes): %d sent vs %d recvd", ra.Sent, rb.Recv) } @@ -643,7 +639,7 @@ func assertLedgerMatch(ra, rb *decision.Receipt) error { return nil } -func assertLedgerEqual(ra, rb *decision.Receipt) error { +func assertLedgerEqual(ra, rb *server.Receipt) error { if ra.Value != rb.Value { return fmt.Errorf("mismatch in ledgers (value/debt ratio): %f vs %f ", ra.Value, rb.Value) } @@ -663,8 +659,8 @@ func assertLedgerEqual(ra, rb *decision.Receipt) error { return nil } -func newReceipt(sent, recv, exchanged uint64) *decision.Receipt { - return &decision.Receipt{ +func 
newReceipt(sent, recv, exchanged uint64) *server.Receipt { + return &server.Receipt{ Peer: "test", Value: float64(sent) / (1 + float64(recv)), Sent: sent, @@ -780,7 +776,7 @@ func TestBitswapLedgerTwoWay(t *testing.T) { } type testingScoreLedger struct { - scorePeer deciface.ScorePeerFunc + scorePeer server.ScorePeerFunc started chan struct{} closed chan struct{} } @@ -793,14 +789,14 @@ func newTestingScoreLedger() *testingScoreLedger { } } -func (tsl *testingScoreLedger) GetReceipt(p peer.ID) *deciface.Receipt { +func (tsl *testingScoreLedger) GetReceipt(p peer.ID) *server.Receipt { return nil } func (tsl *testingScoreLedger) AddToSentBytes(p peer.ID, n int) {} func (tsl *testingScoreLedger) AddToReceivedBytes(p peer.ID, n int) {} func (tsl *testingScoreLedger) PeerConnected(p peer.ID) {} func (tsl *testingScoreLedger) PeerDisconnected(p peer.ID) {} -func (tsl *testingScoreLedger) Start(scorePeer deciface.ScorePeerFunc) { +func (tsl *testingScoreLedger) Start(scorePeer server.ScorePeerFunc) { tsl.scorePeer = scorePeer close(tsl.started) } @@ -873,7 +869,7 @@ func TestTracer(t *testing.T) { // Install Tracer wiretap := new(mockTracer) - bitswap.WithTracer(wiretap)(instances[0].Exchange) + updateTracer(instances[0].Exchange, wiretap) // First peer has block addBlock(t, context.Background(), instances[0], blocks[0]) @@ -955,7 +951,7 @@ func TestTracer(t *testing.T) { } // After disabling WireTap, no new messages are logged - bitswap.WithTracer(nil)(instances[0].Exchange) + updateTracer(instances[0].Exchange, nil) addBlock(t, context.Background(), instances[0], blocks[1]) @@ -985,3 +981,7 @@ func TestTracer(t *testing.T) { } } } + +func updateTracer(bs *bitswap.Bitswap, tap tracer.Tracer) { + bitswap.WithTracer(tap).V.(func(*bitswap.Bitswap))(bs) +} diff --git a/bitswap/bitswap_with_sessions_test.go b/bitswap/client/bitswap_with_sessions_test.go similarity index 92% rename from bitswap/bitswap_with_sessions_test.go rename to 
bitswap/client/bitswap_with_sessions_test.go index 7532a908c..8ba2d6e9f 100644 --- a/bitswap/bitswap_with_sessions_test.go +++ b/bitswap/client/bitswap_with_sessions_test.go @@ -1,4 +1,4 @@ -package bitswap_test +package client_test import ( "context" @@ -6,9 +6,9 @@ import ( "testing" "time" - bitswap "github.com/ipfs/go-bitswap" - bssession "github.com/ipfs/go-bitswap/internal/session" - testinstance "github.com/ipfs/go-bitswap/testinstance" + "github.com/ipfs/go-bitswap" + "github.com/ipfs/go-bitswap/client/internal/session" + testinstance "github.com/ipfs/go-bitswap/client/testinstance" tn "github.com/ipfs/go-bitswap/testnet" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" @@ -18,6 +18,24 @@ import ( tu "github.com/libp2p/go-libp2p-testing/etc" ) +func getVirtualNetwork() tn.Network { + // FIXME: the tests are really sensitive to the network delay. fix them to work + // well under varying conditions + return tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) +} + +func addBlock(t *testing.T, ctx context.Context, inst testinstance.Instance, blk blocks.Block) { + t.Helper() + err := inst.Blockstore().Put(ctx, blk) + if err != nil { + t.Fatal(err) + } + err = inst.Exchange.NotifyNewBlocks(ctx, blk) + if err != nil { + t.Fatal(err) + } +} + func TestBasicSessions(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -154,7 +172,7 @@ func TestSessionSplitFetch(t *testing.T) { } // Create a session on the remaining peer and fetch all the blocks 10 at a time - ses := inst[10].Exchange.NewSession(ctx).(*bssession.Session) + ses := inst[10].Exchange.NewSession(ctx).(*session.Session) ses.SetBaseTickDelay(time.Millisecond * 10) for i := 0; i < 10; i++ { @@ -199,7 +217,7 @@ func TestFetchNotConnected(t *testing.T) { // Note: Peer A and Peer B are not initially connected, so this tests // that Peer B will search for and find Peer A thisNode := ig.Next() - ses := 
thisNode.Exchange.NewSession(ctx).(*bssession.Session) + ses := thisNode.Exchange.NewSession(ctx).(*session.Session) ses.SetBaseTickDelay(time.Millisecond * 10) ch, err := ses.GetBlocks(ctx, cids) @@ -245,7 +263,7 @@ func TestFetchAfterDisconnect(t *testing.T) { } // Request all blocks with Peer B - ses := peerB.Exchange.NewSession(ctx).(*bssession.Session) + ses := peerB.Exchange.NewSession(ctx).(*session.Session) ses.SetBaseTickDelay(time.Millisecond * 10) ch, err := ses.GetBlocks(ctx, cids) diff --git a/bitswap/client/client.go b/bitswap/client/client.go new file mode 100644 index 000000000..1380e0d9b --- /dev/null +++ b/bitswap/client/client.go @@ -0,0 +1,481 @@ +// Package bitswap implements the IPFS exchange interface with the BitSwap +// bilateral exchange protocol. +package client + +import ( + "context" + "errors" + + "sync" + "time" + + delay "github.com/ipfs/go-ipfs-delay" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + + bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" + bsgetter "github.com/ipfs/go-bitswap/client/internal/getter" + bsmq "github.com/ipfs/go-bitswap/client/internal/messagequeue" + "github.com/ipfs/go-bitswap/client/internal/notifications" + bspm "github.com/ipfs/go-bitswap/client/internal/peermanager" + bspqm "github.com/ipfs/go-bitswap/client/internal/providerquerymanager" + bssession "github.com/ipfs/go-bitswap/client/internal/session" + bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" + bssm "github.com/ipfs/go-bitswap/client/internal/sessionmanager" + bsspm "github.com/ipfs/go-bitswap/client/internal/sessionpeermanager" + "github.com/ipfs/go-bitswap/internal" + "github.com/ipfs/go-bitswap/internal/defaults" + bsmsg "github.com/ipfs/go-bitswap/message" + bmetrics "github.com/ipfs/go-bitswap/metrics" + bsnet "github.com/ipfs/go-bitswap/network" + "github.com/ipfs/go-bitswap/tracer" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + 
blockstore "github.com/ipfs/go-ipfs-blockstore" + exchange "github.com/ipfs/go-ipfs-exchange-interface" + logging "github.com/ipfs/go-log" + "github.com/ipfs/go-metrics-interface" + process "github.com/jbenet/goprocess" + procctx "github.com/jbenet/goprocess/context" + "github.com/libp2p/go-libp2p-core/peer" +) + +var log = logging.Logger("bitswap-client") + +// Option defines the functional option type that can be used to configure +// bitswap instances +type Option func(*Client) + +// ProviderSearchDelay overwrites the global provider search delay +func ProviderSearchDelay(newProvSearchDelay time.Duration) Option { + return func(bs *Client) { + bs.provSearchDelay = newProvSearchDelay + } +} + +// RebroadcastDelay overwrites the global provider rebroadcast delay +func RebroadcastDelay(newRebroadcastDelay delay.D) Option { + return func(bs *Client) { + bs.rebroadcastDelay = newRebroadcastDelay + } +} + +func SetSimulateDontHavesOnTimeout(send bool) Option { + return func(bs *Client) { + bs.simulateDontHavesOnTimeout = send + } +} + +// Configures the Client to use given tracer. +// This provides methods to access all messages sent and received by the Client. +// This interface can be used to implement various statistics (this is original intent). +func WithTracer(tap tracer.Tracer) Option { + return func(bs *Client) { + bs.tracer = tap + } +} + +func WithBlockReceivedNotifier(brn BlockReceivedNotifier) Option { + return func(bs *Client) { + bs.blockReceivedNotifier = brn + } +} + +type BlockReceivedNotifier interface { + // ReceivedBlocks notify the decision engine that a peer is well behaving + // and gave us usefull data, potentially increasing it's score and making us + // send them more data in exchange. + ReceivedBlocks(peer.ID, []blocks.Block) +} + +// New initializes a BitSwap instance that communicates over the provided +// BitSwapNetwork. This function registers the returned instance as the network +// delegate. 
Runs until context is cancelled or bitswap.Close is called. +func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, m *bmetrics.Metrics, options ...Option) *Client { + // important to use provided parent context (since it may include important + // loggable data). It's probably not a good idea to allow bitswap to be + // coupled to the concerns of the ipfs daemon in this way. + // + // FIXME(btc) Now that bitswap manages itself using a process, it probably + // shouldn't accept a context anymore. Clients should probably use Close() + // exclusively. We should probably find another way to share logging data + ctx, cancelFunc := context.WithCancel(parent) + + px := process.WithTeardown(func() error { + return nil + }) + + // onDontHaveTimeout is called when a want-block is sent to a peer that + // has an old version of Bitswap that doesn't support DONT_HAVE messages, + // or when no response is received within a timeout. + var sm *bssm.SessionManager + var bs *Client + onDontHaveTimeout := func(p peer.ID, dontHaves []cid.Cid) { + // Simulate a message arriving with DONT_HAVEs + if bs.simulateDontHavesOnTimeout { + sm.ReceiveFrom(ctx, p, nil, nil, dontHaves) + } + } + peerQueueFactory := func(ctx context.Context, p peer.ID) bspm.PeerQueue { + return bsmq.New(ctx, p, network, onDontHaveTimeout) + } + + sim := bssim.New() + bpm := bsbpm.New() + pm := bspm.New(ctx, peerQueueFactory, network.Self()) + pqm := bspqm.New(ctx, network) + + sessionFactory := func( + sessctx context.Context, + sessmgr bssession.SessionManager, + id uint64, + spm bssession.SessionPeerManager, + sim *bssim.SessionInterestManager, + pm bssession.PeerManager, + bpm *bsbpm.BlockPresenceManager, + notif notifications.PubSub, + provSearchDelay time.Duration, + rebroadcastDelay delay.D, + self peer.ID) bssm.Session { + return bssession.New(sessctx, sessmgr, id, spm, pqm, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) + } + sessionPeerManagerFactory 
:= func(ctx context.Context, id uint64) bssession.SessionPeerManager { + return bsspm.New(id, network.ConnectionManager()) + } + notif := notifications.New() + sm = bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) + + bs = &Client{ + blockstore: bstore, + network: network, + process: px, + pm: pm, + pqm: pqm, + sm: sm, + sim: sim, + notif: notif, + counters: new(counters), + dupMetric: m.DupHist(), + allMetric: m.AllHist(), + provSearchDelay: defaults.ProvSearchDelay, + rebroadcastDelay: delay.Fixed(time.Minute), + simulateDontHavesOnTimeout: true, + } + + // apply functional options before starting and running bitswap + for _, option := range options { + option(bs) + } + + bs.pqm.Startup() + + // bind the context and process. + // do it over here to avoid closing before all setup is done. + go func() { + <-px.Closing() // process closes first + sm.Shutdown() + cancelFunc() + notif.Shutdown() + }() + procctx.CloseAfterContext(px, ctx) // parent cancelled first + + return bs +} + +// Client instances implement the bitswap protocol. 
+type Client struct { + pm *bspm.PeerManager + + // the provider query manager manages requests to find providers + pqm *bspqm.ProviderQueryManager + + // network delivers messages on behalf of the session + network bsnet.BitSwapNetwork + + // blockstore is the local database + // NB: ensure threadsafety + blockstore blockstore.Blockstore + + // manages channels of outgoing blocks for sessions + notif notifications.PubSub + + process process.Process + + // Counters for various statistics + counterLk sync.Mutex + counters *counters + + // Metrics interface metrics + dupMetric metrics.Histogram + allMetric metrics.Histogram + + // External statistics interface + tracer tracer.Tracer + + // the SessionManager routes requests to interested sessions + sm *bssm.SessionManager + + // the SessionInterestManager keeps track of which sessions are interested + // in which CIDs + sim *bssim.SessionInterestManager + + // how long to wait before looking for providers in a session + provSearchDelay time.Duration + + // how often to rebroadcast providing requests to find more optimized providers + rebroadcastDelay delay.D + + blockReceivedNotifier BlockReceivedNotifier + + // whether we should actually simulate dont haves on request timeout + simulateDontHavesOnTimeout bool +} + +type counters struct { + blocksRecvd uint64 + dupBlocksRecvd uint64 + dupDataRecvd uint64 + dataRecvd uint64 + messagesRecvd uint64 +} + +// GetBlock attempts to retrieve a particular block from peers within the +// deadline enforced by the context. +func (bs *Client) GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) { + ctx, span := internal.StartSpan(ctx, "GetBlock", trace.WithAttributes(attribute.String("Key", k.String()))) + defer span.End() + return bsgetter.SyncGetBlock(ctx, k, bs.GetBlocks) +} + +// GetBlocks returns a channel where the caller may receive blocks that +// correspond to the provided |keys|. 
Returns an error if BitSwap is unable to +// begin this request within the deadline enforced by the context. +// +// NB: Your request remains open until the context expires. To conserve +// resources, provide a context with a reasonably short deadline (ie. not one +// that lasts throughout the lifetime of the server) +func (bs *Client) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { + ctx, span := internal.StartSpan(ctx, "GetBlocks", trace.WithAttributes(attribute.Int("NumKeys", len(keys)))) + defer span.End() + session := bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay) + return session.GetBlocks(ctx, keys) +} + +// NotifyNewBlocks announces the existence of blocks to this bitswap service. +// Bitswap itself doesn't store new blocks. It's the caller responsibility to ensure +// that those blocks are available in the blockstore before calling this function. +func (bs *Client) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error { + ctx, span := internal.StartSpan(ctx, "NotifyNewBlocks") + defer span.End() + + select { + case <-bs.process.Closing(): + return errors.New("bitswap is closed") + default: + } + + blkCids := make([]cid.Cid, len(blks)) + for i, blk := range blks { + blkCids[i] = blk.Cid() + } + + // Send all block keys (including duplicates) to any sessions that want them. + // (The duplicates are needed by sessions for accounting purposes) + bs.sm.ReceiveFrom(ctx, "", blkCids, nil, nil) + + // Publish the block to any Bitswap clients that had requested blocks. + // (the sessions use this pubsub mechanism to inform clients of incoming + // blocks) + bs.notif.Publish(blks...) 
+ + return nil +} + +// receiveBlocksFrom process blocks received from the network +func (bs *Client) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []blocks.Block, haves []cid.Cid, dontHaves []cid.Cid) error { + select { + case <-bs.process.Closing(): + return errors.New("bitswap is closed") + default: + } + + wanted, notWanted := bs.sim.SplitWantedUnwanted(blks) + for _, b := range notWanted { + log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), from) + } + + allKs := make([]cid.Cid, 0, len(blks)) + for _, b := range blks { + allKs = append(allKs, b.Cid()) + } + + // Inform the PeerManager so that we can calculate per-peer latency + combined := make([]cid.Cid, 0, len(allKs)+len(haves)+len(dontHaves)) + combined = append(combined, allKs...) + combined = append(combined, haves...) + combined = append(combined, dontHaves...) + bs.pm.ResponseReceived(from, combined) + + // Send all block keys (including duplicates) to any sessions that want them for accounting purpose. + bs.sm.ReceiveFrom(ctx, from, allKs, haves, dontHaves) + + if bs.blockReceivedNotifier != nil { + bs.blockReceivedNotifier.ReceivedBlocks(from, wanted) + } + + // Publish the block to any Bitswap clients that had requested blocks. + // (the sessions use this pubsub mechanism to inform clients of incoming + // blocks) + for _, b := range wanted { + bs.notif.Publish(b) + } + + for _, b := range wanted { + log.Debugw("Bitswap.GetBlockRequest.End", "cid", b.Cid()) + } + + return nil +} + +// ReceiveMessage is called by the network interface when a new message is +// received. 
+func (bs *Client) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { + bs.counterLk.Lock() + bs.counters.messagesRecvd++ + bs.counterLk.Unlock() + + if bs.tracer != nil { + bs.tracer.MessageReceived(p, incoming) + } + + iblocks := incoming.Blocks() + + if len(iblocks) > 0 { + bs.updateReceiveCounters(iblocks) + for _, b := range iblocks { + log.Debugf("[recv] block; cid=%s, peer=%s", b.Cid(), p) + } + } + + haves := incoming.Haves() + dontHaves := incoming.DontHaves() + if len(iblocks) > 0 || len(haves) > 0 || len(dontHaves) > 0 { + // Process blocks + err := bs.receiveBlocksFrom(ctx, p, iblocks, haves, dontHaves) + if err != nil { + log.Warnf("ReceiveMessage recvBlockFrom error: %s", err) + return + } + } +} + +func (bs *Client) updateReceiveCounters(blocks []blocks.Block) { + // Check which blocks are in the datastore + // (Note: any errors from the blockstore are simply logged out in + // blockstoreHas()) + blocksHas := bs.blockstoreHas(blocks) + + bs.counterLk.Lock() + defer bs.counterLk.Unlock() + + // Do some accounting for each block + for i, b := range blocks { + has := blocksHas[i] + + blkLen := len(b.RawData()) + bs.allMetric.Observe(float64(blkLen)) + if has { + bs.dupMetric.Observe(float64(blkLen)) + } + + c := bs.counters + + c.blocksRecvd++ + c.dataRecvd += uint64(blkLen) + if has { + c.dupBlocksRecvd++ + c.dupDataRecvd += uint64(blkLen) + } + } +} + +func (bs *Client) blockstoreHas(blks []blocks.Block) []bool { + res := make([]bool, len(blks)) + + wg := sync.WaitGroup{} + for i, block := range blks { + wg.Add(1) + go func(i int, b blocks.Block) { + defer wg.Done() + + has, err := bs.blockstore.Has(context.TODO(), b.Cid()) + if err != nil { + log.Infof("blockstore.Has error: %s", err) + has = false + } + + res[i] = has + }(i, block) + } + wg.Wait() + + return res +} + +// PeerConnected is called by the network interface +// when a peer initiates a new connection to bitswap. 
+func (bs *Client) PeerConnected(p peer.ID) { + bs.pm.Connected(p) +} + +// PeerDisconnected is called by the network interface when a peer +// closes a connection +func (bs *Client) PeerDisconnected(p peer.ID) { + bs.pm.Disconnected(p) +} + +// ReceiveError is called by the network interface when an error happens +// at the network layer. Currently just logs error. +func (bs *Client) ReceiveError(err error) { + log.Infof("Bitswap Client ReceiveError: %s", err) + // TODO log the network error + // TODO bubble the network error up to the parent context/error logger +} + +// Close is called to shutdown the Client +func (bs *Client) Close() error { + return bs.process.Close() +} + +// GetWantlist returns the current local wantlist (both want-blocks and +// want-haves). +func (bs *Client) GetWantlist() []cid.Cid { + return bs.pm.CurrentWants() +} + +// GetWantBlocks returns the current list of want-blocks. +func (bs *Client) GetWantBlocks() []cid.Cid { + return bs.pm.CurrentWantBlocks() +} + +// GetWanthaves returns the current list of want-haves. +func (bs *Client) GetWantHaves() []cid.Cid { + return bs.pm.CurrentWantHaves() +} + +// IsOnline is needed to match go-ipfs-exchange-interface +func (bs *Client) IsOnline() bool { + return true +} + +// NewSession generates a new Bitswap session. You should use this, rather +// that calling Client.GetBlocks, any time you intend to do several related +// block requests in a row. The session returned will have it's own GetBlocks +// method, but the session will use the fact that the requests are related to +// be more efficient in its requests to peers. If you are using a session +// from go-blockservice, it will create a bitswap session automatically. 
+func (bs *Client) NewSession(ctx context.Context) exchange.Fetcher { + ctx, span := internal.StartSpan(ctx, "NewSession") + defer span.End() + return bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay) +} diff --git a/bitswap/docs/go-bitswap.png b/bitswap/client/docs/go-bitswap.png similarity index 100% rename from bitswap/docs/go-bitswap.png rename to bitswap/client/docs/go-bitswap.png diff --git a/bitswap/docs/go-bitswap.puml b/bitswap/client/docs/go-bitswap.puml similarity index 100% rename from bitswap/docs/go-bitswap.puml rename to bitswap/client/docs/go-bitswap.puml diff --git a/bitswap/docs/how-bitswap-works.md b/bitswap/client/docs/how-bitswap-works.md similarity index 100% rename from bitswap/docs/how-bitswap-works.md rename to bitswap/client/docs/how-bitswap-works.md diff --git a/bitswap/internal/blockpresencemanager/blockpresencemanager.go b/bitswap/client/internal/blockpresencemanager/blockpresencemanager.go similarity index 100% rename from bitswap/internal/blockpresencemanager/blockpresencemanager.go rename to bitswap/client/internal/blockpresencemanager/blockpresencemanager.go diff --git a/bitswap/internal/blockpresencemanager/blockpresencemanager_test.go b/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go similarity index 100% rename from bitswap/internal/blockpresencemanager/blockpresencemanager_test.go rename to bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go diff --git a/bitswap/internal/getter/getter.go b/bitswap/client/internal/getter/getter.go similarity index 96% rename from bitswap/internal/getter/getter.go rename to bitswap/client/internal/getter/getter.go index c5c1951b8..5a58e187b 100644 --- a/bitswap/internal/getter/getter.go +++ b/bitswap/client/internal/getter/getter.go @@ -4,8 +4,8 @@ import ( "context" "errors" - "github.com/ipfs/go-bitswap/internal" - notifications "github.com/ipfs/go-bitswap/internal/notifications" + "github.com/ipfs/go-bitswap/client/internal" + 
notifications "github.com/ipfs/go-bitswap/client/internal/notifications" logging "github.com/ipfs/go-log" blocks "github.com/ipfs/go-block-format" diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr.go b/bitswap/client/internal/messagequeue/donthavetimeoutmgr.go similarity index 100% rename from bitswap/internal/messagequeue/donthavetimeoutmgr.go rename to bitswap/client/internal/messagequeue/donthavetimeoutmgr.go diff --git a/bitswap/internal/messagequeue/donthavetimeoutmgr_test.go b/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go similarity index 100% rename from bitswap/internal/messagequeue/donthavetimeoutmgr_test.go rename to bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go diff --git a/bitswap/internal/messagequeue/messagequeue.go b/bitswap/client/internal/messagequeue/messagequeue.go similarity index 99% rename from bitswap/internal/messagequeue/messagequeue.go rename to bitswap/client/internal/messagequeue/messagequeue.go index 48fdaa863..6135fa54b 100644 --- a/bitswap/internal/messagequeue/messagequeue.go +++ b/bitswap/client/internal/messagequeue/messagequeue.go @@ -7,10 +7,10 @@ import ( "time" "github.com/benbjohnson/clock" + bswl "github.com/ipfs/go-bitswap/client/wantlist" bsmsg "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" bsnet "github.com/ipfs/go-bitswap/network" - bswl "github.com/ipfs/go-bitswap/wantlist" cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" peer "github.com/libp2p/go-libp2p-core/peer" diff --git a/bitswap/internal/messagequeue/messagequeue_test.go b/bitswap/client/internal/messagequeue/messagequeue_test.go similarity index 100% rename from bitswap/internal/messagequeue/messagequeue_test.go rename to bitswap/client/internal/messagequeue/messagequeue_test.go diff --git a/bitswap/internal/notifications/notifications.go b/bitswap/client/internal/notifications/notifications.go similarity index 100% rename from 
bitswap/internal/notifications/notifications.go rename to bitswap/client/internal/notifications/notifications.go diff --git a/bitswap/internal/notifications/notifications_test.go b/bitswap/client/internal/notifications/notifications_test.go similarity index 100% rename from bitswap/internal/notifications/notifications_test.go rename to bitswap/client/internal/notifications/notifications_test.go diff --git a/bitswap/internal/peermanager/peermanager.go b/bitswap/client/internal/peermanager/peermanager.go similarity index 100% rename from bitswap/internal/peermanager/peermanager.go rename to bitswap/client/internal/peermanager/peermanager.go diff --git a/bitswap/internal/peermanager/peermanager_test.go b/bitswap/client/internal/peermanager/peermanager_test.go similarity index 100% rename from bitswap/internal/peermanager/peermanager_test.go rename to bitswap/client/internal/peermanager/peermanager_test.go diff --git a/bitswap/internal/peermanager/peerwantmanager.go b/bitswap/client/internal/peermanager/peerwantmanager.go similarity index 100% rename from bitswap/internal/peermanager/peerwantmanager.go rename to bitswap/client/internal/peermanager/peerwantmanager.go diff --git a/bitswap/internal/peermanager/peerwantmanager_test.go b/bitswap/client/internal/peermanager/peerwantmanager_test.go similarity index 100% rename from bitswap/internal/peermanager/peerwantmanager_test.go rename to bitswap/client/internal/peermanager/peerwantmanager_test.go diff --git a/bitswap/internal/providerquerymanager/providerquerymanager.go b/bitswap/client/internal/providerquerymanager/providerquerymanager.go similarity index 100% rename from bitswap/internal/providerquerymanager/providerquerymanager.go rename to bitswap/client/internal/providerquerymanager/providerquerymanager.go diff --git a/bitswap/internal/providerquerymanager/providerquerymanager_test.go b/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go similarity index 100% rename from 
bitswap/internal/providerquerymanager/providerquerymanager_test.go rename to bitswap/client/internal/providerquerymanager/providerquerymanager_test.go diff --git a/bitswap/internal/session/cidqueue.go b/bitswap/client/internal/session/cidqueue.go similarity index 100% rename from bitswap/internal/session/cidqueue.go rename to bitswap/client/internal/session/cidqueue.go diff --git a/bitswap/internal/session/peerresponsetracker.go b/bitswap/client/internal/session/peerresponsetracker.go similarity index 100% rename from bitswap/internal/session/peerresponsetracker.go rename to bitswap/client/internal/session/peerresponsetracker.go diff --git a/bitswap/internal/session/peerresponsetracker_test.go b/bitswap/client/internal/session/peerresponsetracker_test.go similarity index 100% rename from bitswap/internal/session/peerresponsetracker_test.go rename to bitswap/client/internal/session/peerresponsetracker_test.go diff --git a/bitswap/internal/session/sentwantblockstracker.go b/bitswap/client/internal/session/sentwantblockstracker.go similarity index 100% rename from bitswap/internal/session/sentwantblockstracker.go rename to bitswap/client/internal/session/sentwantblockstracker.go diff --git a/bitswap/internal/session/sentwantblockstracker_test.go b/bitswap/client/internal/session/sentwantblockstracker_test.go similarity index 100% rename from bitswap/internal/session/sentwantblockstracker_test.go rename to bitswap/client/internal/session/sentwantblockstracker_test.go diff --git a/bitswap/internal/session/session.go b/bitswap/client/internal/session/session.go similarity index 96% rename from bitswap/internal/session/session.go rename to bitswap/client/internal/session/session.go index fa3c87b97..7b7eb871c 100644 --- a/bitswap/internal/session/session.go +++ b/bitswap/client/internal/session/session.go @@ -4,12 +4,12 @@ import ( "context" "time" - "github.com/ipfs/go-bitswap/internal" - bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" - bsgetter 
"github.com/ipfs/go-bitswap/internal/getter" - notifications "github.com/ipfs/go-bitswap/internal/notifications" - bspm "github.com/ipfs/go-bitswap/internal/peermanager" - bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" + "github.com/ipfs/go-bitswap/client/internal" + bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" + bsgetter "github.com/ipfs/go-bitswap/client/internal/getter" + notifications "github.com/ipfs/go-bitswap/client/internal/notifications" + bspm "github.com/ipfs/go-bitswap/client/internal/peermanager" + bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" @@ -476,10 +476,10 @@ func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { // The session will broadcast if it has outstanding wants and doesn't receive // any blocks for some time. // The length of time is calculated -// - initially -// as a fixed delay -// - once some blocks are received -// from a base delay and average latency, with a backoff +// - initially +// as a fixed delay +// - once some blocks are received +// from a base delay and average latency, with a backoff func (s *Session) resetIdleTick() { var tickDelay time.Duration if !s.latencyTrkr.hasLatency() { diff --git a/bitswap/internal/session/session_test.go b/bitswap/client/internal/session/session_test.go similarity index 97% rename from bitswap/internal/session/session_test.go rename to bitswap/client/internal/session/session_test.go index b63a20d9d..eb99380b1 100644 --- a/bitswap/internal/session/session_test.go +++ b/bitswap/client/internal/session/session_test.go @@ -6,11 +6,11 @@ import ( "testing" "time" - bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" - notifications "github.com/ipfs/go-bitswap/internal/notifications" - bspm "github.com/ipfs/go-bitswap/internal/peermanager" - bssim 
"github.com/ipfs/go-bitswap/internal/sessioninterestmanager" - bsspm "github.com/ipfs/go-bitswap/internal/sessionpeermanager" + bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" + notifications "github.com/ipfs/go-bitswap/client/internal/notifications" + bspm "github.com/ipfs/go-bitswap/client/internal/peermanager" + bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" + bsspm "github.com/ipfs/go-bitswap/client/internal/sessionpeermanager" "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" diff --git a/bitswap/internal/session/sessionwants.go b/bitswap/client/internal/session/sessionwants.go similarity index 100% rename from bitswap/internal/session/sessionwants.go rename to bitswap/client/internal/session/sessionwants.go diff --git a/bitswap/internal/session/sessionwants_test.go b/bitswap/client/internal/session/sessionwants_test.go similarity index 100% rename from bitswap/internal/session/sessionwants_test.go rename to bitswap/client/internal/session/sessionwants_test.go diff --git a/bitswap/internal/session/sessionwantsender.go b/bitswap/client/internal/session/sessionwantsender.go similarity index 99% rename from bitswap/internal/session/sessionwantsender.go rename to bitswap/client/internal/session/sessionwantsender.go index 95439a9bf..f26356b74 100644 --- a/bitswap/internal/session/sessionwantsender.go +++ b/bitswap/client/internal/session/sessionwantsender.go @@ -3,7 +3,7 @@ package session import ( "context" - bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" + bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" @@ -70,14 +70,12 @@ type change struct { type onSendFn func(to peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) type onPeersExhaustedFn func([]cid.Cid) -// // sessionWantSender is responsible for sending want-have and 
want-block to // peers. For each want, it sends a single optimistic want-block request to // one peer and want-have requests to all other peers in the session. // To choose the best peer for the optimistic want-block it maintains a list // of how peers have responded to each want (HAVE / DONT_HAVE / Unknown) and // consults the peer response tracker (records which peers sent us blocks). -// type sessionWantSender struct { // The context is used when sending wants ctx context.Context diff --git a/bitswap/internal/session/sessionwantsender_test.go b/bitswap/client/internal/session/sessionwantsender_test.go similarity index 99% rename from bitswap/internal/session/sessionwantsender_test.go rename to bitswap/client/internal/session/sessionwantsender_test.go index 4b39a893f..079d73fa1 100644 --- a/bitswap/internal/session/sessionwantsender_test.go +++ b/bitswap/client/internal/session/sessionwantsender_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" - bspm "github.com/ipfs/go-bitswap/internal/peermanager" - bsspm "github.com/ipfs/go-bitswap/internal/sessionpeermanager" + bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" + bspm "github.com/ipfs/go-bitswap/client/internal/peermanager" + bsspm "github.com/ipfs/go-bitswap/client/internal/sessionpeermanager" "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p-core/peer" diff --git a/bitswap/internal/session/wantinfo_test.go b/bitswap/client/internal/session/wantinfo_test.go similarity index 100% rename from bitswap/internal/session/wantinfo_test.go rename to bitswap/client/internal/session/wantinfo_test.go diff --git a/bitswap/internal/sessioninterestmanager/sessioninterestmanager.go b/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager.go similarity index 100% rename from bitswap/internal/sessioninterestmanager/sessioninterestmanager.go rename to 
bitswap/client/internal/sessioninterestmanager/sessioninterestmanager.go diff --git a/bitswap/internal/sessioninterestmanager/sessioninterestmanager_test.go b/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go similarity index 100% rename from bitswap/internal/sessioninterestmanager/sessioninterestmanager_test.go rename to bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go diff --git a/bitswap/internal/sessionmanager/sessionmanager.go b/bitswap/client/internal/sessionmanager/sessionmanager.go similarity index 94% rename from bitswap/internal/sessionmanager/sessionmanager.go rename to bitswap/client/internal/sessionmanager/sessionmanager.go index 7a48e14db..174b8b90c 100644 --- a/bitswap/internal/sessionmanager/sessionmanager.go +++ b/bitswap/client/internal/sessionmanager/sessionmanager.go @@ -11,11 +11,11 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - "github.com/ipfs/go-bitswap/internal" - bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" - notifications "github.com/ipfs/go-bitswap/internal/notifications" - bssession "github.com/ipfs/go-bitswap/internal/session" - bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" + "github.com/ipfs/go-bitswap/client/internal" + bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" + notifications "github.com/ipfs/go-bitswap/client/internal/notifications" + bssession "github.com/ipfs/go-bitswap/client/internal/session" + bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" exchange "github.com/ipfs/go-ipfs-exchange-interface" peer "github.com/libp2p/go-libp2p-core/peer" ) diff --git a/bitswap/internal/sessionmanager/sessionmanager_test.go b/bitswap/client/internal/sessionmanager/sessionmanager_test.go similarity index 95% rename from bitswap/internal/sessionmanager/sessionmanager_test.go rename to bitswap/client/internal/sessionmanager/sessionmanager_test.go index 
8025bd5fa..00e07696a 100644 --- a/bitswap/internal/sessionmanager/sessionmanager_test.go +++ b/bitswap/client/internal/sessionmanager/sessionmanager_test.go @@ -9,11 +9,11 @@ import ( delay "github.com/ipfs/go-ipfs-delay" - bsbpm "github.com/ipfs/go-bitswap/internal/blockpresencemanager" - notifications "github.com/ipfs/go-bitswap/internal/notifications" - bspm "github.com/ipfs/go-bitswap/internal/peermanager" - bssession "github.com/ipfs/go-bitswap/internal/session" - bssim "github.com/ipfs/go-bitswap/internal/sessioninterestmanager" + bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" + notifications "github.com/ipfs/go-bitswap/client/internal/notifications" + bspm "github.com/ipfs/go-bitswap/client/internal/peermanager" + bssession "github.com/ipfs/go-bitswap/client/internal/session" + bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" "github.com/ipfs/go-bitswap/internal/testutil" blocks "github.com/ipfs/go-block-format" diff --git a/bitswap/internal/sessionpeermanager/sessionpeermanager.go b/bitswap/client/internal/sessionpeermanager/sessionpeermanager.go similarity index 100% rename from bitswap/internal/sessionpeermanager/sessionpeermanager.go rename to bitswap/client/internal/sessionpeermanager/sessionpeermanager.go diff --git a/bitswap/internal/sessionpeermanager/sessionpeermanager_test.go b/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go similarity index 100% rename from bitswap/internal/sessionpeermanager/sessionpeermanager_test.go rename to bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go diff --git a/bitswap/client/internal/tracing.go b/bitswap/client/internal/tracing.go new file mode 100644 index 000000000..aa1f7992f --- /dev/null +++ b/bitswap/client/internal/tracing.go @@ -0,0 +1,13 @@ +package internal + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/trace" +) + +func StartSpan(ctx context.Context, name string, opts 
...trace.SpanStartOption) (context.Context, trace.Span) { + return otel.Tracer("go-bitswap").Start(ctx, fmt.Sprintf("Bitswap.%s", name), opts...) +} diff --git a/bitswap/stat.go b/bitswap/client/stat.go similarity index 59% rename from bitswap/stat.go rename to bitswap/client/stat.go index af39ecb2e..013afec67 100644 --- a/bitswap/stat.go +++ b/bitswap/client/stat.go @@ -1,48 +1,30 @@ -package bitswap +package client import ( - "sort" - cid "github.com/ipfs/go-cid" ) // Stat is a struct that provides various statistics on bitswap operations type Stat struct { - ProvideBufLen int Wantlist []cid.Cid - Peers []string BlocksReceived uint64 DataReceived uint64 - BlocksSent uint64 - DataSent uint64 DupBlksReceived uint64 DupDataReceived uint64 MessagesReceived uint64 } // Stat returns aggregated statistics about bitswap operations -func (bs *Bitswap) Stat() (*Stat, error) { - st := new(Stat) - st.ProvideBufLen = len(bs.newBlocks) - st.Wantlist = bs.GetWantlist() +func (bs *Client) Stat() (st Stat, err error) { bs.counterLk.Lock() c := bs.counters st.BlocksReceived = c.blocksRecvd st.DupBlksReceived = c.dupBlocksRecvd st.DupDataReceived = c.dupDataRecvd - st.BlocksSent = c.blocksSent - st.DataSent = c.dataSent st.DataReceived = c.dataRecvd st.MessagesReceived = c.messagesRecvd bs.counterLk.Unlock() - - peers := bs.engine.Peers() - st.Peers = make([]string, 0, len(peers)) - - for _, p := range peers { - st.Peers = append(st.Peers, p.Pretty()) - } - sort.Strings(st.Peers) + st.Wantlist = bs.GetWantlist() return st, nil } diff --git a/bitswap/testinstance/testinstance.go b/bitswap/client/testinstance/testinstance.go similarity index 97% rename from bitswap/testinstance/testinstance.go rename to bitswap/client/testinstance/testinstance.go index 05e3d515e..6522de3d4 100644 --- a/bitswap/testinstance/testinstance.go +++ b/bitswap/client/testinstance/testinstance.go @@ -4,7 +4,7 @@ import ( "context" "time" - bitswap "github.com/ipfs/go-bitswap" + "github.com/ipfs/go-bitswap" 
bsnet "github.com/ipfs/go-bitswap/network" tn "github.com/ipfs/go-bitswap/testnet" ds "github.com/ipfs/go-datastore" @@ -120,7 +120,7 @@ func NewInstance(ctx context.Context, net tn.Network, p tnet.Identity, netOption panic(err.Error()) // FIXME perhaps change signature and return error. } - bs := bitswap.New(ctx, adapter, bstore, bsOptions...).(*bitswap.Bitswap) + bs := bitswap.New(ctx, adapter, bstore, bsOptions...) return Instance{ Adapter: adapter, diff --git a/bitswap/wantlist/wantlist.go b/bitswap/client/wantlist/wantlist.go similarity index 100% rename from bitswap/wantlist/wantlist.go rename to bitswap/client/wantlist/wantlist.go diff --git a/bitswap/wantlist/wantlist_test.go b/bitswap/client/wantlist/wantlist_test.go similarity index 100% rename from bitswap/wantlist/wantlist_test.go rename to bitswap/client/wantlist/wantlist_test.go diff --git a/bitswap/decision/decision.go b/bitswap/decision/decision.go deleted file mode 100644 index 4afc463ec..000000000 --- a/bitswap/decision/decision.go +++ /dev/null @@ -1,12 +0,0 @@ -package decision - -import intdec "github.com/ipfs/go-bitswap/internal/decision" - -// Expose Receipt externally -type Receipt = intdec.Receipt - -// Expose ScoreLedger externally -type ScoreLedger = intdec.ScoreLedger - -// Expose ScorePeerFunc externally -type ScorePeerFunc = intdec.ScorePeerFunc diff --git a/bitswap/internal/testutil/testutil.go b/bitswap/internal/testutil/testutil.go index 6b9fc6f39..2bce60e56 100644 --- a/bitswap/internal/testutil/testutil.go +++ b/bitswap/internal/testutil/testutil.go @@ -4,8 +4,8 @@ import ( "fmt" "math/rand" + "github.com/ipfs/go-bitswap/client/wantlist" bsmsg "github.com/ipfs/go-bitswap/message" - "github.com/ipfs/go-bitswap/wantlist" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 88c3f7d41..43ac11d41 100644 --- a/bitswap/message/message.go +++ 
b/bitswap/message/message.go @@ -5,8 +5,8 @@ import ( "errors" "io" + "github.com/ipfs/go-bitswap/client/wantlist" pb "github.com/ipfs/go-bitswap/message/pb" - "github.com/ipfs/go-bitswap/wantlist" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index caddc6c26..46de49613 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -4,8 +4,8 @@ import ( "bytes" "testing" + "github.com/ipfs/go-bitswap/client/wantlist" pb "github.com/ipfs/go-bitswap/message/pb" - "github.com/ipfs/go-bitswap/wantlist" blocksutil "github.com/ipfs/go-ipfs-blocksutil" blocks "github.com/ipfs/go-block-format" diff --git a/bitswap/metrics/gen.go b/bitswap/metrics/gen.go new file mode 100644 index 000000000..22f16c535 --- /dev/null +++ b/bitswap/metrics/gen.go @@ -0,0 +1,111 @@ +package metrics + +import ( + "context" + "sync" + + "github.com/ipfs/go-metrics-interface" +) + +var ( + // the 1<<18+15 is to observe old file chunks that are 1<<18 + 14 in size + metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} + + timeMetricsBuckets = []float64{1, 10, 30, 60, 90, 120, 600} +) + +type onceAble[T any] struct { + o sync.Once + v T +} + +func (o *onceAble[T]) reuseOrInit(creator func() T) T { + o.o.Do(func() { + o.v = creator() + }) + return o.v +} + +// Metrics is a type which lazy initialize metrics objects. +// It MUST not be copied. 
+type Metrics struct { + ctx context.Context + + dupHist onceAble[metrics.Histogram] + allHist onceAble[metrics.Histogram] + sentHist onceAble[metrics.Histogram] + sendTimeHist onceAble[metrics.Histogram] + + pendingEngineGauge onceAble[metrics.Gauge] + activeEngineGauge onceAble[metrics.Gauge] + pendingBlocksGauge onceAble[metrics.Gauge] + activeBlocksGauge onceAble[metrics.Gauge] +} + +func New(ctx context.Context) *Metrics { + return &Metrics{ctx: metrics.CtxSubScope(ctx, "bitswap")} +} + +// DupHist return recv_dup_blocks_bytes. +// Threadsafe +func (m *Metrics) DupHist() metrics.Histogram { + return m.dupHist.reuseOrInit(func() metrics.Histogram { + return metrics.NewCtx(m.ctx, "recv_dup_blocks_bytes", "Summary of duplicate data blocks recived").Histogram(metricsBuckets) + }) +} + +// AllHist returns recv_all_blocks_bytes. +// Threadsafe +func (m *Metrics) AllHist() metrics.Histogram { + return m.allHist.reuseOrInit(func() metrics.Histogram { + return metrics.NewCtx(m.ctx, "recv_all_blocks_bytes", "Summary of all data blocks recived").Histogram(metricsBuckets) + }) +} + +// SentHist returns sent_all_blocks_bytes. +// Threadsafe +func (m *Metrics) SentHist() metrics.Histogram { + return m.sentHist.reuseOrInit(func() metrics.Histogram { + return metrics.NewCtx(m.ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by this bitswap").Histogram(metricsBuckets) + }) +} + +// SendTimeHist returns send_times. +// Threadsafe +func (m *Metrics) SendTimeHist() metrics.Histogram { + return m.sendTimeHist.reuseOrInit(func() metrics.Histogram { + return metrics.NewCtx(m.ctx, "send_times", "Histogram of how long it takes to send messages in this bitswap").Histogram(timeMetricsBuckets) + }) +} + +// PendingEngineGauge returns pending_tasks. 
+// Threadsafe +func (m *Metrics) PendingEngineGauge() metrics.Gauge { + return m.pendingEngineGauge.reuseOrInit(func() metrics.Gauge { + return metrics.NewCtx(m.ctx, "pending_tasks", "Total number of pending tasks").Gauge() + }) +} + +// ActiveEngineGauge returns active_tasks. +// Threadsafe +func (m *Metrics) ActiveEngineGauge() metrics.Gauge { + return m.activeEngineGauge.reuseOrInit(func() metrics.Gauge { + return metrics.NewCtx(m.ctx, "active_tasks", "Total number of active tasks").Gauge() + }) +} + +// PendingBlocksGauge returns pending_block_tasks. +// Threadsafe +func (m *Metrics) PendingBlocksGauge() metrics.Gauge { + return m.pendingBlocksGauge.reuseOrInit(func() metrics.Gauge { + return metrics.NewCtx(m.ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() + }) +} + +// ActiveBlocksGauge returns active_block_tasks. +// Threadsafe +func (m *Metrics) ActiveBlocksGauge() metrics.Gauge { + return m.activeBlocksGauge.reuseOrInit(func() metrics.Gauge { + return metrics.NewCtx(m.ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() + }) +} diff --git a/bitswap/network/connecteventmanager.go b/bitswap/network/connecteventmanager.go index a9053ba6a..723bf614e 100644 --- a/bitswap/network/connecteventmanager.go +++ b/bitswap/network/connecteventmanager.go @@ -20,10 +20,10 @@ const ( ) type connectEventManager struct { - connListener ConnectionListener - lk sync.RWMutex - cond sync.Cond - peers map[peer.ID]*peerState + connListeners []ConnectionListener + lk sync.RWMutex + cond sync.Cond + peers map[peer.ID]*peerState changeQueue []peer.ID stop bool @@ -35,11 +35,11 @@ type peerState struct { pending bool } -func newConnectEventManager(connListener ConnectionListener) *connectEventManager { +func newConnectEventManager(connListeners ...ConnectionListener) *connectEventManager { evtManager := &connectEventManager{ - connListener: connListener, - peers: make(map[peer.ID]*peerState), - done: make(chan struct{}), + 
connListeners: connListeners, + peers: make(map[peer.ID]*peerState), + done: make(chan struct{}), } evtManager.cond = sync.Cond{L: &evtManager.lk} return evtManager @@ -130,12 +130,16 @@ func (c *connectEventManager) worker() { // We could be transitioning from unresponsive to disconnected. if oldState == stateResponsive { c.lk.Unlock() - c.connListener.PeerDisconnected(pid) + for _, v := range c.connListeners { + v.PeerDisconnected(pid) + } c.lk.Lock() } case stateResponsive: c.lk.Unlock() - c.connListener.PeerConnected(pid) + for _, v := range c.connListeners { + v.PeerConnected(pid) + } c.lk.Lock() } } @@ -186,7 +190,8 @@ func (c *connectEventManager) MarkUnresponsive(p peer.ID) { // // - When we're connected to the peer, this will mark the peer as responsive (from unresponsive). // - When not connected, we ignore this call. Unfortunately, a peer may disconnect before we process -// the "on message" event, so we can't treat this as evidence of a connection. +// +// the "on message" event, so we can't treat this as evidence of a connection. func (c *connectEventManager) OnMessage(p peer.ID) { c.lk.RLock() unresponsive := c.getState(p) == stateUnresponsive diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 8648f8dd4..018d57ba0 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -36,7 +36,7 @@ type BitSwapNetwork interface { bsmsg.BitSwapMessage) error // Start registers the Reciver and starts handling new messages, connectivity events, etc. - Start(Receiver) + Start(...Receiver) // Stop stops the network service. 
Stop() diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 6f69b26a6..9762f5601 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -90,7 +90,7 @@ type impl struct { supportedProtocols []protocol.ID // inbound messages from the network are forwarded to the receiver - receiver Receiver + receivers []Receiver } type streamMessageSender struct { @@ -349,9 +349,15 @@ func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (network.Stre return bsnet.host.NewStream(ctx, p, bsnet.supportedProtocols...) } -func (bsnet *impl) Start(r Receiver) { - bsnet.receiver = r - bsnet.connectEvtMgr = newConnectEventManager(r) +func (bsnet *impl) Start(r ...Receiver) { + bsnet.receivers = r + { + connectionListeners := make([]ConnectionListener, len(r)) + for i, v := range r { + connectionListeners[i] = v + } + bsnet.connectEvtMgr = newConnectEventManager(connectionListeners...) + } for _, proto := range bsnet.supportedProtocols { bsnet.host.SetStreamHandler(proto, bsnet.handleNewStream) } @@ -403,7 +409,7 @@ func (bsnet *impl) Provide(ctx context.Context, k cid.Cid) error { func (bsnet *impl) handleNewStream(s network.Stream) { defer s.Close() - if bsnet.receiver == nil { + if len(bsnet.receivers) == 0 { _ = s.Reset() return } @@ -414,7 +420,9 @@ func (bsnet *impl) handleNewStream(s network.Stream) { if err != nil { if err != io.EOF { _ = s.Reset() - bsnet.receiver.ReceiveError(err) + for _, v := range bsnet.receivers { + v.ReceiveError(err) + } log.Debugf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err) } return @@ -425,7 +433,9 @@ func (bsnet *impl) handleNewStream(s network.Stream) { log.Debugf("bitswap net handleNewStream from %s", s.Conn().RemotePeer()) bsnet.connectEvtMgr.OnMessage(s.Conn().RemotePeer()) atomic.AddUint64(&bsnet.stats.MessagesRecvd, 1) - bsnet.receiver.ReceiveMessage(ctx, p, received) + for _, v := range bsnet.receivers { + v.ReceiveMessage(ctx, p, received) + } } } diff 
--git a/bitswap/options.go b/bitswap/options.go new file mode 100644 index 000000000..0c087b713 --- /dev/null +++ b/bitswap/options.go @@ -0,0 +1,88 @@ +package bitswap + +import ( + "time" + + "github.com/ipfs/go-bitswap/client" + "github.com/ipfs/go-bitswap/server" + "github.com/ipfs/go-bitswap/tracer" + delay "github.com/ipfs/go-ipfs-delay" +) + +type option func(*Bitswap) + +// Option is interface{} of server.Option or client.Option or func(*Bitswap) +// wrapped in a struct to gain strong type checking. +type Option struct { + V interface{} +} + +func EngineBlockstoreWorkerCount(count int) Option { + return Option{server.EngineBlockstoreWorkerCount(count)} +} + +func EngineTaskWorkerCount(count int) Option { + return Option{server.EngineTaskWorkerCount(count)} +} + +func MaxOutstandingBytesPerPeer(count int) Option { + return Option{server.MaxOutstandingBytesPerPeer(count)} +} + +func TaskWorkerCount(count int) Option { + return Option{server.TaskWorkerCount(count)} +} + +func ProvideEnabled(enabled bool) Option { + return Option{server.ProvideEnabled(enabled)} +} + +func SetSendDontHaves(send bool) Option { + return Option{server.SetSendDontHaves(send)} +} + +func WithPeerBlockRequestFilter(pbrf server.PeerBlockRequestFilter) Option { + return Option{server.WithPeerBlockRequestFilter(pbrf)} +} + +func WithScoreLedger(scoreLedger server.ScoreLedger) Option { + return Option{server.WithScoreLedger(scoreLedger)} +} + +func WithTargetMessageSize(tms int) Option { + return Option{server.WithTargetMessageSize(tms)} +} + +func WithTaskComparator(comparator server.TaskComparator) Option { + return Option{server.WithTaskComparator(comparator)} +} + +func ProviderSearchDelay(newProvSearchDelay time.Duration) Option { + return Option{client.ProviderSearchDelay(newProvSearchDelay)} +} + +func RebroadcastDelay(newRebroadcastDelay delay.D) Option { + return Option{client.RebroadcastDelay(newRebroadcastDelay)} +} + +func SetSimulateDontHavesOnTimeout(send bool) Option { + 
return Option{client.SetSimulateDontHavesOnTimeout(send)} +} + +func WithTracer(tap tracer.Tracer) Option { + // Only trace the server, both receive the same messages anyway + return Option{ + func(bs *Bitswap) { + bs.tracer = tap + // the tests use this to hot update tracers, we need to update tracers of impls if we are running + if bs.Client != nil { + if tap != nil { + tap = nopReceiveTracer{tap} + } + client.WithTracer(tap)(bs.Client) + // no need to check for server as they can't not be both running + server.WithTracer(tap)(bs.Server) + } + }, + } +} diff --git a/bitswap/polyfill.go b/bitswap/polyfill.go new file mode 100644 index 000000000..3ca47b1b4 --- /dev/null +++ b/bitswap/polyfill.go @@ -0,0 +1,174 @@ +package bitswap + +import ( + "context" + "fmt" + + "github.com/ipfs/go-bitswap/client" + "github.com/ipfs/go-bitswap/message" + "github.com/ipfs/go-bitswap/metrics" + "github.com/ipfs/go-bitswap/network" + "github.com/ipfs/go-bitswap/server" + "github.com/ipfs/go-bitswap/tracer" + + "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-ipfs-blockstore" + "github.com/ipfs/go-ipfs-exchange-interface" + logging "github.com/ipfs/go-log" + "github.com/libp2p/go-libp2p-core/peer" + + "go.uber.org/multierr" +) + +var log = logging.Logger("bitswap") + +// old interface we are targeting +type old interface { + Close() error + GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) + GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) + GetWantBlocks() []cid.Cid + GetWantHaves() []cid.Cid + GetWantlist() []cid.Cid + IsOnline() bool + LedgerForPeer(p peer.ID) *server.Receipt + NewSession(ctx context.Context) exchange.Fetcher + NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error + PeerConnected(p peer.ID) + PeerDisconnected(p peer.ID) + ReceiveError(err error) + ReceiveMessage(ctx context.Context, p peer.ID, incoming message.BitSwapMessage) + Stat() (*Stat, error) + WantlistForPeer(p peer.ID) 
[]cid.Cid +} + +var _ exchange.SessionExchange = (*Bitswap)(nil) +var _ old = (*Bitswap)(nil) + +type Bitswap struct { + *client.Client + *server.Server + + tracer tracer.Tracer + net network.BitSwapNetwork +} + +func New(ctx context.Context, net network.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Bitswap { + bs := &Bitswap{ + net: net, + } + + var serverOptions []server.Option + var clientOptions []client.Option + + for _, o := range options { + switch typedOption := o.V.(type) { + case server.Option: + serverOptions = append(serverOptions, typedOption) + case client.Option: + clientOptions = append(clientOptions, typedOption) + case option: + typedOption(bs) + default: + panic(fmt.Errorf("unknown option type passed to bitswap.New, got: %T, %v; expected: %T, %T or %T", typedOption, typedOption, server.Option(nil), client.Option(nil), server.Option(nil))) + } + } + + if bs.tracer != nil { + var tracer tracer.Tracer = nopReceiveTracer{bs.tracer} + clientOptions = append(clientOptions, client.WithTracer(tracer)) + serverOptions = append(serverOptions, server.WithTracer(tracer)) + } + + stats := metrics.New(ctx) + bs.Server = server.New(ctx, net, bstore, stats, serverOptions...) + bs.Client = client.New(ctx, net, bstore, stats, append(clientOptions, client.WithBlockReceivedNotifier(bs.Server))...) 
+ net.Start(bs) // use the polyfill receiver to log received errors and trace messages only once + + return bs +} + +func (bs *Bitswap) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error { + return multierr.Combine( + bs.Client.NotifyNewBlocks(ctx, blks...), + bs.Server.NotifyNewBlocks(ctx, blks...), + ) +} + +type Stat struct { + Wantlist []cid.Cid + Peers []string + BlocksReceived uint64 + DataReceived uint64 + DupBlksReceived uint64 + DupDataReceived uint64 + MessagesReceived uint64 + BlocksSent uint64 + DataSent uint64 + ProvideBufLen int +} + +func (bs *Bitswap) Stat() (*Stat, error) { + cs, err := bs.Client.Stat() + if err != nil { + return nil, err + } + ss, err := bs.Server.Stat() + if err != nil { + return nil, err + } + + return &Stat{ + Wantlist: cs.Wantlist, + BlocksReceived: cs.BlocksReceived, + DataReceived: cs.DataReceived, + DupBlksReceived: cs.DupBlksReceived, + DupDataReceived: cs.DupDataReceived, + MessagesReceived: cs.MessagesReceived, + Peers: ss.Peers, + BlocksSent: ss.BlocksSent, + DataSent: ss.DataSent, + ProvideBufLen: ss.ProvideBufLen, + }, nil +} + +func (bs *Bitswap) Close() error { + bs.net.Stop() + return multierr.Combine( + bs.Client.Close(), + bs.Server.Close(), + ) +} + +func (bs *Bitswap) WantlistForPeer(p peer.ID) []cid.Cid { + if p == bs.net.Self() { + return bs.Client.GetWantlist() + } + return bs.Server.WantlistForPeer(p) +} + +func (bs *Bitswap) PeerConnected(p peer.ID) { + bs.Client.PeerConnected(p) + bs.Server.PeerConnected(p) +} + +func (bs *Bitswap) PeerDisconnected(p peer.ID) { + bs.Client.PeerDisconnected(p) + bs.Server.PeerDisconnected(p) +} + +func (bs *Bitswap) ReceiveError(err error) { + log.Infof("Bitswap Client ReceiveError: %s", err) + // TODO log the network error + // TODO bubble the network error up to the parent context/error logger +} + +func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming message.BitSwapMessage) { + if bs.tracer != nil { + bs.tracer.MessageReceived(p, 
incoming) + } + + bs.Client.ReceiveMessage(ctx, p, incoming) + bs.Server.ReceiveMessage(ctx, p, incoming) +} diff --git a/bitswap/sendOnlyTracer.go b/bitswap/sendOnlyTracer.go new file mode 100644 index 000000000..1a12403fa --- /dev/null +++ b/bitswap/sendOnlyTracer.go @@ -0,0 +1,20 @@ +package bitswap + +import ( + "github.com/ipfs/go-bitswap/message" + "github.com/ipfs/go-bitswap/tracer" + "github.com/libp2p/go-libp2p-core/peer" +) + +type sendOnlyTracer interface { + MessageSent(peer.ID, message.BitSwapMessage) +} + +var _ tracer.Tracer = nopReceiveTracer{} + +// we need to only trace sends because we already trace receives in the polyfill object (to not get them traced twice) +type nopReceiveTracer struct { + sendOnlyTracer +} + +func (nopReceiveTracer) MessageReceived(peer.ID, message.BitSwapMessage) {} diff --git a/bitswap/server/forward.go b/bitswap/server/forward.go new file mode 100644 index 000000000..67f5b2a5e --- /dev/null +++ b/bitswap/server/forward.go @@ -0,0 +1,13 @@ +package server + +import ( + "github.com/ipfs/go-bitswap/server/internal/decision" +) + +type ( + Receipt = decision.Receipt + PeerBlockRequestFilter = decision.PeerBlockRequestFilter + TaskComparator = decision.TaskComparator + ScoreLedger = decision.ScoreLedger + ScorePeerFunc = decision.ScorePeerFunc +) diff --git a/bitswap/internal/decision/blockstoremanager.go b/bitswap/server/internal/decision/blockstoremanager.go similarity index 96% rename from bitswap/internal/decision/blockstoremanager.go rename to bitswap/server/internal/decision/blockstoremanager.go index 5bc456a96..01eae5a3c 100644 --- a/bitswap/internal/decision/blockstoremanager.go +++ b/bitswap/server/internal/decision/blockstoremanager.go @@ -107,7 +107,7 @@ func (bsm *blockstoreManager) getBlockSizes(ctx context.Context, ks []cid.Cid) ( } func (bsm *blockstoreManager) getBlocks(ctx context.Context, ks []cid.Cid) (map[cid.Cid]blocks.Block, error) { - res := make(map[cid.Cid]blocks.Block) + res := 
make(map[cid.Cid]blocks.Block, len(ks)) if len(ks) == 0 { return res, nil } @@ -120,17 +120,18 @@ func (bsm *blockstoreManager) getBlocks(ctx context.Context, ks []cid.Cid) (map[ // Note: this isn't a fatal error. We shouldn't abort the request log.Errorf("blockstore.Get(%s) error: %s", c, err) } - } else { - lk.Lock() - res[c] = blk - lk.Unlock() + return } + + lk.Lock() + res[c] = blk + lk.Unlock() }) } func (bsm *blockstoreManager) jobPerKey(ctx context.Context, ks []cid.Cid, jobFn func(c cid.Cid)) error { var err error - wg := sync.WaitGroup{} + var wg sync.WaitGroup for _, k := range ks { c := k wg.Add(1) diff --git a/bitswap/internal/decision/blockstoremanager_test.go b/bitswap/server/internal/decision/blockstoremanager_test.go similarity index 100% rename from bitswap/internal/decision/blockstoremanager_test.go rename to bitswap/server/internal/decision/blockstoremanager_test.go diff --git a/bitswap/internal/decision/engine.go b/bitswap/server/internal/decision/engine.go similarity index 92% rename from bitswap/internal/decision/engine.go rename to bitswap/server/internal/decision/engine.go index 27809a4c8..d1ccdeb02 100644 --- a/bitswap/internal/decision/engine.go +++ b/bitswap/server/internal/decision/engine.go @@ -9,9 +9,11 @@ import ( "github.com/google/uuid" + wl "github.com/ipfs/go-bitswap/client/wantlist" + "github.com/ipfs/go-bitswap/internal/defaults" bsmsg "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" - wl "github.com/ipfs/go-bitswap/wantlist" + bmetrics "github.com/ipfs/go-bitswap/metrics" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" bstore "github.com/ipfs/go-ipfs-blockstore" @@ -182,6 +184,9 @@ type Engine struct { taskComparator TaskComparator peerBlockRequestFilter PeerBlockRequestFilter + + bstoreWorkerCount int + maxOutstandingBytesPerPeer int } // TaskInfo represents the details of a request from a peer. 
@@ -227,6 +232,50 @@ func WithTargetMessageSize(size int) Option { } } +func WithScoreLedger(scoreledger ScoreLedger) Option { + return func(e *Engine) { + e.scoreLedger = scoreledger + } +} + +// WithBlockstoreWorkerCount sets the number of worker threads used for +// blockstore operations in the decision engine +func WithBlockstoreWorkerCount(count int) Option { + if count <= 0 { + panic(fmt.Sprintf("Engine blockstore worker count is %d but must be > 0", count)) + } + return func(e *Engine) { + e.bstoreWorkerCount = count + } +} + +// WithTaskWorkerCount sets the number of worker threads used inside the engine +func WithTaskWorkerCount(count int) Option { + if count <= 0 { + panic(fmt.Sprintf("Engine task worker count is %d but must be > 0", count)) + } + return func(e *Engine) { + e.taskWorkerCount = count + } +} + +// WithMaxOutstandingBytesPerPeer describes approximately how much work we are will to have outstanding to a peer at any +// given time. Setting it to 0 will disable any limiting. +func WithMaxOutstandingBytesPerPeer(count int) Option { + if count < 0 { + panic(fmt.Sprintf("max outstanding bytes per peer is %d but must be >= 0", count)) + } + return func(e *Engine) { + e.maxOutstandingBytesPerPeer = count + } +} + +func WithSetSendDontHave(send bool) Option { + return func(e *Engine) { + e.sendDontHaves = send + } +} + // wrapTaskComparator wraps a TaskComparator so it can be used as a QueueTaskComparator func wrapTaskComparator(tc TaskComparator) peertask.QueueTaskComparator { return func(a, b *peertask.QueueTask) bool { @@ -257,84 +306,64 @@ func wrapTaskComparator(tc TaskComparator) peertask.QueueTaskComparator { // work already outstanding. 
func NewEngine( bs bstore.Blockstore, - bstoreWorkerCount, - engineTaskWorkerCount, maxOutstandingBytesPerPeer int, peerTagger PeerTagger, self peer.ID, - scoreLedger ScoreLedger, - pendingEngineGauge metrics.Gauge, - activeEngineGauge metrics.Gauge, - pendingBlocksGauge metrics.Gauge, - activeBlocksGauge metrics.Gauge, + metrics *bmetrics.Metrics, opts ...Option, ) *Engine { return newEngine( bs, - bstoreWorkerCount, - engineTaskWorkerCount, - maxOutstandingBytesPerPeer, peerTagger, self, maxBlockSizeReplaceHasWithBlock, - scoreLedger, - pendingEngineGauge, - activeEngineGauge, - pendingBlocksGauge, - activeBlocksGauge, + metrics, opts..., ) } func newEngine( bs bstore.Blockstore, - bstoreWorkerCount, - engineTaskWorkerCount, maxOutstandingBytesPerPeer int, peerTagger PeerTagger, self peer.ID, maxReplaceSize int, - scoreLedger ScoreLedger, - pendingEngineGauge metrics.Gauge, - activeEngineGauge metrics.Gauge, - pendingBlocksGauge metrics.Gauge, - activeBlocksGauge metrics.Gauge, + metrics *bmetrics.Metrics, opts ...Option, ) *Engine { - if scoreLedger == nil { - scoreLedger = NewDefaultScoreLedger() - } - e := &Engine{ ledgerMap: make(map[peer.ID]*ledger), - scoreLedger: scoreLedger, - bsm: newBlockstoreManager(bs, bstoreWorkerCount, pendingBlocksGauge, activeBlocksGauge), + scoreLedger: NewDefaultScoreLedger(), + bstoreWorkerCount: defaults.BitswapEngineBlockstoreWorkerCount, + maxOutstandingBytesPerPeer: defaults.BitswapMaxOutstandingBytesPerPeer, peerTagger: peerTagger, outbox: make(chan (<-chan *Envelope), outboxChanBuffer), workSignal: make(chan struct{}, 1), ticker: time.NewTicker(time.Millisecond * 100), maxBlockSizeReplaceHasWithBlock: maxReplaceSize, - taskWorkerCount: engineTaskWorkerCount, + taskWorkerCount: defaults.BitswapEngineTaskWorkerCount, sendDontHaves: true, self: self, peerLedger: newPeerLedger(), - pendingGauge: pendingEngineGauge, - activeGauge: activeEngineGauge, + pendingGauge: metrics.PendingEngineGauge(), + activeGauge: 
metrics.ActiveEngineGauge(), targetMessageSize: defaultTargetMessageSize, + tagQueued: fmt.Sprintf(tagFormat, "queued", uuid.New().String()), + tagUseful: fmt.Sprintf(tagFormat, "useful", uuid.New().String()), } - e.tagQueued = fmt.Sprintf(tagFormat, "queued", uuid.New().String()) - e.tagUseful = fmt.Sprintf(tagFormat, "useful", uuid.New().String()) for _, opt := range opts { opt(e) } + e.bsm = newBlockstoreManager(bs, e.bstoreWorkerCount, metrics.PendingBlocksGauge(), metrics.ActiveBlocksGauge()) + // default peer task queue options peerTaskQueueOpts := []peertaskqueue.Option{ peertaskqueue.OnPeerAddedHook(e.onPeerAdded), peertaskqueue.OnPeerRemovedHook(e.onPeerRemoved), peertaskqueue.TaskMerger(newTaskMerger()), peertaskqueue.IgnoreFreezing(true), - peertaskqueue.MaxOutstandingWorkPerPeer(maxOutstandingBytesPerPeer), + peertaskqueue.MaxOutstandingWorkPerPeer(e.maxOutstandingBytesPerPeer), } if e.taskComparator != nil { diff --git a/bitswap/internal/decision/engine_test.go b/bitswap/server/internal/decision/engine_test.go similarity index 93% rename from bitswap/internal/decision/engine_test.go rename to bitswap/server/internal/decision/engine_test.go index 79b80cb52..853cc3bf2 100644 --- a/bitswap/internal/decision/engine_test.go +++ b/bitswap/server/internal/decision/engine_test.go @@ -11,11 +11,10 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/ipfs/go-bitswap/internal/defaults" "github.com/ipfs/go-bitswap/internal/testutil" message "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" - "github.com/ipfs/go-metrics-interface" + "github.com/ipfs/go-bitswap/metrics" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" @@ -100,7 +99,7 @@ func newTestEngine(ctx context.Context, idStr string, opts ...Option) engineSet func newTestEngineWithSampling(ctx context.Context, idStr string, peerSampleInterval time.Duration, sampleCh chan struct{}, clock clock.Clock, opts ...Option) engineSet { fpt := &fakePeerTagger{} 
bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, fpt, "localhost", 0, NewTestScoreLedger(peerSampleInterval, sampleCh, clock), opts...) + e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, append(opts[:len(opts):len(opts)], WithScoreLedger(NewTestScoreLedger(peerSampleInterval, sampleCh, clock)), WithBlockstoreWorkerCount(4))...) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) return engineSet{ Peer: peer.ID(idStr), @@ -188,31 +187,17 @@ func peerIsPartner(p peer.ID, e *Engine) bool { func newEngineForTesting( ctx context.Context, bs blockstore.Blockstore, - bstoreWorkerCount, - engineTaskWorkerCount, maxOutstandingBytesPerPeer int, peerTagger PeerTagger, self peer.ID, maxReplaceSize int, - scoreLedger ScoreLedger, opts ...Option, ) *Engine { - testPendingEngineGauge := metrics.NewCtx(ctx, "pending_tasks", "Total number of pending tasks").Gauge() - testActiveEngineGauge := metrics.NewCtx(ctx, "active_tasks", "Total number of active tasks").Gauge() - testPendingBlocksGauge := metrics.NewCtx(ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() - testActiveBlocksGauge := metrics.NewCtx(ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() return newEngine( bs, - bstoreWorkerCount, - engineTaskWorkerCount, - maxOutstandingBytesPerPeer, peerTagger, self, maxReplaceSize, - scoreLedger, - testPendingEngineGauge, - testActiveEngineGauge, - testPendingBlocksGauge, - testActiveBlocksGauge, + metrics.New(ctx), opts..., ) } @@ -220,7 +205,7 @@ func newEngineForTesting( func TestOutboxClosedWhenEngineClosed(t *testing.T) { t.SkipNow() // TODO implement *Engine.Close ctx := context.Background() - e := newEngineForTesting(ctx, blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), 4, defaults.BitswapEngineTaskWorkerCount, 
defaults.BitswapMaxOutstandingBytesPerPeer, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) + e := newEngineForTesting(ctx, blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) var wg sync.WaitGroup wg.Add(1) @@ -549,7 +534,7 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { } ctx := context.Background() - e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) + e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) for i, testCase := range testCases { t.Logf("Test case %d:", i) @@ -706,7 +691,7 @@ func TestPartnerWantHaveWantBlockActive(t *testing.T) { } ctx := context.Background() - e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) + e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) var next envChan @@ -891,7 +876,7 @@ func TestPartnerWantsThenCancels(t *testing.T) { ctx := context.Background() for i := 0; i < numRounds; i++ { expected := make([][]string, 0, len(testcases)) - e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, &fakePeerTagger{}, "localhost", 0, 
NewTestScoreLedger(shortTerm, nil, clock.New())) + e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) for _, testcase := range testcases { set := testcase[0] @@ -917,7 +902,7 @@ func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { otherPeer := libp2ptest.RandPeerIDFatal(t) ctx := context.Background() - e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) + e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) @@ -963,7 +948,7 @@ func TestSendDontHave(t *testing.T) { otherPeer := libp2ptest.RandPeerIDFatal(t) ctx := context.Background() - e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) + e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) @@ -1029,7 +1014,7 @@ func TestWantlistForPeer(t *testing.T) { otherPeer := libp2ptest.RandPeerIDFatal(t) ctx := context.Background() - e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil, clock.New())) + e := newEngineForTesting(ctx, bs, 
&fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) blks := testutil.GenerateBlocksOfSize(4, 8*1024) @@ -1079,8 +1064,7 @@ func TestTaskComparator(t *testing.T) { } // use a single task worker so that the order of outgoing messages is deterministic - engineTaskWorkerCount := 1 - e := newEngineForTesting(ctx, bs, 4, engineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, fpt, "localhost", 0, sl, + e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, WithScoreLedger(sl), WithBlockstoreWorkerCount(4), WithTaskWorkerCount(1), // if this Option is omitted, the test fails WithTaskComparator(func(ta, tb *TaskInfo) bool { // prioritize based on lexicographic ordering of block content @@ -1139,7 +1123,7 @@ func TestPeerBlockFilter(t *testing.T) { t.Fatal(err) } - e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, fpt, "localhost", 0, sl, + e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, WithScoreLedger(sl), WithBlockstoreWorkerCount(4), WithPeerBlockRequestFilter(func(p peer.ID, c cid.Cid) bool { // peer 0 has access to everything if p == peerIDs[0] { @@ -1296,7 +1280,7 @@ func TestPeerBlockFilterMutability(t *testing.T) { filterAllowList := make(map[cid.Cid]bool) - e := newEngineForTesting(ctx, bs, 4, defaults.BitswapEngineTaskWorkerCount, defaults.BitswapMaxOutstandingBytesPerPeer, fpt, "localhost", 0, sl, + e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, WithScoreLedger(sl), WithBlockstoreWorkerCount(4), WithPeerBlockRequestFilter(func(p peer.ID, c cid.Cid) bool { return filterAllowList[c] }), diff --git a/bitswap/internal/decision/ewma.go b/bitswap/server/internal/decision/ewma.go similarity index 100% rename from bitswap/internal/decision/ewma.go rename to bitswap/server/internal/decision/ewma.go diff --git 
a/bitswap/internal/decision/ledger.go b/bitswap/server/internal/decision/ledger.go similarity index 94% rename from bitswap/internal/decision/ledger.go rename to bitswap/server/internal/decision/ledger.go index 58723d0fb..a848f7b03 100644 --- a/bitswap/internal/decision/ledger.go +++ b/bitswap/server/internal/decision/ledger.go @@ -3,8 +3,8 @@ package decision import ( "sync" + wl "github.com/ipfs/go-bitswap/client/wantlist" pb "github.com/ipfs/go-bitswap/message/pb" - wl "github.com/ipfs/go-bitswap/wantlist" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" diff --git a/bitswap/internal/decision/peer_ledger.go b/bitswap/server/internal/decision/peer_ledger.go similarity index 100% rename from bitswap/internal/decision/peer_ledger.go rename to bitswap/server/internal/decision/peer_ledger.go diff --git a/bitswap/internal/decision/scoreledger.go b/bitswap/server/internal/decision/scoreledger.go similarity index 100% rename from bitswap/internal/decision/scoreledger.go rename to bitswap/server/internal/decision/scoreledger.go diff --git a/bitswap/internal/decision/taskmerger.go b/bitswap/server/internal/decision/taskmerger.go similarity index 100% rename from bitswap/internal/decision/taskmerger.go rename to bitswap/server/internal/decision/taskmerger.go diff --git a/bitswap/internal/decision/taskmerger_test.go b/bitswap/server/internal/decision/taskmerger_test.go similarity index 100% rename from bitswap/internal/decision/taskmerger_test.go rename to bitswap/server/internal/decision/taskmerger_test.go diff --git a/bitswap/server/server.go b/bitswap/server/server.go new file mode 100644 index 000000000..8cbe4682c --- /dev/null +++ b/bitswap/server/server.go @@ -0,0 +1,531 @@ +package server + +import ( + "context" + "errors" + "fmt" + "sort" + "sync" + "time" + + "github.com/ipfs/go-bitswap/internal/defaults" + "github.com/ipfs/go-bitswap/message" + pb "github.com/ipfs/go-bitswap/message/pb" + bmetrics "github.com/ipfs/go-bitswap/metrics" + bsnet 
"github.com/ipfs/go-bitswap/network" + "github.com/ipfs/go-bitswap/server/internal/decision" + "github.com/ipfs/go-bitswap/tracer" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-ipfs-blockstore" + logging "github.com/ipfs/go-log" + "github.com/ipfs/go-metrics-interface" + process "github.com/jbenet/goprocess" + procctx "github.com/jbenet/goprocess/context" + "github.com/libp2p/go-libp2p-core/peer" + "go.uber.org/zap" +) + +var ( + // HasBlockBufferSize is the buffer size of the channel for new blocks + // that need to be provided. They should get pulled over by the + // provideCollector even before they are actually provided. + // TODO: Does this need to be this large givent that? + HasBlockBufferSize = 256 + provideKeysBufferSize = 2048 +) + +var log = logging.Logger("bitswap-server") +var sflog = log.Desugar() + +const provideWorkerMax = 6 + +type Option func(*Server) + +type Server struct { + sentHistogram metrics.Histogram + sendTimeHistogram metrics.Histogram + + // the engine is the bit of logic that decides who to send which blocks to + engine *decision.Engine + + // network delivers messages on behalf of the session + network bsnet.BitSwapNetwork + + // External statistics interface + tracer tracer.Tracer + + // Counters for various statistics + counterLk sync.Mutex + counters Stat + + // the total number of simultaneous threads sending outgoing messages + taskWorkerCount int + + process process.Process + + // newBlocks is a channel for newly added blocks to be provided to the + // network. 
blocks pushed down this channel get buffered and fed to the + // provideKeys channel later on to avoid too much network activity + newBlocks chan cid.Cid + // provideKeys directly feeds provide workers + provideKeys chan cid.Cid + + // Extra options to pass to the decision manager + engineOptions []decision.Option + + // whether or not to make provide announcements + provideEnabled bool +} + +func New(ctx context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, m *bmetrics.Metrics, options ...Option) *Server { + ctx, cancel := context.WithCancel(ctx) + + px := process.WithTeardown(func() error { + return nil + }) + go func() { + <-px.Closing() // process closes first + cancel() + }() + + s := &Server{ + sentHistogram: m.SentHist(), + sendTimeHistogram: m.SendTimeHist(), + taskWorkerCount: defaults.BitswapTaskWorkerCount, + network: network, + process: px, + provideEnabled: true, + newBlocks: make(chan cid.Cid, HasBlockBufferSize), + provideKeys: make(chan cid.Cid, provideKeysBufferSize), + } + + for _, o := range options { + o(s) + } + + // Set up decision engine + s.engine = decision.NewEngine( + bstore, + network.ConnectionManager(), + network.Self(), + m, + s.engineOptions..., + ) + s.engineOptions = nil + + s.startWorkers(ctx, px) + + return s +} + +func TaskWorkerCount(count int) Option { + if count <= 0 { + panic(fmt.Sprintf("task worker count is %d but must be > 0", count)) + } + return func(bs *Server) { + bs.taskWorkerCount = count + } +} + +func WithTracer(tap tracer.Tracer) Option { + return func(bs *Server) { + bs.tracer = tap + } +} + +// ProvideEnabled is an option for enabling/disabling provide announcements +func ProvideEnabled(enabled bool) Option { + return func(bs *Server) { + bs.provideEnabled = enabled + } +} + +func WithPeerBlockRequestFilter(pbrf decision.PeerBlockRequestFilter) Option { + o := decision.WithPeerBlockRequestFilter(pbrf) + return func(bs *Server) { + bs.engineOptions = append(bs.engineOptions, o) + } +} + 
+// WithTaskComparator configures custom task prioritization logic. +func WithTaskComparator(comparator decision.TaskComparator) Option { + o := decision.WithTaskComparator(comparator) + return func(bs *Server) { + bs.engineOptions = append(bs.engineOptions, o) + } +} + +// Configures the engine to use the given score decision logic. +func WithScoreLedger(scoreLedger decision.ScoreLedger) Option { + o := decision.WithScoreLedger(scoreLedger) + return func(bs *Server) { + bs.engineOptions = append(bs.engineOptions, o) + } +} + +// LedgerForPeer returns aggregated data about blocks swapped and communication +// with a given peer. +func (bs *Server) LedgerForPeer(p peer.ID) *decision.Receipt { + return bs.engine.LedgerForPeer(p) +} + +// EngineTaskWorkerCount sets the number of worker threads used inside the engine +func EngineTaskWorkerCount(count int) Option { + o := decision.WithTaskWorkerCount(count) + return func(bs *Server) { + bs.engineOptions = append(bs.engineOptions, o) + } +} + +// SetSendDontHaves indicates what to do when the engine receives a want-block +// for a block that is not in the blockstore. Either +// - Send a DONT_HAVE message +// - Simply don't respond +// This option is only used for testing. 
+func SetSendDontHaves(send bool) Option { + o := decision.WithSetSendDontHave(send) + return func(bs *Server) { + bs.engineOptions = append(bs.engineOptions, o) + } +} + +// EngineBlockstoreWorkerCount sets the number of worker threads used for +// blockstore operations in the decision engine +func EngineBlockstoreWorkerCount(count int) Option { + o := decision.WithBlockstoreWorkerCount(count) + return func(bs *Server) { + bs.engineOptions = append(bs.engineOptions, o) + } +} + +func WithTargetMessageSize(tms int) Option { + o := decision.WithTargetMessageSize(tms) + return func(bs *Server) { + bs.engineOptions = append(bs.engineOptions, o) + } +} + +// MaxOutstandingBytesPerPeer describes approximately how much work we are will to have outstanding to a peer at any +// given time. Setting it to 0 will disable any limiting. +func MaxOutstandingBytesPerPeer(count int) Option { + o := decision.WithMaxOutstandingBytesPerPeer(count) + return func(bs *Server) { + bs.engineOptions = append(bs.engineOptions, o) + } +} + +// WantlistForPeer returns the currently understood list of blocks requested by a +// given peer. 
+func (bs *Server) WantlistForPeer(p peer.ID) []cid.Cid { + var out []cid.Cid + for _, e := range bs.engine.WantlistForPeer(p) { + out = append(out, e.Cid) + } + return out +} + +func (bs *Server) startWorkers(ctx context.Context, px process.Process) { + bs.engine.StartWorkers(ctx, px) + + // Start up workers to handle requests from other nodes for the data on this node + for i := 0; i < bs.taskWorkerCount; i++ { + i := i + px.Go(func(px process.Process) { + bs.taskWorker(ctx, i) + }) + } + + if bs.provideEnabled { + // Start up a worker to manage sending out provides messages + px.Go(func(px process.Process) { + bs.provideCollector(ctx) + }) + + // Spawn up multiple workers to handle incoming blocks + // consider increasing number if providing blocks bottlenecks + // file transfers + px.Go(bs.provideWorker) + } +} + +func (bs *Server) taskWorker(ctx context.Context, id int) { + defer log.Debug("bitswap task worker shutting down...") + log := log.With("ID", id) + for { + log.Debug("Bitswap.TaskWorker.Loop") + select { + case nextEnvelope := <-bs.engine.Outbox(): + select { + case envelope, ok := <-nextEnvelope: + if !ok { + continue + } + + start := time.Now() + + // TODO: Only record message as sent if there was no error? + // Ideally, yes. But we'd need some way to trigger a retry and/or drop + // the peer. 
+ bs.engine.MessageSent(envelope.Peer, envelope.Message) + if bs.tracer != nil { + bs.tracer.MessageSent(envelope.Peer, envelope.Message) + } + bs.sendBlocks(ctx, envelope) + + dur := time.Since(start) + bs.sendTimeHistogram.Observe(dur.Seconds()) + + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } +} + +func (bs *Server) logOutgoingBlocks(env *decision.Envelope) { + if ce := sflog.Check(zap.DebugLevel, "sent message"); ce == nil { + return + } + + self := bs.network.Self() + + for _, blockPresence := range env.Message.BlockPresences() { + c := blockPresence.Cid + switch blockPresence.Type { + case pb.Message_Have: + log.Debugw("sent message", + "type", "HAVE", + "cid", c, + "local", self, + "to", env.Peer, + ) + case pb.Message_DontHave: + log.Debugw("sent message", + "type", "DONT_HAVE", + "cid", c, + "local", self, + "to", env.Peer, + ) + default: + panic(fmt.Sprintf("unrecognized BlockPresence type %v", blockPresence.Type)) + } + + } + for _, block := range env.Message.Blocks() { + log.Debugw("sent message", + "type", "BLOCK", + "cid", block.Cid(), + "local", self, + "to", env.Peer, + ) + } +} + +func (bs *Server) sendBlocks(ctx context.Context, env *decision.Envelope) { + // Blocks need to be sent synchronously to maintain proper backpressure + // throughout the network stack + defer env.Sent() + + err := bs.network.SendMessage(ctx, env.Peer, env.Message) + if err != nil { + log.Debugw("failed to send blocks message", + "peer", env.Peer, + "error", err, + ) + return + } + + bs.logOutgoingBlocks(env) + + dataSent := 0 + blocks := env.Message.Blocks() + for _, b := range blocks { + dataSent += len(b.RawData()) + } + bs.counterLk.Lock() + bs.counters.BlocksSent += uint64(len(blocks)) + bs.counters.DataSent += uint64(dataSent) + bs.counterLk.Unlock() + bs.sentHistogram.Observe(float64(env.Message.Size())) + log.Debugw("sent message", "peer", env.Peer) +} + +type Stat struct { + Peers []string + ProvideBufLen int + BlocksSent uint64 + DataSent 
uint64 +} + +// Stat returns aggregated statistics about bitswap operations +func (bs *Server) Stat() (Stat, error) { + bs.counterLk.Lock() + s := bs.counters + bs.counterLk.Unlock() + s.ProvideBufLen = len(bs.newBlocks) + + peers := bs.engine.Peers() + peersStr := make([]string, len(peers)) + for i, p := range peers { + peersStr[i] = p.Pretty() + } + sort.Strings(peersStr) + s.Peers = peersStr + + return s, nil +} + +// NotifyNewBlocks announces the existence of blocks to this bitswap service. The +// service will potentially notify its peers. +// Bitswap itself doesn't store new blocks. It's the caller responsibility to ensure +// that those blocks are available in the blockstore before calling this function. +func (bs *Server) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error { + select { + case <-bs.process.Closing(): + return errors.New("bitswap is closed") + default: + } + + // Send wanted blocks to decision engine + bs.engine.NotifyNewBlocks(blks) + + // If the reprovider is enabled, send block to reprovider + if bs.provideEnabled { + for _, blk := range blks { + select { + case bs.newBlocks <- blk.Cid(): + // send block off to be reprovided + case <-bs.process.Closing(): + return bs.process.Close() + } + } + } + + return nil +} + +func (bs *Server) provideCollector(ctx context.Context) { + defer close(bs.provideKeys) + var toProvide []cid.Cid + var nextKey cid.Cid + var keysOut chan cid.Cid + + for { + select { + case blkey, ok := <-bs.newBlocks: + if !ok { + log.Debug("newBlocks channel closed") + return + } + + if keysOut == nil { + nextKey = blkey + keysOut = bs.provideKeys + } else { + toProvide = append(toProvide, blkey) + } + case keysOut <- nextKey: + if len(toProvide) > 0 { + nextKey = toProvide[0] + toProvide = toProvide[1:] + } else { + keysOut = nil + } + case <-ctx.Done(): + return + } + } +} + +func (bs *Server) provideWorker(px process.Process) { + // FIXME: OnClosingContext returns a _custom_ context type. 
+ // Unfortunately, deriving a new cancelable context from this custom + // type fires off a goroutine. To work around this, we create a single + // cancelable context up-front and derive all sub-contexts from that. + // + // See: https://github.com/ipfs/go-ipfs/issues/5810 + ctx := procctx.OnClosingContext(px) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + limit := make(chan struct{}, provideWorkerMax) + + limitedGoProvide := func(k cid.Cid, wid int) { + defer func() { + // replace token when done + <-limit + }() + + log.Debugw("Bitswap.ProvideWorker.Start", "ID", wid, "cid", k) + defer log.Debugw("Bitswap.ProvideWorker.End", "ID", wid, "cid", k) + + ctx, cancel := context.WithTimeout(ctx, defaults.ProvideTimeout) // timeout ctx + defer cancel() + + if err := bs.network.Provide(ctx, k); err != nil { + log.Warn(err) + } + } + + // worker spawner, reads from bs.provideKeys until it closes, spawning a + // _ratelimited_ number of workers to handle each key. + for wid := 2; ; wid++ { + log.Debug("Bitswap.ProvideWorker.Loop") + + select { + case <-px.Closing(): + return + case k, ok := <-bs.provideKeys: + if !ok { + log.Debug("provideKeys channel closed") + return + } + select { + case <-px.Closing(): + return + case limit <- struct{}{}: + go limitedGoProvide(k, wid) + } + } + } +} + +func (bs *Server) ReceiveMessage(ctx context.Context, p peer.ID, incoming message.BitSwapMessage) { + // This call records changes to wantlists, blocks received, + // and number of bytes transfered. + bs.engine.MessageReceived(ctx, p, incoming) + // TODO: this is bad, and could be easily abused. + // Should only track *useful* messages in ledger + + if bs.tracer != nil { + bs.tracer.MessageReceived(p, incoming) + } +} + +// ReceivedBlocks notify the decision engine that a peer is well behaving +// and gave us usefull data, potentially increasing it's score and making us +// send them more data in exchange. 
+func (bs *Server) ReceivedBlocks(from peer.ID, blks []blocks.Block) { + bs.engine.ReceivedBlocks(from, blks) +} + +func (*Server) ReceiveError(err error) { + log.Infof("Bitswap Client ReceiveError: %s", err) + // TODO log the network error + // TODO bubble the network error up to the parent context/error logger + +} +func (bs *Server) PeerConnected(p peer.ID) { + bs.engine.PeerConnected(p) +} +func (bs *Server) PeerDisconnected(p peer.ID) { + bs.engine.PeerDisconnected(p) +} + +// Close is called to shutdown the Client +func (bs *Server) Close() error { + return bs.process.Close() +} diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index b5405841b..975bf98b3 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -183,17 +183,42 @@ func (n *network) SendMessage( return nil } +var _ bsnet.Receiver = (*networkClient)(nil) + type networkClient struct { // These need to be at the top of the struct (allocated on the heap) for alignment on 32bit platforms. 
stats bsnet.Stats - local peer.ID - bsnet.Receiver + local peer.ID + receivers []bsnet.Receiver network *network routing routing.Routing supportedProtocols []protocol.ID } +func (nc *networkClient) ReceiveMessage(ctx context.Context, sender peer.ID, incoming bsmsg.BitSwapMessage) { + for _, v := range nc.receivers { + v.ReceiveMessage(ctx, sender, incoming) + } +} + +func (nc *networkClient) ReceiveError(e error) { + for _, v := range nc.receivers { + v.ReceiveError(e) + } +} + +func (nc *networkClient) PeerConnected(p peer.ID) { + for _, v := range nc.receivers { + v.PeerConnected(p) + } +} +func (nc *networkClient) PeerDisconnected(p peer.ID) { + for _, v := range nc.receivers { + v.PeerDisconnected(p) + } +} + func (nc *networkClient) Self() peer.ID { return nc.local } @@ -300,8 +325,8 @@ func (nc *networkClient) Provide(ctx context.Context, k cid.Cid) error { return nc.routing.Provide(ctx, k, true) } -func (nc *networkClient) Start(r bsnet.Receiver) { - nc.Receiver = r +func (nc *networkClient) Start(r ...bsnet.Receiver) { + nc.receivers = r } func (nc *networkClient) Stop() { @@ -325,7 +350,7 @@ func (nc *networkClient) ConnectTo(_ context.Context, p peer.ID) error { nc.network.mu.Unlock() otherClient.receiver.PeerConnected(nc.local) - nc.Receiver.PeerConnected(p) + nc.PeerConnected(p) return nil } @@ -346,7 +371,7 @@ func (nc *networkClient) DisconnectFrom(_ context.Context, p peer.ID) error { delete(nc.network.conns, tag) otherClient.receiver.PeerDisconnected(nc.local) - nc.Receiver.PeerDisconnected(p) + nc.PeerDisconnected(p) return nil } diff --git a/bitswap/tracer.go b/bitswap/tracer/tracer.go similarity index 72% rename from bitswap/tracer.go rename to bitswap/tracer/tracer.go index dc977abdf..c5b70b7cd 100644 --- a/bitswap/tracer.go +++ b/bitswap/tracer/tracer.go @@ -1,4 +1,4 @@ -package bitswap +package tracer import ( bsmsg "github.com/ipfs/go-bitswap/message" @@ -11,10 +11,3 @@ type Tracer interface { MessageReceived(peer.ID, bsmsg.BitSwapMessage) 
MessageSent(peer.ID, bsmsg.BitSwapMessage) } - -// Configures Bitswap to use given tracer. -func WithTracer(tap Tracer) Option { - return func(bs *Bitswap) { - bs.tracer = tap - } -} diff --git a/bitswap/workers.go b/bitswap/workers.go deleted file mode 100644 index af4531adc..000000000 --- a/bitswap/workers.go +++ /dev/null @@ -1,228 +0,0 @@ -package bitswap - -import ( - "context" - "fmt" - "time" - - engine "github.com/ipfs/go-bitswap/internal/decision" - "github.com/ipfs/go-bitswap/internal/defaults" - pb "github.com/ipfs/go-bitswap/message/pb" - cid "github.com/ipfs/go-cid" - process "github.com/jbenet/goprocess" - procctx "github.com/jbenet/goprocess/context" - "go.uber.org/zap" -) - -func (bs *Bitswap) startWorkers(ctx context.Context, px process.Process) { - - // Start up workers to handle requests from other nodes for the data on this node - for i := 0; i < bs.taskWorkerCount; i++ { - i := i - px.Go(func(px process.Process) { - bs.taskWorker(ctx, i) - }) - } - - if bs.provideEnabled { - // Start up a worker to manage sending out provides messages - px.Go(func(px process.Process) { - bs.provideCollector(ctx) - }) - - // Spawn up multiple workers to handle incoming blocks - // consider increasing number if providing blocks bottlenecks - // file transfers - px.Go(bs.provideWorker) - } -} - -func (bs *Bitswap) taskWorker(ctx context.Context, id int) { - defer log.Debug("bitswap task worker shutting down...") - log := log.With("ID", id) - for { - log.Debug("Bitswap.TaskWorker.Loop") - select { - case nextEnvelope := <-bs.engine.Outbox(): - select { - case envelope, ok := <-nextEnvelope: - if !ok { - continue - } - - start := time.Now() - - // TODO: Only record message as sent if there was no error? - // Ideally, yes. But we'd need some way to trigger a retry and/or drop - // the peer. 
- bs.engine.MessageSent(envelope.Peer, envelope.Message) - if bs.tracer != nil { - bs.tracer.MessageSent(envelope.Peer, envelope.Message) - } - bs.sendBlocks(ctx, envelope) - - dur := time.Since(start) - bs.sendTimeHistogram.Observe(dur.Seconds()) - - case <-ctx.Done(): - return - } - case <-ctx.Done(): - return - } - } -} - -func (bs *Bitswap) logOutgoingBlocks(env *engine.Envelope) { - if ce := sflog.Check(zap.DebugLevel, "sent message"); ce == nil { - return - } - - self := bs.network.Self() - - for _, blockPresence := range env.Message.BlockPresences() { - c := blockPresence.Cid - switch blockPresence.Type { - case pb.Message_Have: - log.Debugw("sent message", - "type", "HAVE", - "cid", c, - "local", self, - "to", env.Peer, - ) - case pb.Message_DontHave: - log.Debugw("sent message", - "type", "DONT_HAVE", - "cid", c, - "local", self, - "to", env.Peer, - ) - default: - panic(fmt.Sprintf("unrecognized BlockPresence type %v", blockPresence.Type)) - } - - } - for _, block := range env.Message.Blocks() { - log.Debugw("sent message", - "type", "BLOCK", - "cid", block.Cid(), - "local", self, - "to", env.Peer, - ) - } -} - -func (bs *Bitswap) sendBlocks(ctx context.Context, env *engine.Envelope) { - // Blocks need to be sent synchronously to maintain proper backpressure - // throughout the network stack - defer env.Sent() - - err := bs.network.SendMessage(ctx, env.Peer, env.Message) - if err != nil { - log.Debugw("failed to send blocks message", - "peer", env.Peer, - "error", err, - ) - return - } - - bs.logOutgoingBlocks(env) - - dataSent := 0 - blocks := env.Message.Blocks() - for _, b := range blocks { - dataSent += len(b.RawData()) - } - bs.counterLk.Lock() - bs.counters.blocksSent += uint64(len(blocks)) - bs.counters.dataSent += uint64(dataSent) - bs.counterLk.Unlock() - bs.sentHistogram.Observe(float64(env.Message.Size())) - log.Debugw("sent message", "peer", env.Peer) -} - -func (bs *Bitswap) provideWorker(px process.Process) { - // FIXME: OnClosingContext 
returns a _custom_ context type. - // Unfortunately, deriving a new cancelable context from this custom - // type fires off a goroutine. To work around this, we create a single - // cancelable context up-front and derive all sub-contexts from that. - // - // See: https://github.com/ipfs/go-ipfs/issues/5810 - ctx := procctx.OnClosingContext(px) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - limit := make(chan struct{}, provideWorkerMax) - - limitedGoProvide := func(k cid.Cid, wid int) { - defer func() { - // replace token when done - <-limit - }() - - log.Debugw("Bitswap.ProvideWorker.Start", "ID", wid, "cid", k) - defer log.Debugw("Bitswap.ProvideWorker.End", "ID", wid, "cid", k) - - ctx, cancel := context.WithTimeout(ctx, defaults.ProvideTimeout) // timeout ctx - defer cancel() - - if err := bs.network.Provide(ctx, k); err != nil { - log.Warn(err) - } - } - - // worker spawner, reads from bs.provideKeys until it closes, spawning a - // _ratelimited_ number of workers to handle each key. 
- for wid := 2; ; wid++ { - log.Debug("Bitswap.ProvideWorker.Loop") - - select { - case <-px.Closing(): - return - case k, ok := <-bs.provideKeys: - if !ok { - log.Debug("provideKeys channel closed") - return - } - select { - case <-px.Closing(): - return - case limit <- struct{}{}: - go limitedGoProvide(k, wid) - } - } - } -} - -func (bs *Bitswap) provideCollector(ctx context.Context) { - defer close(bs.provideKeys) - var toProvide []cid.Cid - var nextKey cid.Cid - var keysOut chan cid.Cid - - for { - select { - case blkey, ok := <-bs.newBlocks: - if !ok { - log.Debug("newBlocks channel closed") - return - } - - if keysOut == nil { - nextKey = blkey - keysOut = bs.provideKeys - } else { - toProvide = append(toProvide, blkey) - } - case keysOut <- nextKey: - if len(toProvide) > 0 { - nextKey = toProvide[0] - toProvide = toProvide[1:] - } else { - keysOut = nil - } - case <-ctx.Done(): - return - } - } -} From 0406bfb1a8ddd967242538b22af45d8156d6f36b Mon Sep 17 00:00:00 2001 From: Jorropo Date: Sat, 6 Aug 2022 01:59:22 +0200 Subject: [PATCH 1026/1035] refactor: remove the need of generics This commit was moved from ipfs/go-bitswap@696d69dcf0b85a1cbb8ac06fa80dc9da923855b0 --- bitswap/metrics/gen.go | 115 ++++++++++++++++++++++++----------------- 1 file changed, 68 insertions(+), 47 deletions(-) diff --git a/bitswap/metrics/gen.go b/bitswap/metrics/gen.go index 22f16c535..000a8cde8 100644 --- a/bitswap/metrics/gen.go +++ b/bitswap/metrics/gen.go @@ -14,32 +14,21 @@ var ( timeMetricsBuckets = []float64{1, 10, 30, 60, 90, 120, 600} ) -type onceAble[T any] struct { - o sync.Once - v T -} - -func (o *onceAble[T]) reuseOrInit(creator func() T) T { - o.o.Do(func() { - o.v = creator() - }) - return o.v -} - // Metrics is a type which lazy initialize metrics objects. // It MUST not be copied. 
type Metrics struct { - ctx context.Context - - dupHist onceAble[metrics.Histogram] - allHist onceAble[metrics.Histogram] - sentHist onceAble[metrics.Histogram] - sendTimeHist onceAble[metrics.Histogram] - - pendingEngineGauge onceAble[metrics.Gauge] - activeEngineGauge onceAble[metrics.Gauge] - pendingBlocksGauge onceAble[metrics.Gauge] - activeBlocksGauge onceAble[metrics.Gauge] + ctx context.Context + lock sync.Mutex + + dupHist metrics.Histogram + allHist metrics.Histogram + sentHist metrics.Histogram + sendTimeHist metrics.Histogram + + pendingEngineGauge metrics.Gauge + activeEngineGauge metrics.Gauge + pendingBlocksGauge metrics.Gauge + activeBlocksGauge metrics.Gauge } func New(ctx context.Context) *Metrics { @@ -49,63 +38,95 @@ func New(ctx context.Context) *Metrics { // DupHist return recv_dup_blocks_bytes. // Threadsafe func (m *Metrics) DupHist() metrics.Histogram { - return m.dupHist.reuseOrInit(func() metrics.Histogram { - return metrics.NewCtx(m.ctx, "recv_dup_blocks_bytes", "Summary of duplicate data blocks recived").Histogram(metricsBuckets) - }) + m.lock.Lock() + defer m.lock.Unlock() + if m.dupHist != nil { + return m.dupHist + } + m.dupHist = metrics.NewCtx(m.ctx, "recv_dup_blocks_bytes", "Summary of duplicate data blocks recived").Histogram(metricsBuckets) + return m.dupHist } // AllHist returns recv_all_blocks_bytes. // Threadsafe func (m *Metrics) AllHist() metrics.Histogram { - return m.allHist.reuseOrInit(func() metrics.Histogram { - return metrics.NewCtx(m.ctx, "recv_all_blocks_bytes", "Summary of all data blocks recived").Histogram(metricsBuckets) - }) + m.lock.Lock() + defer m.lock.Unlock() + if m.allHist != nil { + return m.allHist + } + m.allHist = metrics.NewCtx(m.ctx, "recv_all_blocks_bytes", "Summary of all data blocks recived").Histogram(metricsBuckets) + return m.allHist } // SentHist returns sent_all_blocks_bytes. 
// Threadsafe func (m *Metrics) SentHist() metrics.Histogram { - return m.sentHist.reuseOrInit(func() metrics.Histogram { - return metrics.NewCtx(m.ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by this bitswap").Histogram(metricsBuckets) - }) + m.lock.Lock() + defer m.lock.Unlock() + if m.sentHist != nil { + return m.sentHist + } + m.sentHist = metrics.NewCtx(m.ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by this bitswap").Histogram(metricsBuckets) + return m.sentHist } // SendTimeHist returns send_times. // Threadsafe func (m *Metrics) SendTimeHist() metrics.Histogram { - return m.sendTimeHist.reuseOrInit(func() metrics.Histogram { - return metrics.NewCtx(m.ctx, "send_times", "Histogram of how long it takes to send messages in this bitswap").Histogram(timeMetricsBuckets) - }) + m.lock.Lock() + defer m.lock.Unlock() + if m.sendTimeHist != nil { + return m.sendTimeHist + } + m.sendTimeHist = metrics.NewCtx(m.ctx, "send_times", "Histogram of how long it takes to send messages in this bitswap").Histogram(timeMetricsBuckets) + return m.sendTimeHist } // PendingEngineGauge returns pending_tasks. // Threadsafe func (m *Metrics) PendingEngineGauge() metrics.Gauge { - return m.pendingEngineGauge.reuseOrInit(func() metrics.Gauge { - return metrics.NewCtx(m.ctx, "pending_tasks", "Total number of pending tasks").Gauge() - }) + m.lock.Lock() + defer m.lock.Unlock() + if m.pendingEngineGauge != nil { + return m.pendingEngineGauge + } + m.pendingEngineGauge = metrics.NewCtx(m.ctx, "pending_tasks", "Total number of pending tasks").Gauge() + return m.pendingEngineGauge } // ActiveEngineGauge returns active_tasks. 
// Threadsafe func (m *Metrics) ActiveEngineGauge() metrics.Gauge { - return m.activeEngineGauge.reuseOrInit(func() metrics.Gauge { - return metrics.NewCtx(m.ctx, "active_tasks", "Total number of active tasks").Gauge() - }) + m.lock.Lock() + defer m.lock.Unlock() + if m.activeEngineGauge != nil { + return m.activeEngineGauge + } + m.activeEngineGauge = metrics.NewCtx(m.ctx, "active_tasks", "Total number of active tasks").Gauge() + return m.activeEngineGauge } // PendingBlocksGauge returns pending_block_tasks. // Threadsafe func (m *Metrics) PendingBlocksGauge() metrics.Gauge { - return m.pendingBlocksGauge.reuseOrInit(func() metrics.Gauge { - return metrics.NewCtx(m.ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() - }) + m.lock.Lock() + defer m.lock.Unlock() + if m.pendingBlocksGauge != nil { + return m.pendingBlocksGauge + } + m.pendingBlocksGauge = metrics.NewCtx(m.ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() + return m.pendingBlocksGauge } // ActiveBlocksGauge returns active_block_tasks. // Threadsafe func (m *Metrics) ActiveBlocksGauge() metrics.Gauge { - return m.activeBlocksGauge.reuseOrInit(func() metrics.Gauge { - return metrics.NewCtx(m.ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() - }) + m.lock.Lock() + defer m.lock.Unlock() + if m.activeBlocksGauge != nil { + return m.activeBlocksGauge + } + m.activeBlocksGauge = metrics.NewCtx(m.ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() + return m.activeBlocksGauge } From 9fe61bace37dd91d90e88d8b07f92d52145409e9 Mon Sep 17 00:00:00 2001 From: Jorropo Date: Sat, 6 Aug 2022 23:13:33 +0200 Subject: [PATCH 1027/1035] test: remove TestTracer This test is exceptionally racy and IMO useless (you can go read the 10 lines of code making up tracing and convaince yourself it's working.) 
This commit was moved from ipfs/go-bitswap@1ac48243c0f8ea5291b8d5caf9c6207bb7ddfce4 --- bitswap/bitswap_test.go | 157 ---------------------------------------- bitswap/options.go | 11 +-- bitswap/polyfill.go | 2 +- 3 files changed, 2 insertions(+), 168 deletions(-) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 7c32c6469..33603726b 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -12,10 +12,8 @@ import ( "github.com/ipfs/go-bitswap" testinstance "github.com/ipfs/go-bitswap/client/testinstance" bsmsg "github.com/ipfs/go-bitswap/message" - pb "github.com/ipfs/go-bitswap/message/pb" "github.com/ipfs/go-bitswap/server" tn "github.com/ipfs/go-bitswap/testnet" - "github.com/ipfs/go-bitswap/tracer" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" detectrace "github.com/ipfs/go-detect-race" @@ -830,158 +828,3 @@ func TestWithScoreLedger(t *testing.T) { t.Fatal("Expected the score ledger to be closed within 5s") } } - -type logItem struct { - dir byte - pid peer.ID - msg bsmsg.BitSwapMessage -} -type mockTracer struct { - mu sync.Mutex - log []logItem -} - -func (m *mockTracer) MessageReceived(p peer.ID, msg bsmsg.BitSwapMessage) { - m.mu.Lock() - defer m.mu.Unlock() - m.log = append(m.log, logItem{'r', p, msg}) -} -func (m *mockTracer) MessageSent(p peer.ID, msg bsmsg.BitSwapMessage) { - m.mu.Lock() - defer m.mu.Unlock() - m.log = append(m.log, logItem{'s', p, msg}) -} - -func (m *mockTracer) getLog() []logItem { - m.mu.Lock() - defer m.mu.Unlock() - return m.log[:len(m.log):len(m.log)] -} - -func TestTracer(t *testing.T) { - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) - defer ig.Close() - bg := blocksutil.NewBlockGenerator() - - instances := ig.Instances(3) - blocks := bg.Blocks(2) - - // Install Tracer - wiretap := new(mockTracer) - updateTracer(instances[0].Exchange, wiretap) - - // First peer has block - 
addBlock(t, context.Background(), instances[0], blocks[0]) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - - // Second peer broadcasts want for block CID - // (Received by first and third peers) - _, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) - if err != nil { - t.Fatal(err) - } - - // When second peer receives block, it should send out a cancel, so third - // peer should no longer keep second peer's want - if err = tu.WaitFor(ctx, func() error { - if len(instances[2].Exchange.WantlistForPeer(instances[1].Peer)) != 0 { - return fmt.Errorf("should have no items in other peers wantlist") - } - if len(instances[1].Exchange.GetWantlist()) != 0 { - return fmt.Errorf("shouldnt have anything in wantlist") - } - return nil - }); err != nil { - t.Fatal(err) - } - - log := wiretap.getLog() - - // After communication, 3 messages should be logged via Tracer - if l := len(log); l != 3 { - t.Fatal("expected 3 items logged via Tracer, found", l) - } - - // Received: 'Have' - if log[0].dir != 'r' { - t.Error("expected message to be received") - } - if log[0].pid != instances[1].Peer { - t.Error("expected peer", instances[1].Peer, ", found", log[0].pid) - } - if l := len(log[0].msg.Wantlist()); l != 1 { - t.Fatal("expected 1 entry in Wantlist, found", l) - } - if log[0].msg.Wantlist()[0].WantType != pb.Message_Wantlist_Have { - t.Error("expected WantType equal to 'Have', found 'Block'") - } - - // Sent: Block - if log[1].dir != 's' { - t.Error("expected message to be sent") - } - if log[1].pid != instances[1].Peer { - t.Error("expected peer", instances[1].Peer, ", found", log[1].pid) - } - if l := len(log[1].msg.Blocks()); l != 1 { - t.Fatal("expected 1 entry in Blocks, found", l) - } - if log[1].msg.Blocks()[0].Cid() != blocks[0].Cid() { - t.Error("wrong block Cid") - } - - // Received: 'Cancel' - if log[2].dir != 'r' { - t.Error("expected message to be received") - } - if log[2].pid != instances[1].Peer { - 
t.Error("expected peer", instances[1].Peer, ", found", log[2].pid) - } - if l := len(log[2].msg.Wantlist()); l != 1 { - t.Fatal("expected 1 entry in Wantlist, found", l) - } - if log[2].msg.Wantlist()[0].WantType != pb.Message_Wantlist_Block { - t.Error("expected WantType equal to 'Block', found 'Have'") - } - if log[2].msg.Wantlist()[0].Cancel != true { - t.Error("expected entry with Cancel set to 'true'") - } - - // After disabling WireTap, no new messages are logged - updateTracer(instances[0].Exchange, nil) - - addBlock(t, context.Background(), instances[0], blocks[1]) - - _, err = instances[1].Exchange.GetBlock(ctx, blocks[1].Cid()) - if err != nil { - t.Fatal(err) - } - if err = tu.WaitFor(ctx, func() error { - if len(instances[1].Exchange.GetWantlist()) != 0 { - return fmt.Errorf("shouldnt have anything in wantlist") - } - return nil - }); err != nil { - t.Fatal(err) - } - - log = wiretap.getLog() - - if l := len(log); l != 3 { - t.Fatal("expected 3 items logged via WireTap, found", l) - } - - for _, inst := range instances { - err := inst.Exchange.Close() - if err != nil { - t.Fatal(err) - } - } -} - -func updateTracer(bs *bitswap.Bitswap, tap tracer.Tracer) { - bitswap.WithTracer(tap).V.(func(*bitswap.Bitswap))(bs) -} diff --git a/bitswap/options.go b/bitswap/options.go index 0c087b713..934396a75 100644 --- a/bitswap/options.go +++ b/bitswap/options.go @@ -14,7 +14,7 @@ type option func(*Bitswap) // Option is interface{} of server.Option or client.Option or func(*Bitswap) // wrapped in a struct to gain strong type checking. 
type Option struct { - V interface{} + v interface{} } func EngineBlockstoreWorkerCount(count int) Option { @@ -74,15 +74,6 @@ func WithTracer(tap tracer.Tracer) Option { return Option{ func(bs *Bitswap) { bs.tracer = tap - // the tests use this to hot update tracers, we need to update tracers of impls if we are running - if bs.Client != nil { - if tap != nil { - tap = nopReceiveTracer{tap} - } - client.WithTracer(tap)(bs.Client) - // no need to check for server as they can't not be both running - server.WithTracer(tap)(bs.Server) - } }, } } diff --git a/bitswap/polyfill.go b/bitswap/polyfill.go index 3ca47b1b4..95dcd5dcc 100644 --- a/bitswap/polyfill.go +++ b/bitswap/polyfill.go @@ -63,7 +63,7 @@ func New(ctx context.Context, net network.BitSwapNetwork, bstore blockstore.Bloc var clientOptions []client.Option for _, o := range options { - switch typedOption := o.V.(type) { + switch typedOption := o.v.(type) { case server.Option: serverOptions = append(serverOptions, typedOption) case client.Option: From d334702fe898f7d4d7af84a6a456e9ef86ee1800 Mon Sep 17 00:00:00 2001 From: Jorropo Date: Thu, 11 Aug 2022 18:24:41 +0200 Subject: [PATCH 1028/1035] refactor: remove metrics object and other review changes This commit was moved from ipfs/go-bitswap@81393bcd77fb6ea8470057adb5f7acc52b195b5f --- bitswap/benchmarks_test.go | 2 +- bitswap/{polyfill.go => bitswap.go} | 16 ++- bitswap/bitswap_test.go | 2 +- bitswap/client/bitswap_with_sessions_test.go | 2 +- bitswap/client/client.go | 14 +- bitswap/decision/forward.go | 12 ++ bitswap/forward.go | 17 +++ bitswap/internal/defaults/defaults.go | 5 + bitswap/metrics/gen.go | 132 ------------------ bitswap/metrics/metrics.go | 44 ++++++ bitswap/server/forward.go | 1 + bitswap/server/internal/decision/engine.go | 10 +- .../server/internal/decision/engine_test.go | 2 - bitswap/server/server.go | 41 +++--- .../{client => }/testinstance/testinstance.go | 0 bitswap/wantlist/forward.go | 23 +++ 16 files changed, 147 insertions(+), 176 
deletions(-) rename bitswap/{polyfill.go => bitswap.go} (90%) create mode 100644 bitswap/decision/forward.go create mode 100644 bitswap/forward.go delete mode 100644 bitswap/metrics/gen.go create mode 100644 bitswap/metrics/metrics.go rename bitswap/{client => }/testinstance/testinstance.go (100%) create mode 100644 bitswap/wantlist/forward.go diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index ea6767713..c989792ac 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -18,8 +18,8 @@ import ( protocol "github.com/libp2p/go-libp2p-core/protocol" "github.com/ipfs/go-bitswap" - testinstance "github.com/ipfs/go-bitswap/client/testinstance" bsnet "github.com/ipfs/go-bitswap/network" + testinstance "github.com/ipfs/go-bitswap/testinstance" tn "github.com/ipfs/go-bitswap/testnet" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" diff --git a/bitswap/polyfill.go b/bitswap/bitswap.go similarity index 90% rename from bitswap/polyfill.go rename to bitswap/bitswap.go index 95dcd5dcc..f6fdb4cb4 100644 --- a/bitswap/polyfill.go +++ b/bitswap/bitswap.go @@ -5,8 +5,8 @@ import ( "fmt" "github.com/ipfs/go-bitswap/client" + "github.com/ipfs/go-bitswap/internal/defaults" "github.com/ipfs/go-bitswap/message" - "github.com/ipfs/go-bitswap/metrics" "github.com/ipfs/go-bitswap/network" "github.com/ipfs/go-bitswap/server" "github.com/ipfs/go-bitswap/tracer" @@ -24,7 +24,7 @@ import ( var log = logging.Logger("bitswap") // old interface we are targeting -type old interface { +type bitswap interface { Close() error GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) @@ -44,7 +44,8 @@ type old interface { } var _ exchange.SessionExchange = (*Bitswap)(nil) -var _ old = (*Bitswap)(nil) +var _ bitswap = (*Bitswap)(nil) +var HasBlockBufferSize = defaults.HasBlockBufferSize type Bitswap struct { *client.Client @@ -81,9 +82,12 @@ func New(ctx 
context.Context, net network.BitSwapNetwork, bstore blockstore.Bloc serverOptions = append(serverOptions, server.WithTracer(tracer)) } - stats := metrics.New(ctx) - bs.Server = server.New(ctx, net, bstore, stats, serverOptions...) - bs.Client = client.New(ctx, net, bstore, stats, append(clientOptions, client.WithBlockReceivedNotifier(bs.Server))...) + if HasBlockBufferSize != defaults.HasBlockBufferSize { + serverOptions = append(serverOptions, server.HasBlockBufferSize(HasBlockBufferSize)) + } + + bs.Server = server.New(ctx, net, bstore, serverOptions...) + bs.Client = client.New(ctx, net, bstore, append(clientOptions, client.WithBlockReceivedNotifier(bs.Server))...) net.Start(bs) // use the polyfill receiver to log received errors and trace messages only once return bs diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 33603726b..055a90304 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -10,9 +10,9 @@ import ( "time" "github.com/ipfs/go-bitswap" - testinstance "github.com/ipfs/go-bitswap/client/testinstance" bsmsg "github.com/ipfs/go-bitswap/message" "github.com/ipfs/go-bitswap/server" + testinstance "github.com/ipfs/go-bitswap/testinstance" tn "github.com/ipfs/go-bitswap/testnet" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" diff --git a/bitswap/client/bitswap_with_sessions_test.go b/bitswap/client/bitswap_with_sessions_test.go index 8ba2d6e9f..5e4d2454f 100644 --- a/bitswap/client/bitswap_with_sessions_test.go +++ b/bitswap/client/bitswap_with_sessions_test.go @@ -8,7 +8,7 @@ import ( "github.com/ipfs/go-bitswap" "github.com/ipfs/go-bitswap/client/internal/session" - testinstance "github.com/ipfs/go-bitswap/client/testinstance" + testinstance "github.com/ipfs/go-bitswap/testinstance" tn "github.com/ipfs/go-bitswap/testnet" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" diff --git a/bitswap/client/client.go b/bitswap/client/client.go index 1380e0d9b..3a208749a 100644 --- 
a/bitswap/client/client.go +++ b/bitswap/client/client.go @@ -82,16 +82,14 @@ func WithBlockReceivedNotifier(brn BlockReceivedNotifier) Option { } type BlockReceivedNotifier interface { - // ReceivedBlocks notify the decision engine that a peer is well behaving - // and gave us usefull data, potentially increasing it's score and making us + // ReceivedBlocks notifies the decision engine that a peer is well-behaving + // and gave us useful data, potentially increasing its score and making us // send them more data in exchange. ReceivedBlocks(peer.ID, []blocks.Block) } -// New initializes a BitSwap instance that communicates over the provided -// BitSwapNetwork. This function registers the returned instance as the network -// delegate. Runs until context is cancelled or bitswap.Close is called. -func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, m *bmetrics.Metrics, options ...Option) *Client { +// New initializes a Bitswap client that runs until client.Close is called. +func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Client { // important to use provided parent context (since it may include important // loggable data). It's probably not a good idea to allow bitswap to be // coupled to the concerns of the ipfs daemon in this way. 
@@ -155,8 +153,8 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore sim: sim, notif: notif, counters: new(counters), - dupMetric: m.DupHist(), - allMetric: m.AllHist(), + dupMetric: bmetrics.DupHist(), + allMetric: bmetrics.AllHist(), provSearchDelay: defaults.ProvSearchDelay, rebroadcastDelay: delay.Fixed(time.Minute), simulateDontHavesOnTimeout: true, diff --git a/bitswap/decision/forward.go b/bitswap/decision/forward.go new file mode 100644 index 000000000..d19cda943 --- /dev/null +++ b/bitswap/decision/forward.go @@ -0,0 +1,12 @@ +package decision + +import "github.com/ipfs/go-bitswap/server" + +type ( + // DEPRECATED use server.Receipt instead + Receipt = server.Receipt + // DEPRECATED use server.ScoreLedger instead + ScoreLedger = server.ScoreLedger + // DEPRECATED use server.ScorePeerFunc instead + ScorePeerFunc = server.ScorePeerFunc +) diff --git a/bitswap/forward.go b/bitswap/forward.go new file mode 100644 index 000000000..2beb7590f --- /dev/null +++ b/bitswap/forward.go @@ -0,0 +1,17 @@ +package bitswap + +import ( + "github.com/ipfs/go-bitswap/server" + "github.com/ipfs/go-bitswap/tracer" +) + +type ( + // DEPRECATED + PeerBlockRequestFilter = server.PeerBlockRequestFilter + // DEPRECATED + TaskComparator = server.TaskComparator + // DEPRECATED + TaskInfo = server.TaskInfo + // DEPRECATED + Tracer = tracer.Tracer +) diff --git a/bitswap/internal/defaults/defaults.go b/bitswap/internal/defaults/defaults.go index 54a9eaa66..6f7c2e745 100644 --- a/bitswap/internal/defaults/defaults.go +++ b/bitswap/internal/defaults/defaults.go @@ -19,4 +19,9 @@ const ( BitswapMaxOutstandingBytesPerPeer = 1 << 20 // the number of bytes we attempt to make each outgoing bitswap message BitswapEngineTargetMessageSize = 16 * 1024 + // HasBlockBufferSize is the buffer size of the channel for new blocks + // that need to be provided. They should get pulled over by the + // provideCollector even before they are actually provided. 
+ // TODO: Does this need to be this large givent that? + HasBlockBufferSize = 256 ) diff --git a/bitswap/metrics/gen.go b/bitswap/metrics/gen.go deleted file mode 100644 index 000a8cde8..000000000 --- a/bitswap/metrics/gen.go +++ /dev/null @@ -1,132 +0,0 @@ -package metrics - -import ( - "context" - "sync" - - "github.com/ipfs/go-metrics-interface" -) - -var ( - // the 1<<18+15 is to observe old file chunks that are 1<<18 + 14 in size - metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} - - timeMetricsBuckets = []float64{1, 10, 30, 60, 90, 120, 600} -) - -// Metrics is a type which lazy initialize metrics objects. -// It MUST not be copied. -type Metrics struct { - ctx context.Context - lock sync.Mutex - - dupHist metrics.Histogram - allHist metrics.Histogram - sentHist metrics.Histogram - sendTimeHist metrics.Histogram - - pendingEngineGauge metrics.Gauge - activeEngineGauge metrics.Gauge - pendingBlocksGauge metrics.Gauge - activeBlocksGauge metrics.Gauge -} - -func New(ctx context.Context) *Metrics { - return &Metrics{ctx: metrics.CtxSubScope(ctx, "bitswap")} -} - -// DupHist return recv_dup_blocks_bytes. -// Threadsafe -func (m *Metrics) DupHist() metrics.Histogram { - m.lock.Lock() - defer m.lock.Unlock() - if m.dupHist != nil { - return m.dupHist - } - m.dupHist = metrics.NewCtx(m.ctx, "recv_dup_blocks_bytes", "Summary of duplicate data blocks recived").Histogram(metricsBuckets) - return m.dupHist -} - -// AllHist returns recv_all_blocks_bytes. -// Threadsafe -func (m *Metrics) AllHist() metrics.Histogram { - m.lock.Lock() - defer m.lock.Unlock() - if m.allHist != nil { - return m.allHist - } - m.allHist = metrics.NewCtx(m.ctx, "recv_all_blocks_bytes", "Summary of all data blocks recived").Histogram(metricsBuckets) - return m.allHist -} - -// SentHist returns sent_all_blocks_bytes. 
-// Threadsafe -func (m *Metrics) SentHist() metrics.Histogram { - m.lock.Lock() - defer m.lock.Unlock() - if m.sentHist != nil { - return m.sentHist - } - m.sentHist = metrics.NewCtx(m.ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by this bitswap").Histogram(metricsBuckets) - return m.sentHist -} - -// SendTimeHist returns send_times. -// Threadsafe -func (m *Metrics) SendTimeHist() metrics.Histogram { - m.lock.Lock() - defer m.lock.Unlock() - if m.sendTimeHist != nil { - return m.sendTimeHist - } - m.sendTimeHist = metrics.NewCtx(m.ctx, "send_times", "Histogram of how long it takes to send messages in this bitswap").Histogram(timeMetricsBuckets) - return m.sendTimeHist -} - -// PendingEngineGauge returns pending_tasks. -// Threadsafe -func (m *Metrics) PendingEngineGauge() metrics.Gauge { - m.lock.Lock() - defer m.lock.Unlock() - if m.pendingEngineGauge != nil { - return m.pendingEngineGauge - } - m.pendingEngineGauge = metrics.NewCtx(m.ctx, "pending_tasks", "Total number of pending tasks").Gauge() - return m.pendingEngineGauge -} - -// ActiveEngineGauge returns active_tasks. -// Threadsafe -func (m *Metrics) ActiveEngineGauge() metrics.Gauge { - m.lock.Lock() - defer m.lock.Unlock() - if m.activeEngineGauge != nil { - return m.activeEngineGauge - } - m.activeEngineGauge = metrics.NewCtx(m.ctx, "active_tasks", "Total number of active tasks").Gauge() - return m.activeEngineGauge -} - -// PendingBlocksGauge returns pending_block_tasks. -// Threadsafe -func (m *Metrics) PendingBlocksGauge() metrics.Gauge { - m.lock.Lock() - defer m.lock.Unlock() - if m.pendingBlocksGauge != nil { - return m.pendingBlocksGauge - } - m.pendingBlocksGauge = metrics.NewCtx(m.ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() - return m.pendingBlocksGauge -} - -// ActiveBlocksGauge returns active_block_tasks. 
-// Threadsafe -func (m *Metrics) ActiveBlocksGauge() metrics.Gauge { - m.lock.Lock() - defer m.lock.Unlock() - if m.activeBlocksGauge != nil { - return m.activeBlocksGauge - } - m.activeBlocksGauge = metrics.NewCtx(m.ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() - return m.activeBlocksGauge -} diff --git a/bitswap/metrics/metrics.go b/bitswap/metrics/metrics.go new file mode 100644 index 000000000..8d679a51e --- /dev/null +++ b/bitswap/metrics/metrics.go @@ -0,0 +1,44 @@ +package metrics + +import ( + "github.com/ipfs/go-metrics-interface" +) + +var ( + // the 1<<18+15 is to observe old file chunks that are 1<<18 + 14 in size + metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} + + timeMetricsBuckets = []float64{1, 10, 30, 60, 90, 120, 600} +) + +func DupHist() metrics.Histogram { + return metrics.New("recv_dup_blocks_bytes", "Summary of duplicate data blocks recived").Histogram(metricsBuckets) +} + +func AllHist() metrics.Histogram { + return metrics.New("recv_all_blocks_bytes", "Summary of all data blocks recived").Histogram(metricsBuckets) +} + +func SentHist() metrics.Histogram { + return metrics.New("sent_all_blocks_bytes", "Histogram of blocks sent by this bitswap").Histogram(metricsBuckets) +} + +func SendTimeHist() metrics.Histogram { + return metrics.New("send_times", "Histogram of how long it takes to send messages in this bitswap").Histogram(timeMetricsBuckets) +} + +func PendingEngineGauge() metrics.Gauge { + return metrics.New("pending_tasks", "Total number of pending tasks").Gauge() +} + +func ActiveEngineGauge() metrics.Gauge { + return metrics.New("active_tasks", "Total number of active tasks").Gauge() +} + +func PendingBlocksGauge() metrics.Gauge { + return metrics.New("pending_block_tasks", "Total number of pending blockstore tasks").Gauge() +} + +func ActiveBlocksGauge() metrics.Gauge { + return metrics.New("active_block_tasks", "Total number of active blockstore tasks").Gauge() +} 
diff --git a/bitswap/server/forward.go b/bitswap/server/forward.go index 67f5b2a5e..79c39d5da 100644 --- a/bitswap/server/forward.go +++ b/bitswap/server/forward.go @@ -8,6 +8,7 @@ type ( Receipt = decision.Receipt PeerBlockRequestFilter = decision.PeerBlockRequestFilter TaskComparator = decision.TaskComparator + TaskInfo = decision.TaskInfo ScoreLedger = decision.ScoreLedger ScorePeerFunc = decision.ScorePeerFunc ) diff --git a/bitswap/server/internal/decision/engine.go b/bitswap/server/internal/decision/engine.go index d1ccdeb02..04bcb1433 100644 --- a/bitswap/server/internal/decision/engine.go +++ b/bitswap/server/internal/decision/engine.go @@ -308,7 +308,6 @@ func NewEngine( bs bstore.Blockstore, peerTagger PeerTagger, self peer.ID, - metrics *bmetrics.Metrics, opts ...Option, ) *Engine { return newEngine( @@ -316,7 +315,6 @@ func NewEngine( peerTagger, self, maxBlockSizeReplaceHasWithBlock, - metrics, opts..., ) } @@ -326,10 +324,8 @@ func newEngine( peerTagger PeerTagger, self peer.ID, maxReplaceSize int, - metrics *bmetrics.Metrics, opts ...Option, ) *Engine { - e := &Engine{ ledgerMap: make(map[peer.ID]*ledger), scoreLedger: NewDefaultScoreLedger(), @@ -344,8 +340,8 @@ func newEngine( sendDontHaves: true, self: self, peerLedger: newPeerLedger(), - pendingGauge: metrics.PendingEngineGauge(), - activeGauge: metrics.ActiveEngineGauge(), + pendingGauge: bmetrics.PendingEngineGauge(), + activeGauge: bmetrics.ActiveEngineGauge(), targetMessageSize: defaultTargetMessageSize, tagQueued: fmt.Sprintf(tagFormat, "queued", uuid.New().String()), tagUseful: fmt.Sprintf(tagFormat, "useful", uuid.New().String()), @@ -355,7 +351,7 @@ func newEngine( opt(e) } - e.bsm = newBlockstoreManager(bs, e.bstoreWorkerCount, metrics.PendingBlocksGauge(), metrics.ActiveBlocksGauge()) + e.bsm = newBlockstoreManager(bs, e.bstoreWorkerCount, bmetrics.PendingBlocksGauge(), bmetrics.ActiveBlocksGauge()) // default peer task queue options peerTaskQueueOpts := []peertaskqueue.Option{ diff 
--git a/bitswap/server/internal/decision/engine_test.go b/bitswap/server/internal/decision/engine_test.go index 853cc3bf2..3ae8f1505 100644 --- a/bitswap/server/internal/decision/engine_test.go +++ b/bitswap/server/internal/decision/engine_test.go @@ -14,7 +14,6 @@ import ( "github.com/ipfs/go-bitswap/internal/testutil" message "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" - "github.com/ipfs/go-bitswap/metrics" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" @@ -197,7 +196,6 @@ func newEngineForTesting( peerTagger, self, maxReplaceSize, - metrics.New(ctx), opts..., ) } diff --git a/bitswap/server/server.go b/bitswap/server/server.go index 8cbe4682c..b39c34f1a 100644 --- a/bitswap/server/server.go +++ b/bitswap/server/server.go @@ -26,14 +26,7 @@ import ( "go.uber.org/zap" ) -var ( - // HasBlockBufferSize is the buffer size of the channel for new blocks - // that need to be provided. They should get pulled over by the - // provideCollector even before they are actually provided. - // TODO: Does this need to be this large givent that? 
- HasBlockBufferSize = 256 - provideKeysBufferSize = 2048 -) +var provideKeysBufferSize = 2048 var log = logging.Logger("bitswap-server") var sflog = log.Desugar() @@ -74,11 +67,13 @@ type Server struct { // Extra options to pass to the decision manager engineOptions []decision.Option + // the size of channel buffer to use + hasBlockBufferSize int // whether or not to make provide announcements provideEnabled bool } -func New(ctx context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, m *bmetrics.Metrics, options ...Option) *Server { +func New(ctx context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Server { ctx, cancel := context.WithCancel(ctx) px := process.WithTeardown(func() error { @@ -90,15 +85,16 @@ func New(ctx context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Bl }() s := &Server{ - sentHistogram: m.SentHist(), - sendTimeHistogram: m.SendTimeHist(), - taskWorkerCount: defaults.BitswapTaskWorkerCount, - network: network, - process: px, - provideEnabled: true, - newBlocks: make(chan cid.Cid, HasBlockBufferSize), - provideKeys: make(chan cid.Cid, provideKeysBufferSize), + sentHistogram: bmetrics.SentHist(), + sendTimeHistogram: bmetrics.SendTimeHist(), + taskWorkerCount: defaults.BitswapTaskWorkerCount, + network: network, + process: px, + provideEnabled: true, + hasBlockBufferSize: defaults.HasBlockBufferSize, + provideKeys: make(chan cid.Cid, provideKeysBufferSize), } + s.newBlocks = make(chan cid.Cid, s.hasBlockBufferSize) for _, o := range options { o(s) @@ -109,7 +105,6 @@ func New(ctx context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Bl bstore, network.ConnectionManager(), network.Self(), - m, s.engineOptions..., ) s.engineOptions = nil @@ -215,6 +210,16 @@ func MaxOutstandingBytesPerPeer(count int) Option { } } +// HasBlockBufferSize configure how big the new blocks buffer should be. 
+func HasBlockBufferSize(count int) Option { + if count < 0 { + panic("cannot have negative buffer size") + } + return func(bs *Server) { + bs.hasBlockBufferSize = count + } +} + // WantlistForPeer returns the currently understood list of blocks requested by a // given peer. func (bs *Server) WantlistForPeer(p peer.ID) []cid.Cid { diff --git a/bitswap/client/testinstance/testinstance.go b/bitswap/testinstance/testinstance.go similarity index 100% rename from bitswap/client/testinstance/testinstance.go rename to bitswap/testinstance/testinstance.go diff --git a/bitswap/wantlist/forward.go b/bitswap/wantlist/forward.go new file mode 100644 index 000000000..c7eba707f --- /dev/null +++ b/bitswap/wantlist/forward.go @@ -0,0 +1,23 @@ +package wantlist + +import ( + "github.com/ipfs/go-bitswap/client/wantlist" + "github.com/ipfs/go-cid" +) + +type ( + // DEPRECATED use wantlist.Entry instead + Entry = wantlist.Entry + // DEPRECATED use wantlist.Wantlist instead + Wantlist = wantlist.Wantlist +) + +// DEPRECATED use wantlist.New instead +func New() *Wantlist { + return wantlist.New() +} + +// DEPRECATED use wantlist.NewRefEntry instead +func NewRefEntry(c cid.Cid, p int32) Entry { + return wantlist.NewRefEntry(c, p) +} From a4c5b71bac447c23378b0c74d0d287f3c3d5cabe Mon Sep 17 00:00:00 2001 From: Gus Eggert Date: Fri, 12 Aug 2022 23:27:14 -0400 Subject: [PATCH 1029/1035] fix: plumb through ctor contexts to preserve metrics scopes This commit was moved from ipfs/go-bitswap@ab72e8eddc0e77fefc616fe3d992b0779d95cda6 --- bitswap/bitswap.go | 9 +++-- bitswap/client/client.go | 4 +-- bitswap/metrics/metrics.go | 34 ++++++++++--------- bitswap/server/internal/decision/engine.go | 9 +++-- .../server/internal/decision/engine_test.go | 1 + bitswap/server/server.go | 8 ++--- 6 files changed, 37 insertions(+), 28 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index f6fdb4cb4..df7a91e74 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -10,11 +10,12 @@ import 
( "github.com/ipfs/go-bitswap/network" "github.com/ipfs/go-bitswap/server" "github.com/ipfs/go-bitswap/tracer" + "github.com/ipfs/go-metrics-interface" - "github.com/ipfs/go-block-format" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - "github.com/ipfs/go-ipfs-blockstore" - "github.com/ipfs/go-ipfs-exchange-interface" + blockstore "github.com/ipfs/go-ipfs-blockstore" + exchange "github.com/ipfs/go-ipfs-exchange-interface" logging "github.com/ipfs/go-log" "github.com/libp2p/go-libp2p-core/peer" @@ -86,6 +87,8 @@ func New(ctx context.Context, net network.BitSwapNetwork, bstore blockstore.Bloc serverOptions = append(serverOptions, server.HasBlockBufferSize(HasBlockBufferSize)) } + ctx = metrics.CtxSubScope(ctx, "bitswap") + bs.Server = server.New(ctx, net, bstore, serverOptions...) bs.Client = client.New(ctx, net, bstore, append(clientOptions, client.WithBlockReceivedNotifier(bs.Server))...) net.Start(bs) // use the polyfill receiver to log received errors and trace messages only once diff --git a/bitswap/client/client.go b/bitswap/client/client.go index 3a208749a..47aa64445 100644 --- a/bitswap/client/client.go +++ b/bitswap/client/client.go @@ -153,8 +153,8 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore sim: sim, notif: notif, counters: new(counters), - dupMetric: bmetrics.DupHist(), - allMetric: bmetrics.AllHist(), + dupMetric: bmetrics.DupHist(ctx), + allMetric: bmetrics.AllHist(ctx), provSearchDelay: defaults.ProvSearchDelay, rebroadcastDelay: delay.Fixed(time.Minute), simulateDontHavesOnTimeout: true, diff --git a/bitswap/metrics/metrics.go b/bitswap/metrics/metrics.go index 8d679a51e..b71923727 100644 --- a/bitswap/metrics/metrics.go +++ b/bitswap/metrics/metrics.go @@ -1,6 +1,8 @@ package metrics import ( + "context" + "github.com/ipfs/go-metrics-interface" ) @@ -11,34 +13,34 @@ var ( timeMetricsBuckets = []float64{1, 10, 30, 60, 90, 120, 600} ) -func DupHist() metrics.Histogram { - return 
metrics.New("recv_dup_blocks_bytes", "Summary of duplicate data blocks recived").Histogram(metricsBuckets) +func DupHist(ctx context.Context) metrics.Histogram { + return metrics.NewCtx(ctx, "recv_dup_blocks_bytes", "Summary of duplicate data blocks recived").Histogram(metricsBuckets) } -func AllHist() metrics.Histogram { - return metrics.New("recv_all_blocks_bytes", "Summary of all data blocks recived").Histogram(metricsBuckets) +func AllHist(ctx context.Context) metrics.Histogram { + return metrics.NewCtx(ctx, "recv_all_blocks_bytes", "Summary of all data blocks recived").Histogram(metricsBuckets) } -func SentHist() metrics.Histogram { - return metrics.New("sent_all_blocks_bytes", "Histogram of blocks sent by this bitswap").Histogram(metricsBuckets) +func SentHist(ctx context.Context) metrics.Histogram { + return metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by this bitswap").Histogram(metricsBuckets) } -func SendTimeHist() metrics.Histogram { - return metrics.New("send_times", "Histogram of how long it takes to send messages in this bitswap").Histogram(timeMetricsBuckets) +func SendTimeHist(ctx context.Context) metrics.Histogram { + return metrics.NewCtx(ctx, "send_times", "Histogram of how long it takes to send messages in this bitswap").Histogram(timeMetricsBuckets) } -func PendingEngineGauge() metrics.Gauge { - return metrics.New("pending_tasks", "Total number of pending tasks").Gauge() +func PendingEngineGauge(ctx context.Context) metrics.Gauge { + return metrics.NewCtx(ctx, "pending_tasks", "Total number of pending tasks").Gauge() } -func ActiveEngineGauge() metrics.Gauge { - return metrics.New("active_tasks", "Total number of active tasks").Gauge() +func ActiveEngineGauge(ctx context.Context) metrics.Gauge { + return metrics.NewCtx(ctx, "active_tasks", "Total number of active tasks").Gauge() } -func PendingBlocksGauge() metrics.Gauge { - return metrics.New("pending_block_tasks", "Total number of pending blockstore tasks").Gauge() 
+func PendingBlocksGauge(ctx context.Context) metrics.Gauge { + return metrics.NewCtx(ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() } -func ActiveBlocksGauge() metrics.Gauge { - return metrics.New("active_block_tasks", "Total number of active blockstore tasks").Gauge() +func ActiveBlocksGauge(ctx context.Context) metrics.Gauge { + return metrics.NewCtx(ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() } diff --git a/bitswap/server/internal/decision/engine.go b/bitswap/server/internal/decision/engine.go index 04bcb1433..a53a6274f 100644 --- a/bitswap/server/internal/decision/engine.go +++ b/bitswap/server/internal/decision/engine.go @@ -305,12 +305,14 @@ func wrapTaskComparator(tc TaskComparator) peertask.QueueTaskComparator { // maxOutstandingBytesPerPeer hints to the peer task queue not to give a peer more tasks if it has some maximum // work already outstanding. func NewEngine( + ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, self peer.ID, opts ...Option, ) *Engine { return newEngine( + ctx, bs, peerTagger, self, @@ -320,6 +322,7 @@ func NewEngine( } func newEngine( + ctx context.Context, bs bstore.Blockstore, peerTagger PeerTagger, self peer.ID, @@ -340,8 +343,8 @@ func newEngine( sendDontHaves: true, self: self, peerLedger: newPeerLedger(), - pendingGauge: bmetrics.PendingEngineGauge(), - activeGauge: bmetrics.ActiveEngineGauge(), + pendingGauge: bmetrics.PendingEngineGauge(ctx), + activeGauge: bmetrics.ActiveEngineGauge(ctx), targetMessageSize: defaultTargetMessageSize, tagQueued: fmt.Sprintf(tagFormat, "queued", uuid.New().String()), tagUseful: fmt.Sprintf(tagFormat, "useful", uuid.New().String()), @@ -351,7 +354,7 @@ func newEngine( opt(e) } - e.bsm = newBlockstoreManager(bs, e.bstoreWorkerCount, bmetrics.PendingBlocksGauge(), bmetrics.ActiveBlocksGauge()) + e.bsm = newBlockstoreManager(bs, e.bstoreWorkerCount, bmetrics.PendingBlocksGauge(ctx), bmetrics.ActiveBlocksGauge(ctx)) 
// default peer task queue options peerTaskQueueOpts := []peertaskqueue.Option{ diff --git a/bitswap/server/internal/decision/engine_test.go b/bitswap/server/internal/decision/engine_test.go index 3ae8f1505..7484a7aaa 100644 --- a/bitswap/server/internal/decision/engine_test.go +++ b/bitswap/server/internal/decision/engine_test.go @@ -192,6 +192,7 @@ func newEngineForTesting( opts ...Option, ) *Engine { return newEngine( + ctx, bs, peerTagger, self, diff --git a/bitswap/server/server.go b/bitswap/server/server.go index b39c34f1a..c9dbf4d98 100644 --- a/bitswap/server/server.go +++ b/bitswap/server/server.go @@ -17,7 +17,7 @@ import ( "github.com/ipfs/go-bitswap/tracer" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - "github.com/ipfs/go-ipfs-blockstore" + blockstore "github.com/ipfs/go-ipfs-blockstore" logging "github.com/ipfs/go-log" "github.com/ipfs/go-metrics-interface" process "github.com/jbenet/goprocess" @@ -85,8 +85,8 @@ func New(ctx context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Bl }() s := &Server{ - sentHistogram: bmetrics.SentHist(), - sendTimeHistogram: bmetrics.SendTimeHist(), + sentHistogram: bmetrics.SentHist(ctx), + sendTimeHistogram: bmetrics.SendTimeHist(ctx), taskWorkerCount: defaults.BitswapTaskWorkerCount, network: network, process: px, @@ -100,8 +100,8 @@ func New(ctx context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Bl o(s) } - // Set up decision engine s.engine = decision.NewEngine( + ctx, bstore, network.ConnectionManager(), network.Self(), From da8f90c72781f30f9181be6bc454ae9206a57c10 Mon Sep 17 00:00:00 2001 From: Gus Eggert Date: Sat, 13 Aug 2022 08:25:36 -0400 Subject: [PATCH 1030/1035] fix: message queue test races on Windows This commit was moved from ipfs/go-bitswap@b8fd335853abb5ca61ab157ec3d57550d76ce1fd --- .../messagequeue/donthavetimeoutmgr_test.go | 6 ++--- .../messagequeue/messagequeue_test.go | 24 +++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff 
--git a/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go b/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go index 61023f00d..6a31242af 100644 --- a/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go +++ b/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go @@ -375,10 +375,10 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { ks := testutil.GenerateCids(2) - latency := time.Millisecond * 20 + latency := time.Millisecond * 200 latMultiplier := 1 expProcessTime := time.Duration(0) - defaultTimeout := 10 * time.Millisecond + defaultTimeout := 100 * time.Millisecond clock := clock.NewMock() pinged := make(chan struct{}) pc := &mockPeerConn{latency: latency, clock: clock, pinged: pinged} @@ -395,7 +395,7 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { dhtm.AddPending(ks) // Sleep for less than the default timeout - clock.Add(defaultTimeout - 5*time.Millisecond) + clock.Add(defaultTimeout - 50*time.Millisecond) // At this stage no timeout should have happened yet if tr.timedOutCount() > 0 { diff --git a/bitswap/client/internal/messagequeue/messagequeue_test.go b/bitswap/client/internal/messagequeue/messagequeue_test.go index 5607a3aa4..1356f35c6 100644 --- a/bitswap/client/internal/messagequeue/messagequeue_test.go +++ b/bitswap/client/internal/messagequeue/messagequeue_test.go @@ -167,7 +167,7 @@ func TestStartupAndShutdown(t *testing.T) { messageQueue.Startup() messageQueue.AddBroadcastWantHaves(bcstwh) - messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) if len(messages) != 1 { t.Fatal("wrong number of messages were sent for broadcast want-haves") } @@ -184,7 +184,7 @@ func TestStartupAndShutdown(t *testing.T) { messageQueue.Shutdown() - timeoutctx, cancel := context.WithTimeout(ctx, 
10*time.Millisecond) + timeoutctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) defer cancel() select { case <-resetChan: @@ -207,7 +207,7 @@ func TestSendingMessagesDeduped(t *testing.T) { messageQueue.Startup() messageQueue.AddWants(wantBlocks, wantHaves) messageQueue.AddWants(wantBlocks, wantHaves) - messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { t.Fatal("Messages were not deduped") @@ -318,7 +318,7 @@ func TestCancelOverridesPendingWants(t *testing.T) { messageQueue.Startup() messageQueue.AddWants(wantBlocks, wantHaves) messageQueue.AddCancels(cancels) - messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks)-len(cancels) { t.Fatal("Wrong message count") @@ -342,7 +342,7 @@ func TestCancelOverridesPendingWants(t *testing.T) { // Cancel the remaining want-blocks and want-haves cancels = append(wantHaves, wantBlocks...) 
messageQueue.AddCancels(cancels) - messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + messages = collectMessages(ctx, t, messagesSent, 100*time.Millisecond) // The remaining 2 cancels should be sent to the network as they are for // wants that were sent to the network @@ -370,7 +370,7 @@ func TestWantOverridesPendingCancels(t *testing.T) { // Add 1 want-block and 2 want-haves messageQueue.AddWants(wantBlocks, wantHaves) - messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) if totalEntriesLength(messages) != len(wantBlocks)+len(wantHaves) { t.Fatal("Wrong message count", totalEntriesLength(messages)) } @@ -380,7 +380,7 @@ func TestWantOverridesPendingCancels(t *testing.T) { // Override one cancel with a want-block (before cancel is sent to network) messageQueue.AddWants(cids[:1], []cid.Cid{}) - messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + messages = collectMessages(ctx, t, messagesSent, 100*time.Millisecond) if totalEntriesLength(messages) != 3 { t.Fatal("Wrong message count", totalEntriesLength(messages)) } @@ -554,7 +554,7 @@ func TestSendToPeerThatDoesntSupportHave(t *testing.T) { // Check broadcast want-haves bcwh := testutil.GenerateCids(10) messageQueue.AddBroadcastWantHaves(bcwh) - messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) if len(messages) != 1 { t.Fatal("wrong number of messages were sent", len(messages)) @@ -573,7 +573,7 @@ func TestSendToPeerThatDoesntSupportHave(t *testing.T) { wbs := testutil.GenerateCids(10) whs := testutil.GenerateCids(10) messageQueue.AddWants(wbs, whs) - messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + messages = collectMessages(ctx, t, messagesSent, 100*time.Millisecond) if len(messages) != 1 { t.Fatal("wrong number of messages were sent", len(messages)) @@ -603,7 +603,7 
@@ func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { wbs := testutil.GenerateCids(10) messageQueue.AddWants(wbs, nil) - collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + collectMessages(ctx, t, messagesSent, 100*time.Millisecond) // Check want-blocks are added to DontHaveTimeoutMgr if dhtm.pendingCount() != len(wbs) { @@ -612,7 +612,7 @@ func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { cancelCount := 2 messageQueue.AddCancels(wbs[:cancelCount]) - collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + collectMessages(ctx, t, messagesSent, 100*time.Millisecond) // Check want-blocks are removed from DontHaveTimeoutMgr if dhtm.pendingCount() != len(wbs)-cancelCount { @@ -685,7 +685,7 @@ func TestResponseReceivedAppliesForFirstResponseOnly(t *testing.T) { // Add some wants and wait 10ms messageQueue.AddWants(cids, nil) - collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + collectMessages(ctx, t, messagesSent, 100*time.Millisecond) // Receive a response for the wants messageQueue.ResponseReceived(cids) From 1c0225268c15dc5e02025a73b4f30acebbcb50ec Mon Sep 17 00:00:00 2001 From: Jorropo Date: Mon, 29 Aug 2022 03:53:11 +0200 Subject: [PATCH 1031/1035] chore: update go-libp2p v0.22.0 This removes the github.com/libp2p/go-libp2p-loggables because AFAICT this is not useful anymore (we use tracing now). If people care about uuids in logs, we should log sessions in go-log instead.
This commit was moved from ipfs/go-bitswap@475c27cc187754e8ba8042110f3fad84540b811e --- bitswap/benchmarks_test.go | 13 +- bitswap/bitswap.go | 2 +- bitswap/bitswap_test.go | 4 +- bitswap/client/client.go | 2 +- .../blockpresencemanager.go | 2 +- .../blockpresencemanager_test.go | 2 +- .../internal/messagequeue/messagequeue.go | 2 +- .../messagequeue/messagequeue_test.go | 2 +- .../internal/peermanager/peermanager.go | 2 +- .../internal/peermanager/peermanager_test.go | 2 +- .../internal/peermanager/peerwantmanager.go | 2 +- .../peermanager/peerwantmanager_test.go | 2 +- .../providerquerymanager.go | 2 +- .../providerquerymanager_test.go | 2 +- .../internal/session/peerresponsetracker.go | 2 +- .../session/peerresponsetracker_test.go | 2 +- .../internal/session/sentwantblockstracker.go | 2 +- bitswap/client/internal/session/session.go | 7 +- .../client/internal/session/session_test.go | 2 +- .../internal/session/sessionwantsender.go | 2 +- .../session/sessionwantsender_test.go | 2 +- .../internal/sessionmanager/sessionmanager.go | 2 +- .../sessionmanager/sessionmanager_test.go | 2 +- .../sessionpeermanager/sessionpeermanager.go | 2 +- .../sessionpeermanager_test.go | 2 +- bitswap/internal/testutil/testutil.go | 2 +- bitswap/message/message.go | 2 +- bitswap/network/connecteventmanager.go | 2 +- bitswap/network/connecteventmanager_test.go | 2 +- bitswap/network/interface.go | 14 +-- bitswap/network/internal/default.go | 23 ++++ bitswap/network/ipfs_impl.go | 24 ++-- bitswap/network/ipfs_impl_test.go | 115 +++++++++++++----- bitswap/network/options.go | 2 +- bitswap/sendOnlyTracer.go | 2 +- bitswap/server/internal/decision/engine.go | 2 +- .../server/internal/decision/engine_test.go | 4 +- bitswap/server/internal/decision/ledger.go | 2 +- .../server/internal/decision/peer_ledger.go | 2 +- .../server/internal/decision/scoreledger.go | 2 +- bitswap/server/server.go | 2 +- bitswap/testinstance/testinstance.go | 4 +- bitswap/testnet/interface.go | 2 +- 
bitswap/testnet/network_test.go | 2 +- bitswap/testnet/peernet.go | 2 +- bitswap/testnet/virtual.go | 8 +- bitswap/tracer/tracer.go | 2 +- 47 files changed, 180 insertions(+), 110 deletions(-) create mode 100644 bitswap/network/internal/default.go diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index c989792ac..ef3582b32 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "math" "math/rand" "os" @@ -15,7 +14,7 @@ import ( "github.com/ipfs/go-bitswap/internal/testutil" blocks "github.com/ipfs/go-block-format" - protocol "github.com/libp2p/go-libp2p-core/protocol" + protocol "github.com/libp2p/go-libp2p/core/protocol" "github.com/ipfs/go-bitswap" bsnet "github.com/ipfs/go-bitswap/network" @@ -115,7 +114,7 @@ func BenchmarkFixedDelay(b *testing.B) { } out, _ := json.MarshalIndent(benchmarkLog, "", " ") - _ = ioutil.WriteFile("tmp/benchmark.json", out, 0666) + _ = os.WriteFile("tmp/benchmark.json", out, 0666) printResults(benchmarkLog) } @@ -183,7 +182,7 @@ func BenchmarkFetchFromOldBitswap(b *testing.B) { } out, _ := json.MarshalIndent(benchmarkLog, "", " ") - _ = ioutil.WriteFile("tmp/benchmark.json", out, 0666) + _ = os.WriteFile("tmp/benchmark.json", out, 0666) printResults(benchmarkLog) } @@ -241,7 +240,7 @@ func BenchmarkRealWorld(b *testing.B) { subtestDistributeAndFetchRateLimited(b, 300, 200, slowNetworkDelay, slowBandwidthGenerator, stdBlockSize, bstoreLatency, allToAll, batchFetchAll) }) out, _ := json.MarshalIndent(benchmarkLog, "", " ") - _ = ioutil.WriteFile("tmp/rw-benchmark.json", out, 0666) + _ = os.WriteFile("tmp/rw-benchmark.json", out, 0666) printResults(benchmarkLog) } @@ -264,7 +263,7 @@ func BenchmarkDatacenter(b *testing.B) { subtestDistributeAndFetchRateLimited(b, 3, 100, datacenterNetworkDelay, datacenterBandwidthGenerator, largeBlockSize, bstoreLatency, allToAll, unixfsFileFetch) }) out, _ := json.MarshalIndent(benchmarkLog, "", " 
") - _ = ioutil.WriteFile("tmp/rb-benchmark.json", out, 0666) + _ = os.WriteFile("tmp/rb-benchmark.json", out, 0666) printResults(benchmarkLog) } @@ -305,7 +304,7 @@ func BenchmarkDatacenterMultiLeechMultiSeed(b *testing.B) { }) out, _ := json.MarshalIndent(benchmarkLog, "", " ") - _ = ioutil.WriteFile("tmp/rb-benchmark.json", out, 0666) + _ = os.WriteFile("tmp/rb-benchmark.json", out, 0666) printResults(benchmarkLog) } diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index df7a91e74..cc98a7dbc 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -17,7 +17,7 @@ import ( blockstore "github.com/ipfs/go-ipfs-blockstore" exchange "github.com/ipfs/go-ipfs-exchange-interface" logging "github.com/ipfs/go-log" - "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p/core/peer" "go.uber.org/multierr" ) diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 055a90304..2ab4547e2 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -21,9 +21,9 @@ import ( delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" ipld "github.com/ipfs/go-ipld-format" - peer "github.com/libp2p/go-libp2p-core/peer" - p2ptestutil "github.com/libp2p/go-libp2p-netutil" tu "github.com/libp2p/go-libp2p-testing/etc" + p2ptestutil "github.com/libp2p/go-libp2p-testing/netutil" + peer "github.com/libp2p/go-libp2p/core/peer" ) func isCI() bool { diff --git a/bitswap/client/client.go b/bitswap/client/client.go index 47aa64445..ca94da8c1 100644 --- a/bitswap/client/client.go +++ b/bitswap/client/client.go @@ -37,7 +37,7 @@ import ( "github.com/ipfs/go-metrics-interface" process "github.com/jbenet/goprocess" procctx "github.com/jbenet/goprocess/context" - "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p/core/peer" ) var log = logging.Logger("bitswap-client") diff --git a/bitswap/client/internal/blockpresencemanager/blockpresencemanager.go 
b/bitswap/client/internal/blockpresencemanager/blockpresencemanager.go index 1d3acb0e2..1b76acc5b 100644 --- a/bitswap/client/internal/blockpresencemanager/blockpresencemanager.go +++ b/bitswap/client/internal/blockpresencemanager/blockpresencemanager.go @@ -4,7 +4,7 @@ import ( "sync" cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) // BlockPresenceManager keeps track of which peers have indicated that they diff --git a/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go b/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go index 66f489dfd..e6adfc617 100644 --- a/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go +++ b/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/ipfs/go-bitswap/internal/testutil" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" cid "github.com/ipfs/go-cid" ) diff --git a/bitswap/client/internal/messagequeue/messagequeue.go b/bitswap/client/internal/messagequeue/messagequeue.go index 6135fa54b..b80d71eef 100644 --- a/bitswap/client/internal/messagequeue/messagequeue.go +++ b/bitswap/client/internal/messagequeue/messagequeue.go @@ -13,7 +13,7 @@ import ( bsnet "github.com/ipfs/go-bitswap/network" cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/p2p/protocol/ping" "go.uber.org/zap" ) diff --git a/bitswap/client/internal/messagequeue/messagequeue_test.go b/bitswap/client/internal/messagequeue/messagequeue_test.go index 1356f35c6..337435e52 100644 --- a/bitswap/client/internal/messagequeue/messagequeue_test.go +++ b/bitswap/client/internal/messagequeue/messagequeue_test.go @@ -16,7 +16,7 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" bsnet 
"github.com/ipfs/go-bitswap/network" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/p2p/protocol/ping" ) diff --git a/bitswap/client/internal/peermanager/peermanager.go b/bitswap/client/internal/peermanager/peermanager.go index 1d4538a7e..dbce5bdd6 100644 --- a/bitswap/client/internal/peermanager/peermanager.go +++ b/bitswap/client/internal/peermanager/peermanager.go @@ -8,7 +8,7 @@ import ( "github.com/ipfs/go-metrics-interface" cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) var log = logging.Logger("bs:peermgr") diff --git a/bitswap/client/internal/peermanager/peermanager_test.go b/bitswap/client/internal/peermanager/peermanager_test.go index 2a4c4c697..231f89311 100644 --- a/bitswap/client/internal/peermanager/peermanager_test.go +++ b/bitswap/client/internal/peermanager/peermanager_test.go @@ -9,7 +9,7 @@ import ( "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p/core/peer" ) type msg struct { diff --git a/bitswap/client/internal/peermanager/peerwantmanager.go b/bitswap/client/internal/peermanager/peerwantmanager.go index 46a3ac348..0bc4732ca 100644 --- a/bitswap/client/internal/peermanager/peerwantmanager.go +++ b/bitswap/client/internal/peermanager/peerwantmanager.go @@ -5,7 +5,7 @@ import ( "fmt" cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) // Gauge can be used to keep track of a metric that increases and decreases diff --git a/bitswap/client/internal/peermanager/peerwantmanager_test.go b/bitswap/client/internal/peermanager/peerwantmanager_test.go index 5a00f27f4..fdc223d10 100644 --- a/bitswap/client/internal/peermanager/peerwantmanager_test.go +++ b/bitswap/client/internal/peermanager/peerwantmanager_test.go @@ -5,7 +5,7 @@ 
import ( "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) type gauge struct { diff --git a/bitswap/client/internal/providerquerymanager/providerquerymanager.go b/bitswap/client/internal/providerquerymanager/providerquerymanager.go index b3d29dea1..9ef2e5fd8 100644 --- a/bitswap/client/internal/providerquerymanager/providerquerymanager.go +++ b/bitswap/client/internal/providerquerymanager/providerquerymanager.go @@ -8,7 +8,7 @@ import ( "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) var log = logging.Logger("bitswap") diff --git a/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go b/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go index f98836780..2ca2ffaf6 100644 --- a/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go +++ b/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go @@ -11,7 +11,7 @@ import ( "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p/core/peer" ) type fakeProviderNetwork struct { diff --git a/bitswap/client/internal/session/peerresponsetracker.go b/bitswap/client/internal/session/peerresponsetracker.go index 63e904614..d81c3b027 100644 --- a/bitswap/client/internal/session/peerresponsetracker.go +++ b/bitswap/client/internal/session/peerresponsetracker.go @@ -3,7 +3,7 @@ package session import ( "math/rand" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) // peerResponseTracker keeps track of how many times each peer was the first diff --git a/bitswap/client/internal/session/peerresponsetracker_test.go b/bitswap/client/internal/session/peerresponsetracker_test.go index 
aafd2ced9..f1f58cd99 100644 --- a/bitswap/client/internal/session/peerresponsetracker_test.go +++ b/bitswap/client/internal/session/peerresponsetracker_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/ipfs/go-bitswap/internal/testutil" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) func TestPeerResponseTrackerInit(t *testing.T) { diff --git a/bitswap/client/internal/session/sentwantblockstracker.go b/bitswap/client/internal/session/sentwantblockstracker.go index cf0581ef3..0dfe0630b 100644 --- a/bitswap/client/internal/session/sentwantblockstracker.go +++ b/bitswap/client/internal/session/sentwantblockstracker.go @@ -2,7 +2,7 @@ package session import ( cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) // sentWantBlocksTracker keeps track of which peers we've sent a want-block to diff --git a/bitswap/client/internal/session/session.go b/bitswap/client/internal/session/session.go index 7b7eb871c..51e787e22 100644 --- a/bitswap/client/internal/session/session.go +++ b/bitswap/client/internal/session/session.go @@ -14,8 +14,7 @@ import ( cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" logging "github.com/ipfs/go-log" - peer "github.com/libp2p/go-libp2p-core/peer" - loggables "github.com/libp2p/go-libp2p-loggables" + peer "github.com/libp2p/go-libp2p/core/peer" "go.uber.org/zap" ) @@ -128,7 +127,6 @@ type Session struct { periodicSearchDelay delay.D // identifiers notif notifications.PubSub - uuid logging.Loggable id uint64 self peer.ID @@ -164,7 +162,6 @@ func New( incoming: make(chan op, 128), latencyTrkr: latencyTracker{}, notif: notif, - uuid: loggables.Uuid("GetBlockRequest"), baseTickDelay: time.Millisecond * 500, id: id, initialSearchDelay: initialSearchDelay, @@ -242,8 +239,6 @@ func (s *Session) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks. 
ctx, span := internal.StartSpan(ctx, "Session.GetBlocks") defer span.End() - ctx = logging.ContextWithLoggable(ctx, s.uuid) - return bsgetter.AsyncGetBlocks(ctx, s.ctx, keys, s.notif, func(ctx context.Context, keys []cid.Cid) { select { diff --git a/bitswap/client/internal/session/session_test.go b/bitswap/client/internal/session/session_test.go index eb99380b1..e7ab8737a 100644 --- a/bitswap/client/internal/session/session_test.go +++ b/bitswap/client/internal/session/session_test.go @@ -15,7 +15,7 @@ import ( cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" delay "github.com/ipfs/go-ipfs-delay" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) type mockSessionMgr struct { diff --git a/bitswap/client/internal/session/sessionwantsender.go b/bitswap/client/internal/session/sessionwantsender.go index f26356b74..9286d90eb 100644 --- a/bitswap/client/internal/session/sessionwantsender.go +++ b/bitswap/client/internal/session/sessionwantsender.go @@ -6,7 +6,7 @@ import ( bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) const ( diff --git a/bitswap/client/internal/session/sessionwantsender_test.go b/bitswap/client/internal/session/sessionwantsender_test.go index 079d73fa1..733be5a44 100644 --- a/bitswap/client/internal/session/sessionwantsender_test.go +++ b/bitswap/client/internal/session/sessionwantsender_test.go @@ -11,7 +11,7 @@ import ( bsspm "github.com/ipfs/go-bitswap/client/internal/sessionpeermanager" "github.com/ipfs/go-bitswap/internal/testutil" cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) type sentWants struct { diff --git a/bitswap/client/internal/sessionmanager/sessionmanager.go b/bitswap/client/internal/sessionmanager/sessionmanager.go index 
174b8b90c..5ac7a8a0a 100644 --- a/bitswap/client/internal/sessionmanager/sessionmanager.go +++ b/bitswap/client/internal/sessionmanager/sessionmanager.go @@ -17,7 +17,7 @@ import ( bssession "github.com/ipfs/go-bitswap/client/internal/session" bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" exchange "github.com/ipfs/go-ipfs-exchange-interface" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) // Session is a session that is managed by the session manager diff --git a/bitswap/client/internal/sessionmanager/sessionmanager_test.go b/bitswap/client/internal/sessionmanager/sessionmanager_test.go index 00e07696a..c22028d3a 100644 --- a/bitswap/client/internal/sessionmanager/sessionmanager_test.go +++ b/bitswap/client/internal/sessionmanager/sessionmanager_test.go @@ -18,7 +18,7 @@ import ( blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) type fakeSession struct { diff --git a/bitswap/client/internal/sessionpeermanager/sessionpeermanager.go b/bitswap/client/internal/sessionpeermanager/sessionpeermanager.go index db46691b9..35784d7b7 100644 --- a/bitswap/client/internal/sessionpeermanager/sessionpeermanager.go +++ b/bitswap/client/internal/sessionpeermanager/sessionpeermanager.go @@ -6,7 +6,7 @@ import ( logging "github.com/ipfs/go-log" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) var log = logging.Logger("bs:sprmgr") diff --git a/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go b/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go index 746333c22..ac82362d7 100644 --- a/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/ipfs/go-bitswap/internal/testutil" - 
peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) type fakePeerTagger struct { diff --git a/bitswap/internal/testutil/testutil.go b/bitswap/internal/testutil/testutil.go index 2bce60e56..355f94623 100644 --- a/bitswap/internal/testutil/testutil.go +++ b/bitswap/internal/testutil/testutil.go @@ -9,7 +9,7 @@ import ( blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) var blockGenerator = blocksutil.NewBlockGenerator() diff --git a/bitswap/message/message.go b/bitswap/message/message.go index 43ac11d41..b9c7a46b8 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -14,7 +14,7 @@ import ( msgio "github.com/libp2p/go-msgio" u "github.com/ipfs/go-ipfs-util" - "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p/core/network" ) // BitSwapMessage is the basic interface for interacting building, encoding, diff --git a/bitswap/network/connecteventmanager.go b/bitswap/network/connecteventmanager.go index 723bf614e..88337fce3 100644 --- a/bitswap/network/connecteventmanager.go +++ b/bitswap/network/connecteventmanager.go @@ -3,7 +3,7 @@ package network import ( "sync" - "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p/core/peer" ) type ConnectionListener interface { diff --git a/bitswap/network/connecteventmanager_test.go b/bitswap/network/connecteventmanager_test.go index 4ed7edd73..6696c028f 100644 --- a/bitswap/network/connecteventmanager_test.go +++ b/bitswap/network/connecteventmanager_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/ipfs/go-bitswap/internal/testutil" - "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" ) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 018d57ba0..c58c3169e 100644 --- 
a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -5,24 +5,24 @@ import ( "time" bsmsg "github.com/ipfs/go-bitswap/message" + "github.com/ipfs/go-bitswap/network/internal" cid "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/connmgr" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-core/protocol" + "github.com/libp2p/go-libp2p/core/connmgr" + "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/p2p/protocol/ping" ) var ( // ProtocolBitswapNoVers is equivalent to the legacy bitswap protocol - ProtocolBitswapNoVers protocol.ID = "/ipfs/bitswap" + ProtocolBitswapNoVers = internal.ProtocolBitswapNoVers // ProtocolBitswapOneZero is the prefix for the legacy bitswap protocol - ProtocolBitswapOneZero protocol.ID = "/ipfs/bitswap/1.0.0" + ProtocolBitswapOneZero = internal.ProtocolBitswapOneZero // ProtocolBitswapOneOne is the the prefix for version 1.1.0 - ProtocolBitswapOneOne protocol.ID = "/ipfs/bitswap/1.1.0" + ProtocolBitswapOneOne = internal.ProtocolBitswapOneOne // ProtocolBitswap is the current version of the bitswap protocol: 1.2.0 - ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.2.0" + ProtocolBitswap = internal.ProtocolBitswap ) // BitSwapNetwork provides network connectivity for BitSwap sessions. 
diff --git a/bitswap/network/internal/default.go b/bitswap/network/internal/default.go new file mode 100644 index 000000000..13f4936a8 --- /dev/null +++ b/bitswap/network/internal/default.go @@ -0,0 +1,23 @@ +package internal + +import ( + "github.com/libp2p/go-libp2p/core/protocol" +) + +var ( + // ProtocolBitswapNoVers is equivalent to the legacy bitswap protocol + ProtocolBitswapNoVers protocol.ID = "/ipfs/bitswap" + // ProtocolBitswapOneZero is the prefix for the legacy bitswap protocol + ProtocolBitswapOneZero protocol.ID = "/ipfs/bitswap/1.0.0" + // ProtocolBitswapOneOne is the the prefix for version 1.1.0 + ProtocolBitswapOneOne protocol.ID = "/ipfs/bitswap/1.1.0" + // ProtocolBitswap is the current version of the bitswap protocol: 1.2.0 + ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.2.0" +) + +var DefaultProtocols = []protocol.ID{ + ProtocolBitswap, + ProtocolBitswapOneOne, + ProtocolBitswapOneZero, + ProtocolBitswapNoVers, +} diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 9762f5601..292535a5f 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -9,16 +9,17 @@ import ( "time" bsmsg "github.com/ipfs/go-bitswap/message" + "github.com/ipfs/go-bitswap/network/internal" cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" - "github.com/libp2p/go-libp2p-core/connmgr" - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - peerstore "github.com/libp2p/go-libp2p-core/peerstore" - "github.com/libp2p/go-libp2p-core/protocol" - "github.com/libp2p/go-libp2p-core/routing" + "github.com/libp2p/go-libp2p/core/connmgr" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + peerstore "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/libp2p/go-libp2p/core/routing" 
"github.com/libp2p/go-libp2p/p2p/protocol/ping" msgio "github.com/libp2p/go-msgio" ma "github.com/multiformats/go-multiaddr" @@ -54,14 +55,7 @@ func NewFromIpfsHost(host host.Host, r routing.ContentRouting, opts ...NetOpt) B } func processSettings(opts ...NetOpt) Settings { - s := Settings{ - SupportedProtocols: []protocol.ID{ - ProtocolBitswap, - ProtocolBitswapOneOne, - ProtocolBitswapOneZero, - ProtocolBitswapNoVers, - }, - } + s := Settings{SupportedProtocols: internal.DefaultProtocols} for _, opt := range opts { opt(&s) } diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index 9e0694896..61f501a55 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -10,17 +10,18 @@ import ( bsmsg "github.com/ipfs/go-bitswap/message" pb "github.com/ipfs/go-bitswap/message/pb" bsnet "github.com/ipfs/go-bitswap/network" + "github.com/ipfs/go-bitswap/network/internal" tn "github.com/ipfs/go-bitswap/testnet" ds "github.com/ipfs/go-datastore" blocksutil "github.com/ipfs/go-ipfs-blocksutil" mockrouting "github.com/ipfs/go-ipfs-routing/mock" "github.com/multiformats/go-multistream" - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-core/protocol" tnet "github.com/libp2p/go-libp2p-testing/net" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" ) @@ -75,6 +76,7 @@ type ErrStream struct { lk sync.Mutex err error timingOut bool + closed bool } type ErrHost struct { @@ -98,6 +100,14 @@ func (es *ErrStream) Write(b []byte) (int, error) { return es.Stream.Write(b) } +func (es *ErrStream) Close() error { + es.lk.Lock() + es.closed = true + es.lk.Unlock() + + return es.Stream.Close() +} + func (eh *ErrHost) Connect(ctx context.Context, pi 
peer.AddrInfo) error { eh.lk.Lock() defer eh.lk.Unlock() @@ -157,7 +167,8 @@ func TestMessageSendAndReceive(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() - mn := mocknet.New(ctx) + mn := mocknet.New() + defer mn.Close() mr := mockrouting.NewServer() streamNet, err := tn.StreamNet(ctx, mn, mr) if err != nil { @@ -260,7 +271,8 @@ func TestMessageSendAndReceive(t *testing.T) { func prepareNetwork(t *testing.T, ctx context.Context, p1 tnet.Identity, r1 *receiver, p2 tnet.Identity, r2 *receiver) (*ErrHost, bsnet.BitSwapNetwork, *ErrHost, bsnet.BitSwapNetwork, bsmsg.BitSwapMessage) { // create network - mn := mocknet.New(ctx) + mn := mocknet.New() + defer mn.Close() mr := mockrouting.NewServer() // Host 1 @@ -439,7 +451,8 @@ func TestMessageSendNotSupportedResponse(t *testing.T) { func TestSupportsHave(t *testing.T) { ctx := context.Background() - mn := mocknet.New(ctx) + mn := mocknet.New() + defer mn.Close() mr := mockrouting.NewServer() streamNet, err := tn.StreamNet(ctx, mn, mr) if err != nil { @@ -497,24 +510,7 @@ func testNetworkCounters(t *testing.T, n1 int, n2 int) { p2 := tnet.RandIdentityOrFatal(t) r2 := newReceiver() - var wg1, wg2 sync.WaitGroup - r1.listener = &network.NotifyBundle{ - OpenedStreamF: func(n network.Network, s network.Stream) { - wg1.Add(1) - }, - ClosedStreamF: func(n network.Network, s network.Stream) { - wg1.Done() - }, - } - r2.listener = &network.NotifyBundle{ - OpenedStreamF: func(n network.Network, s network.Stream) { - wg2.Add(1) - }, - ClosedStreamF: func(n network.Network, s network.Stream) { - wg2.Done() - }, - } - _, bsnet1, _, bsnet2, msg := prepareNetwork(t, ctx, p1, r1, p2, r2) + h1, bsnet1, h2, bsnet2, msg := prepareNetwork(t, ctx, p1, r1, p2, r2) for n := 0; n < n1; n++ { ctx, cancel := context.WithTimeout(ctx, time.Second) @@ -579,12 +575,75 @@ func testNetworkCounters(t *testing.T, n1 int, n2 int) { ctxto, cancelto := context.WithTimeout(ctx, 5*time.Second) 
defer cancelto() ctxwait, cancelwait := context.WithCancel(ctx) - defer cancelwait() go func() { - wg1.Wait() - wg2.Wait() + // Wait until all streams are closed + throttler := time.NewTicker(time.Millisecond * 5) + defer throttler.Stop() + for { + h1.lk.Lock() + var done bool + for _, s := range h1.streams { + s.lk.Lock() + closed := s.closed + closed = closed || s.err != nil + s.lk.Unlock() + if closed { + continue + } + pid := s.Protocol() + for _, v := range internal.DefaultProtocols { + if pid == v { + goto ElseH1 + } + } + } + done = true + ElseH1: + h1.lk.Unlock() + if done { + break + } + select { + case <-ctxto.Done(): + return + case <-throttler.C: + } + } + + for { + h2.lk.Lock() + var done bool + for _, s := range h2.streams { + s.lk.Lock() + closed := s.closed + closed = closed || s.err != nil + s.lk.Unlock() + if closed { + continue + } + pid := s.Protocol() + for _, v := range internal.DefaultProtocols { + if pid == v { + goto ElseH2 + } + } + } + done = true + ElseH2: + h2.lk.Unlock() + if done { + break + } + select { + case <-ctxto.Done(): + return + case <-throttler.C: + } + } + cancelwait() }() + select { case <-ctxto.Done(): t.Fatal("network streams closing timed out") diff --git a/bitswap/network/options.go b/bitswap/network/options.go index 1df8963a3..10d02e5e9 100644 --- a/bitswap/network/options.go +++ b/bitswap/network/options.go @@ -1,6 +1,6 @@ package network -import "github.com/libp2p/go-libp2p-core/protocol" +import "github.com/libp2p/go-libp2p/core/protocol" type NetOpt func(*Settings) diff --git a/bitswap/sendOnlyTracer.go b/bitswap/sendOnlyTracer.go index 1a12403fa..d01d3148e 100644 --- a/bitswap/sendOnlyTracer.go +++ b/bitswap/sendOnlyTracer.go @@ -3,7 +3,7 @@ package bitswap import ( "github.com/ipfs/go-bitswap/message" "github.com/ipfs/go-bitswap/tracer" - "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p/core/peer" ) type sendOnlyTracer interface { diff --git a/bitswap/server/internal/decision/engine.go 
b/bitswap/server/internal/decision/engine.go index a53a6274f..5a7df4b7d 100644 --- a/bitswap/server/internal/decision/engine.go +++ b/bitswap/server/internal/decision/engine.go @@ -23,7 +23,7 @@ import ( "github.com/ipfs/go-peertaskqueue/peertask" "github.com/ipfs/go-peertaskqueue/peertracker" process "github.com/jbenet/goprocess" - "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p/core/peer" ) // TODO consider taking responsibility for other types of requests. For diff --git a/bitswap/server/internal/decision/engine_test.go b/bitswap/server/internal/decision/engine_test.go index 7484a7aaa..8872eeb97 100644 --- a/bitswap/server/internal/decision/engine_test.go +++ b/bitswap/server/internal/decision/engine_test.go @@ -21,8 +21,8 @@ import ( dssync "github.com/ipfs/go-datastore/sync" blockstore "github.com/ipfs/go-ipfs-blockstore" process "github.com/jbenet/goprocess" - peer "github.com/libp2p/go-libp2p-core/peer" - libp2ptest "github.com/libp2p/go-libp2p-core/test" + peer "github.com/libp2p/go-libp2p/core/peer" + libp2ptest "github.com/libp2p/go-libp2p/core/test" ) type peerTag struct { diff --git a/bitswap/server/internal/decision/ledger.go b/bitswap/server/internal/decision/ledger.go index a848f7b03..9edc27563 100644 --- a/bitswap/server/internal/decision/ledger.go +++ b/bitswap/server/internal/decision/ledger.go @@ -7,7 +7,7 @@ import ( pb "github.com/ipfs/go-bitswap/message/pb" "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p/core/peer" ) func newLedger(p peer.ID) *ledger { diff --git a/bitswap/server/internal/decision/peer_ledger.go b/bitswap/server/internal/decision/peer_ledger.go index ecf41e6b1..c22322b28 100644 --- a/bitswap/server/internal/decision/peer_ledger.go +++ b/bitswap/server/internal/decision/peer_ledger.go @@ -2,7 +2,7 @@ package decision import ( "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p/core/peer" ) type peerLedger 
struct { diff --git a/bitswap/server/internal/decision/scoreledger.go b/bitswap/server/internal/decision/scoreledger.go index 188c998a3..dbcf69d85 100644 --- a/bitswap/server/internal/decision/scoreledger.go +++ b/bitswap/server/internal/decision/scoreledger.go @@ -5,7 +5,7 @@ import ( "time" "github.com/benbjohnson/clock" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) const ( diff --git a/bitswap/server/server.go b/bitswap/server/server.go index c9dbf4d98..db7733dc9 100644 --- a/bitswap/server/server.go +++ b/bitswap/server/server.go @@ -22,7 +22,7 @@ import ( "github.com/ipfs/go-metrics-interface" process "github.com/jbenet/goprocess" procctx "github.com/jbenet/goprocess/context" - "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p/core/peer" "go.uber.org/zap" ) diff --git a/bitswap/testinstance/testinstance.go b/bitswap/testinstance/testinstance.go index 6522de3d4..b4936996c 100644 --- a/bitswap/testinstance/testinstance.go +++ b/bitswap/testinstance/testinstance.go @@ -12,9 +12,9 @@ import ( ds_sync "github.com/ipfs/go-datastore/sync" blockstore "github.com/ipfs/go-ipfs-blockstore" delay "github.com/ipfs/go-ipfs-delay" - peer "github.com/libp2p/go-libp2p-core/peer" - p2ptestutil "github.com/libp2p/go-libp2p-netutil" tnet "github.com/libp2p/go-libp2p-testing/net" + p2ptestutil "github.com/libp2p/go-libp2p-testing/netutil" + peer "github.com/libp2p/go-libp2p/core/peer" ) // NewTestInstanceGenerator generates a new InstanceGenerator for the given diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index b49dd80ad..ed5c2ab7a 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -3,8 +3,8 @@ package bitswap import ( bsnet "github.com/ipfs/go-bitswap/network" - "github.com/libp2p/go-libp2p-core/peer" tnet "github.com/libp2p/go-libp2p-testing/net" + "github.com/libp2p/go-libp2p/core/peer" ) // Network is an interface for generating bitswap network 
interfaces diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index fbd1fa41a..1bac2be73 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -12,8 +12,8 @@ import ( delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" - "github.com/libp2p/go-libp2p-core/peer" tnet "github.com/libp2p/go-libp2p-testing/net" + "github.com/libp2p/go-libp2p/core/peer" ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 5e6430691..8a7a6d2e9 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -8,8 +8,8 @@ import ( ds "github.com/ipfs/go-datastore" mockrouting "github.com/ipfs/go-ipfs-routing/mock" - "github.com/libp2p/go-libp2p-core/peer" tnet "github.com/libp2p/go-libp2p-testing/net" + "github.com/libp2p/go-libp2p/core/peer" mockpeernet "github.com/libp2p/go-libp2p/p2p/net/mock" ) diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 975bf98b3..68f1bff49 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -15,11 +15,11 @@ import ( delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" - "github.com/libp2p/go-libp2p-core/connmgr" - "github.com/libp2p/go-libp2p-core/peer" - protocol "github.com/libp2p/go-libp2p-core/protocol" - "github.com/libp2p/go-libp2p-core/routing" tnet "github.com/libp2p/go-libp2p-testing/net" + "github.com/libp2p/go-libp2p/core/connmgr" + "github.com/libp2p/go-libp2p/core/peer" + protocol "github.com/libp2p/go-libp2p/core/protocol" + "github.com/libp2p/go-libp2p/core/routing" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" "github.com/libp2p/go-libp2p/p2p/protocol/ping" ) diff --git a/bitswap/tracer/tracer.go b/bitswap/tracer/tracer.go index c5b70b7cd..af1d39d82 100644 --- a/bitswap/tracer/tracer.go +++ b/bitswap/tracer/tracer.go @@ -2,7 +2,7 @@ package tracer import ( bsmsg 
"github.com/ipfs/go-bitswap/message" - peer "github.com/libp2p/go-libp2p-core/peer" + peer "github.com/libp2p/go-libp2p/core/peer" ) // Tracer provides methods to access all messages sent and received by Bitswap. From 1417d8bdb877c2af96c05e3b687ac92cdd9403f9 Mon Sep 17 00:00:00 2001 From: Jorropo Date: Thu, 8 Sep 2022 17:37:26 +0200 Subject: [PATCH 1032/1035] chore: fix incorrect log message when a bad option is passed This commit was moved from ipfs/go-bitswap@64bf4e99d5b62cfc0315035efa47dc9f944473e1 --- bitswap/bitswap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index cc98a7dbc..226ce83c4 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -73,7 +73,7 @@ func New(ctx context.Context, net network.BitSwapNetwork, bstore blockstore.Bloc case option: typedOption(bs) default: - panic(fmt.Errorf("unknown option type passed to bitswap.New, got: %T, %v; expected: %T, %T or %T", typedOption, typedOption, server.Option(nil), client.Option(nil), server.Option(nil))) + panic(fmt.Errorf("unknown option type passed to bitswap.New, got: %T, %v; expected: %T, %T or %T", typedOption, typedOption, server.Option(nil), client.Option(nil), option{})) } } From 38d044c516331a41529b8eff1d3ab1e998817856 Mon Sep 17 00:00:00 2001 From: Jorropo Date: Thu, 8 Sep 2022 17:43:29 +0200 Subject: [PATCH 1033/1035] fix: incorrect type in the WithTracer polyfill option This commit was moved from ipfs/go-bitswap@1ccd1517acd49bf0ae2bceb0edd21dae958985b2 --- bitswap/bitswap.go | 2 +- bitswap/options.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 226ce83c4..ea776c365 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -73,7 +73,7 @@ func New(ctx context.Context, net network.BitSwapNetwork, bstore blockstore.Bloc case option: typedOption(bs) default: - panic(fmt.Errorf("unknown option type passed to bitswap.New, got: %T, %v; expected: %T, %T or %T", 
typedOption, typedOption, server.Option(nil), client.Option(nil), option{})) + panic(fmt.Errorf("unknown option type passed to bitswap.New, got: %T, %v; expected: %T, %T or %T", typedOption, typedOption, server.Option(nil), client.Option(nil), option(nil))) } } diff --git a/bitswap/options.go b/bitswap/options.go index 934396a75..6a1b59137 100644 --- a/bitswap/options.go +++ b/bitswap/options.go @@ -72,8 +72,8 @@ func SetSimulateDontHavesOnTimeout(send bool) Option { func WithTracer(tap tracer.Tracer) Option { // Only trace the server, both receive the same messages anyway return Option{ - func(bs *Bitswap) { + option(func(bs *Bitswap) { bs.tracer = tap - }, + }), } } From a72a7f431e843a8d0d2aaf8f11180dc396a06e0c Mon Sep 17 00:00:00 2001 From: Jorropo Date: Tue, 13 Sep 2022 15:54:35 +0200 Subject: [PATCH 1034/1035] fix: create a copy of the protocol slice in network.processSettings Fixes #584 This commit was moved from ipfs/go-bitswap@2545a3fa44925584b81b8a4d53d1f13b68831cdf --- bitswap/network/ipfs_impl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 292535a5f..392a00ed2 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -55,7 +55,7 @@ func NewFromIpfsHost(host host.Host, r routing.ContentRouting, opts ...NetOpt) B } func processSettings(opts ...NetOpt) Settings { - s := Settings{SupportedProtocols: internal.DefaultProtocols} + s := Settings{SupportedProtocols: append([]protocol.ID(nil), internal.DefaultProtocols...)} for _, opt := range opts { opt(&s) } From d42c44b3fbf140ee1c81e86ba27b64250b95efb3 Mon Sep 17 00:00:00 2001 From: Gus Eggert Date: Fri, 2 Dec 2022 10:55:26 -0500 Subject: [PATCH 1035/1035] chore: import go-bitswap --- bitswap/benchmarks_test.go | 10 +- bitswap/bitswap.go | 12 +- bitswap/bitswap_test.go | 10 +- bitswap/client/bitswap_with_sessions_test.go | 8 +- bitswap/client/client.go | 32 +- .../blockpresencemanager_test.go | 2 +- 
bitswap/client/internal/getter/getter.go | 4 +- .../messagequeue/donthavetimeoutmgr_test.go | 2 +- .../internal/messagequeue/messagequeue.go | 8 +- .../messagequeue/messagequeue_test.go | 8 +- .../internal/peermanager/peermanager_test.go | 2 +- .../peermanager/peerwantmanager_test.go | 2 +- .../providerquerymanager_test.go | 2 +- .../session/peerresponsetracker_test.go | 2 +- .../session/sentwantblockstracker_test.go | 2 +- bitswap/client/internal/session/session.go | 12 +- .../client/internal/session/session_test.go | 12 +- .../internal/session/sessionwants_test.go | 2 +- .../internal/session/sessionwantsender.go | 2 +- .../session/sessionwantsender_test.go | 8 +- .../client/internal/session/wantinfo_test.go | 2 +- .../sessioninterestmanager_test.go | 2 +- .../internal/sessionmanager/sessionmanager.go | 10 +- .../sessionmanager/sessionmanager_test.go | 12 +- .../sessionpeermanager_test.go | 2 +- bitswap/client/wantlist/wantlist.go | 2 +- bitswap/client/wantlist/wantlist_test.go | 2 +- bitswap/decision/forward.go | 2 +- bitswap/forward.go | 4 +- bitswap/internal/testutil/testutil.go | 4 +- bitswap/message/message.go | 4 +- bitswap/message/message_test.go | 4 +- bitswap/message/pb/cid_test.go | 2 +- bitswap/network/connecteventmanager_test.go | 2 +- bitswap/network/interface.go | 4 +- bitswap/network/ipfs_impl.go | 4 +- bitswap/network/ipfs_impl_test.go | 10 +- bitswap/options.go | 6 +- bitswap/sendOnlyTracer.go | 4 +- bitswap/server/forward.go | 2 +- .../decision/blockstoremanager_test.go | 2 +- bitswap/server/internal/decision/engine.go | 10 +- .../server/internal/decision/engine_test.go | 6 +- bitswap/server/internal/decision/ledger.go | 4 +- .../internal/decision/taskmerger_test.go | 2 +- bitswap/server/server.go | 14 +- bitswap/testinstance/testinstance.go | 6 +- bitswap/testnet/interface.go | 2 +- bitswap/testnet/network_test.go | 4 +- bitswap/testnet/peernet.go | 2 +- bitswap/testnet/virtual.go | 4 +- bitswap/tracer/tracer.go | 2 +- 
bitswap/wantlist/forward.go | 2 +- go.mod | 74 ++++- go.sum | 288 ++++++++++++++++-- 55 files changed, 473 insertions(+), 175 deletions(-) diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index ef3582b32..805c800e4 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -12,14 +12,14 @@ import ( "testing" "time" - "github.com/ipfs/go-bitswap/internal/testutil" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" blocks "github.com/ipfs/go-block-format" protocol "github.com/libp2p/go-libp2p/core/protocol" - "github.com/ipfs/go-bitswap" - bsnet "github.com/ipfs/go-bitswap/network" - testinstance "github.com/ipfs/go-bitswap/testinstance" - tn "github.com/ipfs/go-bitswap/testnet" + "github.com/ipfs/go-libipfs/bitswap" + bsnet "github.com/ipfs/go-libipfs/bitswap/network" + testinstance "github.com/ipfs/go-libipfs/bitswap/testinstance" + tn "github.com/ipfs/go-libipfs/bitswap/testnet" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" mockrouting "github.com/ipfs/go-ipfs-routing/mock" diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index ea776c365..396b53adc 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -4,12 +4,12 @@ import ( "context" "fmt" - "github.com/ipfs/go-bitswap/client" - "github.com/ipfs/go-bitswap/internal/defaults" - "github.com/ipfs/go-bitswap/message" - "github.com/ipfs/go-bitswap/network" - "github.com/ipfs/go-bitswap/server" - "github.com/ipfs/go-bitswap/tracer" + "github.com/ipfs/go-libipfs/bitswap/client" + "github.com/ipfs/go-libipfs/bitswap/internal/defaults" + "github.com/ipfs/go-libipfs/bitswap/message" + "github.com/ipfs/go-libipfs/bitswap/network" + "github.com/ipfs/go-libipfs/bitswap/server" + "github.com/ipfs/go-libipfs/bitswap/tracer" "github.com/ipfs/go-metrics-interface" blocks "github.com/ipfs/go-block-format" diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 2ab4547e2..6a5367295 100644 --- a/bitswap/bitswap_test.go +++ 
b/bitswap/bitswap_test.go @@ -9,11 +9,11 @@ import ( "testing" "time" - "github.com/ipfs/go-bitswap" - bsmsg "github.com/ipfs/go-bitswap/message" - "github.com/ipfs/go-bitswap/server" - testinstance "github.com/ipfs/go-bitswap/testinstance" - tn "github.com/ipfs/go-bitswap/testnet" + "github.com/ipfs/go-libipfs/bitswap" + bsmsg "github.com/ipfs/go-libipfs/bitswap/message" + "github.com/ipfs/go-libipfs/bitswap/server" + testinstance "github.com/ipfs/go-libipfs/bitswap/testinstance" + tn "github.com/ipfs/go-libipfs/bitswap/testnet" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" detectrace "github.com/ipfs/go-detect-race" diff --git a/bitswap/client/bitswap_with_sessions_test.go b/bitswap/client/bitswap_with_sessions_test.go index 5e4d2454f..0af20c288 100644 --- a/bitswap/client/bitswap_with_sessions_test.go +++ b/bitswap/client/bitswap_with_sessions_test.go @@ -6,10 +6,10 @@ import ( "testing" "time" - "github.com/ipfs/go-bitswap" - "github.com/ipfs/go-bitswap/client/internal/session" - testinstance "github.com/ipfs/go-bitswap/testinstance" - tn "github.com/ipfs/go-bitswap/testnet" + "github.com/ipfs/go-libipfs/bitswap" + "github.com/ipfs/go-libipfs/bitswap/client/internal/session" + testinstance "github.com/ipfs/go-libipfs/bitswap/testinstance" + tn "github.com/ipfs/go-libipfs/bitswap/testnet" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" diff --git a/bitswap/client/client.go b/bitswap/client/client.go index ca94da8c1..bb34081cc 100644 --- a/bitswap/client/client.go +++ b/bitswap/client/client.go @@ -13,22 +13,22 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" - bsgetter "github.com/ipfs/go-bitswap/client/internal/getter" - bsmq "github.com/ipfs/go-bitswap/client/internal/messagequeue" - "github.com/ipfs/go-bitswap/client/internal/notifications" - bspm 
"github.com/ipfs/go-bitswap/client/internal/peermanager" - bspqm "github.com/ipfs/go-bitswap/client/internal/providerquerymanager" - bssession "github.com/ipfs/go-bitswap/client/internal/session" - bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" - bssm "github.com/ipfs/go-bitswap/client/internal/sessionmanager" - bsspm "github.com/ipfs/go-bitswap/client/internal/sessionpeermanager" - "github.com/ipfs/go-bitswap/internal" - "github.com/ipfs/go-bitswap/internal/defaults" - bsmsg "github.com/ipfs/go-bitswap/message" - bmetrics "github.com/ipfs/go-bitswap/metrics" - bsnet "github.com/ipfs/go-bitswap/network" - "github.com/ipfs/go-bitswap/tracer" + bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager" + bsgetter "github.com/ipfs/go-libipfs/bitswap/client/internal/getter" + bsmq "github.com/ipfs/go-libipfs/bitswap/client/internal/messagequeue" + "github.com/ipfs/go-libipfs/bitswap/client/internal/notifications" + bspm "github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager" + bspqm "github.com/ipfs/go-libipfs/bitswap/client/internal/providerquerymanager" + bssession "github.com/ipfs/go-libipfs/bitswap/client/internal/session" + bssim "github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager" + bssm "github.com/ipfs/go-libipfs/bitswap/client/internal/sessionmanager" + bsspm "github.com/ipfs/go-libipfs/bitswap/client/internal/sessionpeermanager" + "github.com/ipfs/go-libipfs/bitswap/internal" + "github.com/ipfs/go-libipfs/bitswap/internal/defaults" + bsmsg "github.com/ipfs/go-libipfs/bitswap/message" + bmetrics "github.com/ipfs/go-libipfs/bitswap/metrics" + bsnet "github.com/ipfs/go-libipfs/bitswap/network" + "github.com/ipfs/go-libipfs/bitswap/tracer" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" blockstore "github.com/ipfs/go-ipfs-blockstore" diff --git a/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go 
b/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go index e6adfc617..3fdbf66e2 100644 --- a/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go +++ b/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go @@ -3,7 +3,7 @@ package blockpresencemanager import ( "testing" - "github.com/ipfs/go-bitswap/internal/testutil" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" peer "github.com/libp2p/go-libp2p/core/peer" cid "github.com/ipfs/go-cid" diff --git a/bitswap/client/internal/getter/getter.go b/bitswap/client/internal/getter/getter.go index 5a58e187b..66f05d037 100644 --- a/bitswap/client/internal/getter/getter.go +++ b/bitswap/client/internal/getter/getter.go @@ -4,8 +4,8 @@ import ( "context" "errors" - "github.com/ipfs/go-bitswap/client/internal" - notifications "github.com/ipfs/go-bitswap/client/internal/notifications" + "github.com/ipfs/go-libipfs/bitswap/client/internal" + notifications "github.com/ipfs/go-libipfs/bitswap/client/internal/notifications" logging "github.com/ipfs/go-log" blocks "github.com/ipfs/go-block-format" diff --git a/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go b/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go index 6a31242af..541230b95 100644 --- a/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go +++ b/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/ipfs/go-bitswap/internal/testutil" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p/p2p/protocol/ping" ) diff --git a/bitswap/client/internal/messagequeue/messagequeue.go b/bitswap/client/internal/messagequeue/messagequeue.go index b80d71eef..f6083f7c5 100644 --- a/bitswap/client/internal/messagequeue/messagequeue.go +++ b/bitswap/client/internal/messagequeue/messagequeue.go @@ -7,10 +7,10 @@ import ( "time" 
"github.com/benbjohnson/clock" - bswl "github.com/ipfs/go-bitswap/client/wantlist" - bsmsg "github.com/ipfs/go-bitswap/message" - pb "github.com/ipfs/go-bitswap/message/pb" - bsnet "github.com/ipfs/go-bitswap/network" + bswl "github.com/ipfs/go-libipfs/bitswap/client/wantlist" + bsmsg "github.com/ipfs/go-libipfs/bitswap/message" + pb "github.com/ipfs/go-libipfs/bitswap/message/pb" + bsnet "github.com/ipfs/go-libipfs/bitswap/network" cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" peer "github.com/libp2p/go-libp2p/core/peer" diff --git a/bitswap/client/internal/messagequeue/messagequeue_test.go b/bitswap/client/internal/messagequeue/messagequeue_test.go index 337435e52..6caff10dd 100644 --- a/bitswap/client/internal/messagequeue/messagequeue_test.go +++ b/bitswap/client/internal/messagequeue/messagequeue_test.go @@ -10,12 +10,12 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/ipfs/go-bitswap/internal/testutil" - pb "github.com/ipfs/go-bitswap/message/pb" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" + pb "github.com/ipfs/go-libipfs/bitswap/message/pb" cid "github.com/ipfs/go-cid" - bsmsg "github.com/ipfs/go-bitswap/message" - bsnet "github.com/ipfs/go-bitswap/network" + bsmsg "github.com/ipfs/go-libipfs/bitswap/message" + bsnet "github.com/ipfs/go-libipfs/bitswap/network" peer "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/p2p/protocol/ping" ) diff --git a/bitswap/client/internal/peermanager/peermanager_test.go b/bitswap/client/internal/peermanager/peermanager_test.go index 231f89311..2cad0058e 100644 --- a/bitswap/client/internal/peermanager/peermanager_test.go +++ b/bitswap/client/internal/peermanager/peermanager_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/ipfs/go-bitswap/internal/testutil" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p/core/peer" diff --git 
a/bitswap/client/internal/peermanager/peerwantmanager_test.go b/bitswap/client/internal/peermanager/peerwantmanager_test.go index fdc223d10..bfdf671f1 100644 --- a/bitswap/client/internal/peermanager/peerwantmanager_test.go +++ b/bitswap/client/internal/peermanager/peerwantmanager_test.go @@ -3,7 +3,7 @@ package peermanager import ( "testing" - "github.com/ipfs/go-bitswap/internal/testutil" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p/core/peer" ) diff --git a/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go b/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go index 2ca2ffaf6..57590f883 100644 --- a/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go +++ b/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/ipfs/go-bitswap/internal/testutil" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p/core/peer" diff --git a/bitswap/client/internal/session/peerresponsetracker_test.go b/bitswap/client/internal/session/peerresponsetracker_test.go index f1f58cd99..42372ab00 100644 --- a/bitswap/client/internal/session/peerresponsetracker_test.go +++ b/bitswap/client/internal/session/peerresponsetracker_test.go @@ -4,7 +4,7 @@ import ( "math" "testing" - "github.com/ipfs/go-bitswap/internal/testutil" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" peer "github.com/libp2p/go-libp2p/core/peer" ) diff --git a/bitswap/client/internal/session/sentwantblockstracker_test.go b/bitswap/client/internal/session/sentwantblockstracker_test.go index 2449840c9..9ef938aa2 100644 --- a/bitswap/client/internal/session/sentwantblockstracker_test.go +++ b/bitswap/client/internal/session/sentwantblockstracker_test.go @@ -3,7 +3,7 @@ package session import ( "testing" - 
"github.com/ipfs/go-bitswap/internal/testutil" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" ) func TestSendWantBlocksTracker(t *testing.T) { diff --git a/bitswap/client/internal/session/session.go b/bitswap/client/internal/session/session.go index 51e787e22..a3038e238 100644 --- a/bitswap/client/internal/session/session.go +++ b/bitswap/client/internal/session/session.go @@ -4,12 +4,12 @@ import ( "context" "time" - "github.com/ipfs/go-bitswap/client/internal" - bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" - bsgetter "github.com/ipfs/go-bitswap/client/internal/getter" - notifications "github.com/ipfs/go-bitswap/client/internal/notifications" - bspm "github.com/ipfs/go-bitswap/client/internal/peermanager" - bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" + "github.com/ipfs/go-libipfs/bitswap/client/internal" + bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager" + bsgetter "github.com/ipfs/go-libipfs/bitswap/client/internal/getter" + notifications "github.com/ipfs/go-libipfs/bitswap/client/internal/notifications" + bspm "github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager" + bssim "github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" diff --git a/bitswap/client/internal/session/session_test.go b/bitswap/client/internal/session/session_test.go index e7ab8737a..9ccf2a09c 100644 --- a/bitswap/client/internal/session/session_test.go +++ b/bitswap/client/internal/session/session_test.go @@ -6,12 +6,12 @@ import ( "testing" "time" - bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" - notifications "github.com/ipfs/go-bitswap/client/internal/notifications" - bspm "github.com/ipfs/go-bitswap/client/internal/peermanager" - bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" - bsspm 
"github.com/ipfs/go-bitswap/client/internal/sessionpeermanager" - "github.com/ipfs/go-bitswap/internal/testutil" + bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager" + notifications "github.com/ipfs/go-libipfs/bitswap/client/internal/notifications" + bspm "github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager" + bssim "github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager" + bsspm "github.com/ipfs/go-libipfs/bitswap/client/internal/sessionpeermanager" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" delay "github.com/ipfs/go-ipfs-delay" diff --git a/bitswap/client/internal/session/sessionwants_test.go b/bitswap/client/internal/session/sessionwants_test.go index b6e6c94ff..173a99f37 100644 --- a/bitswap/client/internal/session/sessionwants_test.go +++ b/bitswap/client/internal/session/sessionwants_test.go @@ -3,7 +3,7 @@ package session import ( "testing" - "github.com/ipfs/go-bitswap/internal/testutil" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" ) diff --git a/bitswap/client/internal/session/sessionwantsender.go b/bitswap/client/internal/session/sessionwantsender.go index 9286d90eb..ee5bc1884 100644 --- a/bitswap/client/internal/session/sessionwantsender.go +++ b/bitswap/client/internal/session/sessionwantsender.go @@ -3,7 +3,7 @@ package session import ( "context" - bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" + bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p/core/peer" diff --git a/bitswap/client/internal/session/sessionwantsender_test.go b/bitswap/client/internal/session/sessionwantsender_test.go index 733be5a44..44e74516f 100644 --- a/bitswap/client/internal/session/sessionwantsender_test.go +++ b/bitswap/client/internal/session/sessionwantsender_test.go @@ 
-6,10 +6,10 @@ import ( "testing" "time" - bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" - bspm "github.com/ipfs/go-bitswap/client/internal/peermanager" - bsspm "github.com/ipfs/go-bitswap/client/internal/sessionpeermanager" - "github.com/ipfs/go-bitswap/internal/testutil" + bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager" + bspm "github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager" + bsspm "github.com/ipfs/go-libipfs/bitswap/client/internal/sessionpeermanager" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p/core/peer" ) diff --git a/bitswap/client/internal/session/wantinfo_test.go b/bitswap/client/internal/session/wantinfo_test.go index 8397d81fe..604a07514 100644 --- a/bitswap/client/internal/session/wantinfo_test.go +++ b/bitswap/client/internal/session/wantinfo_test.go @@ -3,7 +3,7 @@ package session import ( "testing" - "github.com/ipfs/go-bitswap/internal/testutil" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" ) func TestEmptyWantInfo(t *testing.T) { diff --git a/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go b/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go index 0bba66389..44cf971ff 100644 --- a/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go +++ b/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go @@ -3,7 +3,7 @@ package sessioninterestmanager import ( "testing" - "github.com/ipfs/go-bitswap/internal/testutil" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" ) diff --git a/bitswap/client/internal/sessionmanager/sessionmanager.go b/bitswap/client/internal/sessionmanager/sessionmanager.go index 5ac7a8a0a..c24a5ef31 100644 --- a/bitswap/client/internal/sessionmanager/sessionmanager.go +++ b/bitswap/client/internal/sessionmanager/sessionmanager.go @@ -11,11 
+11,11 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - "github.com/ipfs/go-bitswap/client/internal" - bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" - notifications "github.com/ipfs/go-bitswap/client/internal/notifications" - bssession "github.com/ipfs/go-bitswap/client/internal/session" - bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" + "github.com/ipfs/go-libipfs/bitswap/client/internal" + bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager" + notifications "github.com/ipfs/go-libipfs/bitswap/client/internal/notifications" + bssession "github.com/ipfs/go-libipfs/bitswap/client/internal/session" + bssim "github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager" exchange "github.com/ipfs/go-ipfs-exchange-interface" peer "github.com/libp2p/go-libp2p/core/peer" ) diff --git a/bitswap/client/internal/sessionmanager/sessionmanager_test.go b/bitswap/client/internal/sessionmanager/sessionmanager_test.go index c22028d3a..35fbe08ca 100644 --- a/bitswap/client/internal/sessionmanager/sessionmanager_test.go +++ b/bitswap/client/internal/sessionmanager/sessionmanager_test.go @@ -9,12 +9,12 @@ import ( delay "github.com/ipfs/go-ipfs-delay" - bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" - notifications "github.com/ipfs/go-bitswap/client/internal/notifications" - bspm "github.com/ipfs/go-bitswap/client/internal/peermanager" - bssession "github.com/ipfs/go-bitswap/client/internal/session" - bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" - "github.com/ipfs/go-bitswap/internal/testutil" + bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager" + notifications "github.com/ipfs/go-libipfs/bitswap/client/internal/notifications" + bspm "github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager" + bssession "github.com/ipfs/go-libipfs/bitswap/client/internal/session" + bssim 
"github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" diff --git a/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go b/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go index ac82362d7..8e27f2ab3 100644 --- a/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go @@ -4,7 +4,7 @@ import ( "sync" "testing" - "github.com/ipfs/go-bitswap/internal/testutil" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" peer "github.com/libp2p/go-libp2p/core/peer" ) diff --git a/bitswap/client/wantlist/wantlist.go b/bitswap/client/wantlist/wantlist.go index da54983e1..9e40e1604 100644 --- a/bitswap/client/wantlist/wantlist.go +++ b/bitswap/client/wantlist/wantlist.go @@ -5,7 +5,7 @@ package wantlist import ( "sort" - pb "github.com/ipfs/go-bitswap/message/pb" + pb "github.com/ipfs/go-libipfs/bitswap/message/pb" cid "github.com/ipfs/go-cid" ) diff --git a/bitswap/client/wantlist/wantlist_test.go b/bitswap/client/wantlist/wantlist_test.go index 2f64f3856..aeba28ab6 100644 --- a/bitswap/client/wantlist/wantlist_test.go +++ b/bitswap/client/wantlist/wantlist_test.go @@ -3,7 +3,7 @@ package wantlist import ( "testing" - pb "github.com/ipfs/go-bitswap/message/pb" + pb "github.com/ipfs/go-libipfs/bitswap/message/pb" cid "github.com/ipfs/go-cid" "github.com/stretchr/testify/require" ) diff --git a/bitswap/decision/forward.go b/bitswap/decision/forward.go index d19cda943..48306a60b 100644 --- a/bitswap/decision/forward.go +++ b/bitswap/decision/forward.go @@ -1,6 +1,6 @@ package decision -import "github.com/ipfs/go-bitswap/server" +import "github.com/ipfs/go-libipfs/bitswap/server" type ( // DEPRECATED use server.Receipt instead diff --git a/bitswap/forward.go b/bitswap/forward.go index 2beb7590f..59d32e525 
100644 --- a/bitswap/forward.go +++ b/bitswap/forward.go @@ -1,8 +1,8 @@ package bitswap import ( - "github.com/ipfs/go-bitswap/server" - "github.com/ipfs/go-bitswap/tracer" + "github.com/ipfs/go-libipfs/bitswap/server" + "github.com/ipfs/go-libipfs/bitswap/tracer" ) type ( diff --git a/bitswap/internal/testutil/testutil.go b/bitswap/internal/testutil/testutil.go index 355f94623..377dfb305 100644 --- a/bitswap/internal/testutil/testutil.go +++ b/bitswap/internal/testutil/testutil.go @@ -4,8 +4,8 @@ import ( "fmt" "math/rand" - "github.com/ipfs/go-bitswap/client/wantlist" - bsmsg "github.com/ipfs/go-bitswap/message" + "github.com/ipfs/go-libipfs/bitswap/client/wantlist" + bsmsg "github.com/ipfs/go-libipfs/bitswap/message" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" diff --git a/bitswap/message/message.go b/bitswap/message/message.go index b9c7a46b8..daa0ecbd8 100644 --- a/bitswap/message/message.go +++ b/bitswap/message/message.go @@ -5,8 +5,8 @@ import ( "errors" "io" - "github.com/ipfs/go-bitswap/client/wantlist" - pb "github.com/ipfs/go-bitswap/message/pb" + "github.com/ipfs/go-libipfs/bitswap/client/wantlist" + pb "github.com/ipfs/go-libipfs/bitswap/message/pb" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 46de49613..a01de0ab9 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -4,8 +4,8 @@ import ( "bytes" "testing" - "github.com/ipfs/go-bitswap/client/wantlist" - pb "github.com/ipfs/go-bitswap/message/pb" + "github.com/ipfs/go-libipfs/bitswap/client/wantlist" + pb "github.com/ipfs/go-libipfs/bitswap/message/pb" blocksutil "github.com/ipfs/go-ipfs-blocksutil" blocks "github.com/ipfs/go-block-format" diff --git a/bitswap/message/pb/cid_test.go b/bitswap/message/pb/cid_test.go index 3d4b87a78..d0d5d2b23 100644 --- a/bitswap/message/pb/cid_test.go +++ 
b/bitswap/message/pb/cid_test.go @@ -7,7 +7,7 @@ import ( "github.com/ipfs/go-cid" u "github.com/ipfs/go-ipfs-util" - pb "github.com/ipfs/go-bitswap/message/pb" + pb "github.com/ipfs/go-libipfs/bitswap/message/pb" ) func TestCID(t *testing.T) { diff --git a/bitswap/network/connecteventmanager_test.go b/bitswap/network/connecteventmanager_test.go index 6696c028f..8e2e5f268 100644 --- a/bitswap/network/connecteventmanager_test.go +++ b/bitswap/network/connecteventmanager_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/ipfs/go-bitswap/internal/testutil" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" ) diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index c58c3169e..7c6eeecd4 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -4,8 +4,8 @@ import ( "context" "time" - bsmsg "github.com/ipfs/go-bitswap/message" - "github.com/ipfs/go-bitswap/network/internal" + bsmsg "github.com/ipfs/go-libipfs/bitswap/message" + "github.com/ipfs/go-libipfs/bitswap/network/internal" cid "github.com/ipfs/go-cid" diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 392a00ed2..03344fa9c 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -8,8 +8,8 @@ import ( "sync/atomic" "time" - bsmsg "github.com/ipfs/go-bitswap/message" - "github.com/ipfs/go-bitswap/network/internal" + bsmsg "github.com/ipfs/go-libipfs/bitswap/message" + "github.com/ipfs/go-libipfs/bitswap/network/internal" cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log" diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index 61f501a55..b7b914c35 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -7,11 +7,11 @@ import ( "testing" "time" - bsmsg "github.com/ipfs/go-bitswap/message" - pb "github.com/ipfs/go-bitswap/message/pb" - bsnet 
"github.com/ipfs/go-bitswap/network" - "github.com/ipfs/go-bitswap/network/internal" - tn "github.com/ipfs/go-bitswap/testnet" + bsmsg "github.com/ipfs/go-libipfs/bitswap/message" + pb "github.com/ipfs/go-libipfs/bitswap/message/pb" + bsnet "github.com/ipfs/go-libipfs/bitswap/network" + "github.com/ipfs/go-libipfs/bitswap/network/internal" + tn "github.com/ipfs/go-libipfs/bitswap/testnet" ds "github.com/ipfs/go-datastore" blocksutil "github.com/ipfs/go-ipfs-blocksutil" mockrouting "github.com/ipfs/go-ipfs-routing/mock" diff --git a/bitswap/options.go b/bitswap/options.go index 6a1b59137..f797e3089 100644 --- a/bitswap/options.go +++ b/bitswap/options.go @@ -3,9 +3,9 @@ package bitswap import ( "time" - "github.com/ipfs/go-bitswap/client" - "github.com/ipfs/go-bitswap/server" - "github.com/ipfs/go-bitswap/tracer" + "github.com/ipfs/go-libipfs/bitswap/client" + "github.com/ipfs/go-libipfs/bitswap/server" + "github.com/ipfs/go-libipfs/bitswap/tracer" delay "github.com/ipfs/go-ipfs-delay" ) diff --git a/bitswap/sendOnlyTracer.go b/bitswap/sendOnlyTracer.go index d01d3148e..9570b4390 100644 --- a/bitswap/sendOnlyTracer.go +++ b/bitswap/sendOnlyTracer.go @@ -1,8 +1,8 @@ package bitswap import ( - "github.com/ipfs/go-bitswap/message" - "github.com/ipfs/go-bitswap/tracer" + "github.com/ipfs/go-libipfs/bitswap/message" + "github.com/ipfs/go-libipfs/bitswap/tracer" "github.com/libp2p/go-libp2p/core/peer" ) diff --git a/bitswap/server/forward.go b/bitswap/server/forward.go index 79c39d5da..bb8395229 100644 --- a/bitswap/server/forward.go +++ b/bitswap/server/forward.go @@ -1,7 +1,7 @@ package server import ( - "github.com/ipfs/go-bitswap/server/internal/decision" + "github.com/ipfs/go-libipfs/bitswap/server/internal/decision" ) type ( diff --git a/bitswap/server/internal/decision/blockstoremanager_test.go b/bitswap/server/internal/decision/blockstoremanager_test.go index d1c150278..e06bb2247 100644 --- a/bitswap/server/internal/decision/blockstoremanager_test.go +++ 
b/bitswap/server/internal/decision/blockstoremanager_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/ipfs/go-bitswap/internal/testutil" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" "github.com/ipfs/go-metrics-interface" diff --git a/bitswap/server/internal/decision/engine.go b/bitswap/server/internal/decision/engine.go index 5a7df4b7d..9a5ddb556 100644 --- a/bitswap/server/internal/decision/engine.go +++ b/bitswap/server/internal/decision/engine.go @@ -9,11 +9,11 @@ import ( "github.com/google/uuid" - wl "github.com/ipfs/go-bitswap/client/wantlist" - "github.com/ipfs/go-bitswap/internal/defaults" - bsmsg "github.com/ipfs/go-bitswap/message" - pb "github.com/ipfs/go-bitswap/message/pb" - bmetrics "github.com/ipfs/go-bitswap/metrics" + wl "github.com/ipfs/go-libipfs/bitswap/client/wantlist" + "github.com/ipfs/go-libipfs/bitswap/internal/defaults" + bsmsg "github.com/ipfs/go-libipfs/bitswap/message" + pb "github.com/ipfs/go-libipfs/bitswap/message/pb" + bmetrics "github.com/ipfs/go-libipfs/bitswap/metrics" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" bstore "github.com/ipfs/go-ipfs-blockstore" diff --git a/bitswap/server/internal/decision/engine_test.go b/bitswap/server/internal/decision/engine_test.go index 8872eeb97..1695c5acf 100644 --- a/bitswap/server/internal/decision/engine_test.go +++ b/bitswap/server/internal/decision/engine_test.go @@ -11,9 +11,9 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/ipfs/go-bitswap/internal/testutil" - message "github.com/ipfs/go-bitswap/message" - pb "github.com/ipfs/go-bitswap/message/pb" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" + message "github.com/ipfs/go-libipfs/bitswap/message" + pb "github.com/ipfs/go-libipfs/bitswap/message/pb" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" diff --git a/bitswap/server/internal/decision/ledger.go b/bitswap/server/internal/decision/ledger.go index 
9edc27563..7577df292 100644 --- a/bitswap/server/internal/decision/ledger.go +++ b/bitswap/server/internal/decision/ledger.go @@ -3,8 +3,8 @@ package decision import ( "sync" - wl "github.com/ipfs/go-bitswap/client/wantlist" - pb "github.com/ipfs/go-bitswap/message/pb" + wl "github.com/ipfs/go-libipfs/bitswap/client/wantlist" + pb "github.com/ipfs/go-libipfs/bitswap/message/pb" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p/core/peer" diff --git a/bitswap/server/internal/decision/taskmerger_test.go b/bitswap/server/internal/decision/taskmerger_test.go index eb79f1569..06d563c2d 100644 --- a/bitswap/server/internal/decision/taskmerger_test.go +++ b/bitswap/server/internal/decision/taskmerger_test.go @@ -3,7 +3,7 @@ package decision import ( "testing" - "github.com/ipfs/go-bitswap/internal/testutil" + "github.com/ipfs/go-libipfs/bitswap/internal/testutil" "github.com/ipfs/go-peertaskqueue" "github.com/ipfs/go-peertaskqueue/peertask" ) diff --git a/bitswap/server/server.go b/bitswap/server/server.go index db7733dc9..f955e2cbd 100644 --- a/bitswap/server/server.go +++ b/bitswap/server/server.go @@ -8,13 +8,13 @@ import ( "sync" "time" - "github.com/ipfs/go-bitswap/internal/defaults" - "github.com/ipfs/go-bitswap/message" - pb "github.com/ipfs/go-bitswap/message/pb" - bmetrics "github.com/ipfs/go-bitswap/metrics" - bsnet "github.com/ipfs/go-bitswap/network" - "github.com/ipfs/go-bitswap/server/internal/decision" - "github.com/ipfs/go-bitswap/tracer" + "github.com/ipfs/go-libipfs/bitswap/internal/defaults" + "github.com/ipfs/go-libipfs/bitswap/message" + pb "github.com/ipfs/go-libipfs/bitswap/message/pb" + bmetrics "github.com/ipfs/go-libipfs/bitswap/metrics" + bsnet "github.com/ipfs/go-libipfs/bitswap/network" + "github.com/ipfs/go-libipfs/bitswap/server/internal/decision" + "github.com/ipfs/go-libipfs/bitswap/tracer" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" blockstore "github.com/ipfs/go-ipfs-blockstore" diff --git 
a/bitswap/testinstance/testinstance.go b/bitswap/testinstance/testinstance.go index b4936996c..29dd324f8 100644 --- a/bitswap/testinstance/testinstance.go +++ b/bitswap/testinstance/testinstance.go @@ -4,9 +4,9 @@ import ( "context" "time" - "github.com/ipfs/go-bitswap" - bsnet "github.com/ipfs/go-bitswap/network" - tn "github.com/ipfs/go-bitswap/testnet" + "github.com/ipfs/go-libipfs/bitswap" + bsnet "github.com/ipfs/go-libipfs/bitswap/network" + tn "github.com/ipfs/go-libipfs/bitswap/testnet" ds "github.com/ipfs/go-datastore" delayed "github.com/ipfs/go-datastore/delayed" ds_sync "github.com/ipfs/go-datastore/sync" diff --git a/bitswap/testnet/interface.go b/bitswap/testnet/interface.go index ed5c2ab7a..62f3ca625 100644 --- a/bitswap/testnet/interface.go +++ b/bitswap/testnet/interface.go @@ -1,7 +1,7 @@ package bitswap import ( - bsnet "github.com/ipfs/go-bitswap/network" + bsnet "github.com/ipfs/go-libipfs/bitswap/network" tnet "github.com/libp2p/go-libp2p-testing/net" "github.com/libp2p/go-libp2p/core/peer" diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 1bac2be73..811993395 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -5,8 +5,8 @@ import ( "sync" "testing" - bsmsg "github.com/ipfs/go-bitswap/message" - bsnet "github.com/ipfs/go-bitswap/network" + bsmsg "github.com/ipfs/go-libipfs/bitswap/message" + bsnet "github.com/ipfs/go-libipfs/bitswap/network" blocks "github.com/ipfs/go-block-format" delay "github.com/ipfs/go-ipfs-delay" diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index 8a7a6d2e9..ea98b98f4 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -3,7 +3,7 @@ package bitswap import ( "context" - bsnet "github.com/ipfs/go-bitswap/network" + bsnet "github.com/ipfs/go-libipfs/bitswap/network" ds "github.com/ipfs/go-datastore" mockrouting "github.com/ipfs/go-ipfs-routing/mock" diff --git a/bitswap/testnet/virtual.go 
b/bitswap/testnet/virtual.go index 68f1bff49..b74348966 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -8,8 +8,8 @@ import ( "sync/atomic" "time" - bsmsg "github.com/ipfs/go-bitswap/message" - bsnet "github.com/ipfs/go-bitswap/network" + bsmsg "github.com/ipfs/go-libipfs/bitswap/message" + bsnet "github.com/ipfs/go-libipfs/bitswap/network" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" diff --git a/bitswap/tracer/tracer.go b/bitswap/tracer/tracer.go index af1d39d82..91ff25cf8 100644 --- a/bitswap/tracer/tracer.go +++ b/bitswap/tracer/tracer.go @@ -1,7 +1,7 @@ package tracer import ( - bsmsg "github.com/ipfs/go-bitswap/message" + bsmsg "github.com/ipfs/go-libipfs/bitswap/message" peer "github.com/libp2p/go-libp2p/core/peer" ) diff --git a/bitswap/wantlist/forward.go b/bitswap/wantlist/forward.go index c7eba707f..daee1da69 100644 --- a/bitswap/wantlist/forward.go +++ b/bitswap/wantlist/forward.go @@ -1,7 +1,7 @@ package wantlist import ( - "github.com/ipfs/go-bitswap/client/wantlist" + "github.com/ipfs/go-libipfs/bitswap/client/wantlist" "github.com/ipfs/go-cid" ) diff --git a/go.mod b/go.mod index 9c2b4207a..5c18259fd 100644 --- a/go.mod +++ b/go.mod @@ -1,39 +1,82 @@ module github.com/ipfs/go-libipfs -go 1.19 +go 1.18 + +require github.com/stretchr/testify v1.8.1 require ( github.com/benbjohnson/clock v1.3.0 + github.com/cskr/pubsub v1.0.2 + github.com/gogo/protobuf v1.3.2 + github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.0 - github.com/ipfs/go-cid v0.3.2 + github.com/ipfs/go-block-format v0.0.3 + github.com/ipfs/go-cid v0.2.0 + github.com/ipfs/go-datastore v0.5.1 + github.com/ipfs/go-detect-race v0.0.1 + github.com/ipfs/go-ipfs-blockstore v1.2.0 + github.com/ipfs/go-ipfs-blocksutil v0.0.1 + github.com/ipfs/go-ipfs-delay v0.0.1 + github.com/ipfs/go-ipfs-exchange-interface v0.2.0 + github.com/ipfs/go-ipfs-routing v0.2.1 + github.com/ipfs/go-ipfs-util v0.0.2 + github.com/ipfs/go-ipld-format v0.3.0 
github.com/ipfs/go-ipns v0.3.0 + github.com/ipfs/go-log v1.0.5 github.com/ipfs/go-log/v2 v2.5.1 - github.com/libp2p/go-libp2p v0.23.4 + github.com/ipfs/go-metrics-interface v0.0.1 + github.com/ipfs/go-peertaskqueue v0.7.0 + github.com/jbenet/goprocess v0.1.4 + github.com/libp2p/go-buffer-pool v0.1.0 + github.com/libp2p/go-libp2p v0.22.0 github.com/libp2p/go-libp2p-record v0.2.0 - github.com/multiformats/go-multiaddr v0.8.0 + github.com/libp2p/go-libp2p-testing v0.12.0 + github.com/libp2p/go-msgio v0.2.0 + github.com/multiformats/go-multiaddr v0.6.0 github.com/multiformats/go-multibase v0.1.1 github.com/multiformats/go-multihash v0.2.1 + github.com/multiformats/go-multistream v0.3.3 github.com/samber/lo v1.36.0 - github.com/stretchr/testify v1.8.1 + go.opentelemetry.io/otel v1.7.0 + go.opentelemetry.io/otel/trace v1.7.0 + go.uber.org/multierr v1.8.0 + go.uber.org/zap v1.22.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/ipfs/go-ipfs-util v0.0.2 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/go-cmp v0.5.8 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/huin/goupnp v1.0.3 // indirect + github.com/ipfs/bbloom v0.0.4 // indirect + github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect + github.com/ipfs/go-ipfs-pq v0.0.2 // indirect github.com/ipld/go-ipld-prime v0.9.0 // indirect - github.com/klauspost/cpuid/v2 v2.1.1 // indirect - github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/klauspost/cpuid/v2 v2.1.0 // indirect + github.com/koron/go-ssdp v0.0.3 // indirect + github.com/libp2p/go-cidranger v1.1.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect + github.com/libp2p/go-libp2p-core v0.20.0 // indirect + 
github.com/libp2p/go-nat v0.1.0 // indirect + github.com/libp2p/go-netroute v0.2.0 // indirect github.com/libp2p/go-openssl v0.1.0 // indirect github.com/mattn/go-isatty v0.0.16 // indirect github.com/mattn/go-pointer v0.0.1 // indirect + github.com/miekg/dns v1.1.50 // indirect github.com/minio/sha256-simd v1.0.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect - github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base32 v0.0.4 // indirect github.com/multiformats/go-base36 v0.1.0 // indirect - github.com/multiformats/go-multicodec v0.6.0 // indirect + github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect + github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect + github.com/multiformats/go-multicodec v0.5.0 // indirect github.com/multiformats/go-varint v0.0.6 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 // indirect @@ -41,11 +84,14 @@ require ( github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/stretchr/objx v0.5.0 // indirect go.uber.org/atomic v1.10.0 // indirect - go.uber.org/multierr v1.8.0 // indirect - go.uber.org/zap v1.23.0 // indirect golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect - golang.org/x/exp v0.0.0-20220916125017-b168a2c6b86b // indirect + golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 // indirect + golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect + golang.org/x/net v0.0.0-20220812174116-3211cb980234 // indirect + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect + golang.org/x/tools v0.1.12 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.1.7 // indirect ) diff --git a/go.sum b/go.sum index 
de564deb1..004e6f2b1 100644 --- a/go.sum +++ b/go.sum @@ -1,88 +1,274 @@ +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= +github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cheekybits/genny v1.0.0 
h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= +github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= +github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= +github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= 
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= 
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= +github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= +github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc= +github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2/go.mod 
h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= -github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc= -github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= +github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.2.0 h1:01JTiihFq9en9Vz0lc0VDWvZe/uBonGpzo4THP0vcQ0= +github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= +github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.5.1 h1:WkRhLuISI+XPD0uk3OskB0fYFSyqK8Ob5ZYew9Qa1nQ= +github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ipfs-blockstore v1.2.0 h1:n3WTeJ4LdICWs/0VSfjHrlqpPpl6MZ+ySd3j8qz0ykw= +github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE= +github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= +github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= +github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= +github.com/ipfs/go-ipfs-ds-help v1.1.0 
h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q= +github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= +github.com/ipfs/go-ipfs-exchange-interface v0.2.0 h1:8lMSJmKogZYNo2jjhUs0izT+dck05pqUw4mWNW9Pw6Y= +github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSOuVDhqF9JtTrO3eptSAiW2/Y= +github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= +github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-routing v0.2.1 h1:E+whHWhJkdN9YeoHZNj5itzc+OR292AJ2uE9FFiW0BY= +github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= +github.com/ipfs/go-ipld-format v0.3.0 h1:Mwm2oRLzIuUwEPewWAWyMuuBQUsn3awfFEYVb8akMOQ= +github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= github.com/ipfs/go-ipns v0.3.0 h1:ai791nTgVo+zTuq2bLvEGmWP1M0A6kGTXUsgv/Yq67A= github.com/ipfs/go-ipns v0.3.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24= +github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= +github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= +github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= +github.com/ipfs/go-metrics-interface v0.0.1/go.mod 
h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= +github.com/ipfs/go-peertaskqueue v0.7.0 h1:VyO6G4sbzX80K58N60cCaHsSsypbUNs1GjO5seGNsQ0= +github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= github.com/ipld/go-ipld-prime v0.9.0 h1:N2OjJMb+fhyFPwPnVvJcWU/NsumP8etal+d2v3G4eww= github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= +github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.15.1 
h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.1.1 h1:t0wUqjowdm8ezddV5k0tLWVklVuvLJpoHeb4WBdydm0= -github.com/klauspost/cpuid/v2 v2.1.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0= +github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8= +github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= -github.com/libp2p/go-libp2p v0.23.4 h1:hWi9XHSOVFR1oDWRk7rigfyA4XNMuYL20INNybP9LP8= -github.com/libp2p/go-libp2p v0.23.4/go.mod h1:s9DEa5NLR4g+LZS+md5uGU4emjMWFiqkZr6hBTY8UxI= +github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= +github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= 
+github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= +github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= +github.com/libp2p/go-libp2p v0.22.0 h1:2Tce0kHOp5zASFKJbNzRElvh0iZwdtG5uZheNW8chIw= +github.com/libp2p/go-libp2p v0.22.0/go.mod h1:UDolmweypBSjQb2f7xutPnwZ/fxioLbMBxSjRksxxU4= +github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw= +github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= +github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= +github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= +github.com/libp2p/go-libp2p-core v0.20.0 h1:PGKM74+T+O/FaZNARNW32i90RMBHCcgd/hkum2UQ5eY= +github.com/libp2p/go-libp2p-core v0.20.0/go.mod h1:6zR8H7CvQWgYLsbG4on6oLNSGcyKaYFSEYyDt51+bIY= +github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= +github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= +github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= +github.com/libp2p/go-msgio v0.2.0 h1:W6shmB+FeynDrUVl2dgFQvzfBZcXiyqY4VmpQLu9FqU= +github.com/libp2p/go-msgio v0.2.0/go.mod h1:dBVM1gW3Jk9XqHkU4eKdGvVHdLa51hoGfll6jMJMSlY= +github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= +github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= +github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.2.0 
h1:0FpsbsvuSnAhXFnCY0VLFbJOzaK0VnP0r1QT/o4nWRE= +github.com/libp2p/go-netroute v0.2.0/go.mod h1:Vio7LTzZ+6hoT4CMZi5/6CpY3Snzh2vgZhWgxMNwlQI= github.com/libp2p/go-openssl v0.1.0 h1:LBkKEcUv6vtZIQLVTegAil8jbNpJErQ9AnT+bWV+Ooo= github.com/libp2p/go-openssl v0.1.0/go.mod h1:OiOxwPpL3n4xlenjx2h7AwSGaFSC/KZvf6gNdOBQMtc= +github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= +github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-yamux/v3 v3.1.2 h1:lNEy28MBk1HavUAlzKgShp+F6mn/ea1nDYWftZhFW9Q= +github.com/lucas-clemente/quic-go v0.28.1 h1:Uo0lvVxWg5la9gflIF9lwa39ONq85Xq2D91YNEIslzU= +github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ= +github.com/marten-seemann/qtls-go1-17 v0.1.2 h1:JADBlm0LYiVbuSySCHeY863dNkcpMmDR7s0bLKJeYlQ= +github.com/marten-seemann/qtls-go1-18 v0.1.2 h1:JH6jmzbduz0ITVQ7ShevK10Av5+jBEKAHMntXmIV7kM= +github.com/marten-seemann/qtls-go1-19 v0.1.0 h1:rLFKD/9mp/uq1SYGYuVZhm83wkmU95pK5df3GufyYYU= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-pointer v0.0.1 h1:n+XhsuGeVO6MEAp7xyEukFINEa+Quek5psIR/ylA6o0= github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns 
v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= +github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= -github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= -github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base32 v0.0.4 h1:+qMh4a2f37b4xTNs6mqitDinryCI+tfO2dRVMN9mjSE= +github.com/multiformats/go-base32 v0.0.4/go.mod 
h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM= github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= -github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU= -github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= +github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= +github.com/multiformats/go-multiaddr v0.6.0 h1:qMnoOPj2s8xxPU5kZ57Cqdr0hHhARz7mFsPMIiYNqzg= +github.com/multiformats/go-multiaddr v0.6.0/go.mod h1:F4IpaKZuPP360tOMn2Tpyu0At8w23aRyVqeK0DbFeGM= +github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= +github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= -github.com/multiformats/go-multicodec v0.6.0 h1:KhH2kSuCARyuJraYMFxrNO3DqIaYhOdS039kbhgVwpE= -github.com/multiformats/go-multicodec v0.6.0/go.mod 
h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= +github.com/multiformats/go-multicodec v0.5.0 h1:EgU6cBe/D7WRwQb1KmnBvU7lrcFGMggZVTPtOW9dDHs= +github.com/multiformats/go-multicodec v0.5.0/go.mod h1:DiY2HFaEp5EhEXb/iYzVAunmyX/aSFMxq2KMKfWEues= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= +github.com/multiformats/go-multistream v0.3.3 h1:d5PZpjwRgVlbwfdTDjife7XszfZd8KYWfROYFlGcR8o= +github.com/multiformats/go-multistream v0.3.3/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= +github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -90,12 +276,19 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 h1:CskT+S6Ay54OwxBGB0R3Rsx4Muto6UnEYTyKJbyRIAI= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/samber/lo v1.36.0 
h1:4LaOxH1mHnbDGhTVE0i1z8v/lWaQW8AIfOD3HU4mSaw= github.com/samber/lo v1.36.0/go.mod h1:HLeWcJRRyLKp3+/XBJvOrerCQn9mhdKMHyd7IRlgeQ8= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= @@ -105,6 +298,7 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -113,50 +307,89 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/thoas/go-funk v0.9.1 h1:O549iLZqPpTUQ10ykd26sZhzD+rmR5pWhuElrhbC20M= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a 
h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM= +go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= +go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o= +go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.1/go.mod 
h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= -go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +go.uber.org/zap v1.22.0 h1:Zcye5DUgBloQ9BaT4qc9BnjOFog5TvBSAGkJ3Nf70c0= +go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/exp v0.0.0-20220916125017-b168a2c6b86b h1:SCE/18RnFsLrjydh/R/s5EVvHoZprqEQUuoxK8q2Pc4= -golang.org/x/exp v0.0.0-20220916125017-b168a2c6b86b/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= 
+golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 h1:3MTrJm4PyNL9NBqvYDSj3DHl46qQakyfqfWo4jgfaEM= +golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net 
v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220812174116-3211cb980234 h1:RDqmgfe7SvlMWoqC3xwQ2blLO3fcWcxMa3eBLRdRW7E= +golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -166,24 +399,43 @@ golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXR golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=