From f6a190b99ec079a5bb884e78b3c6fd84e53e4b55 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 27 Oct 2020 11:15:00 +0100 Subject: [PATCH 01/39] Use inbound peerslot slots when a substream is received, rather than a connection --- client/network/src/protocol.rs | 2 +- .../src/protocol/generic_proto/behaviour.rs | 330 +++++++++----- .../protocol/generic_proto/handler/group.rs | 421 ++++++++++++------ 3 files changed, 499 insertions(+), 254 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index ac74af0f5ca94..1926265330f39 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1668,7 +1668,7 @@ impl NetworkBehaviour for Protocol { notifications_sink, } }, - GenericProtoOut::CustomProtocolClosed { peer_id, .. } => { + GenericProtoOut::CustomProtocolClosed { peer_id } => { self.on_peer_disconnected(peer_id) }, GenericProtoOut::LegacyMessage { peer_id, message } => diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 7b62b154016c3..29b5088244fba 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -42,23 +42,7 @@ use wasm_timer::Instant; /// Network behaviour that handles opening substreams for custom protocols with other peers. /// -/// ## Legacy vs new protocol -/// -/// The `GenericProto` behaves as following: -/// -/// - Whenever a connection is established, we open a single substream (called "legacy protocol" in -/// the source code) on that connection. This substream name depends on the `protocol_id` and -/// `versions` passed at initialization. If the remote refuses this substream, we close the -/// connection. -/// -/// - For each registered protocol, we also open an additional substream for this protocol. If the -/// remote refuses this substream, then it's fine. 
-/// -/// - Whenever we want to send a message, we can call either `send_packet` to force the legacy -/// substream, or `write_notification` to indicate a registered protocol. If the registered -/// protocol was refused or isn't supported by the remote, we always use the legacy instead. -/// -/// ## How it works +/// # How it works /// /// The role of the `GenericProto` is to synchronize the following components: /// @@ -157,6 +141,8 @@ pub struct GenericProto { struct DelayId(u64); /// State of a peer we're connected to. +/// +/// The various variants correspond to the state that are relevant to the peerset. #[derive(Debug)] enum PeerState { /// State is poisoned. This is a temporary state for a peer and we should always switch back @@ -182,9 +168,10 @@ enum PeerState { /// The peerset requested that we connect to this peer. We are currently dialing this peer. Requested, - /// We are connected to this peer but the peerset refused it. + /// We are connected to this peer but the peerset hasn't requested it. /// - /// We may still have ongoing traffic with that peer, but it should cease shortly. + /// The handler is either in the closed state, or a `Close` message has been sent to it and + /// hasn't been answered yet. Disabled { /// The connections that are currently open for custom protocol traffic. open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, @@ -192,9 +179,12 @@ enum PeerState { banned_until: Option, }, - /// We are connected to this peer but we are not opening any Substrate substream. The handler - /// will be enabled when `timer` fires. This peer can still perform Kademlia queries and such, - /// but should get disconnected in a few seconds. + /// We are connected to this peer but we are not opening any Substrate substream. + /// + /// The handler is either in the closed state, or a `Close` message has been sent to it and + /// hasn't been answered yet. + /// + /// The handler will be opened when `timer` fires. 
DisabledPendingEnable { /// The connections that are currently open for custom protocol traffic. open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, @@ -204,18 +194,22 @@ enum PeerState { timer_deadline: Instant, }, - /// We are connected to this peer and the peerset has accepted it. The handler is in the - /// enabled state. + /// We are connected to this peer and the peerset has accepted it. We have sent to the + /// handlers an open message that might or might not have been processed already. Enabled { /// The connections that are currently open for custom protocol traffic. open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, }, - /// We received an incoming connection from this peer and forwarded that - /// connection request to the peerset. The connection handlers are waiting - /// for initialisation, i.e. to be enabled or disabled based on whether - /// the peerset accepts or rejects the peer. - Incoming, + /// We have received an `OpenDesired` from the handler and forwarded that request to the + /// peerset. The connection handlers are waiting for a response, i.e. to be opened or closed + /// based on whether the peerset accepts or rejects the peer. + Incoming { + /// The connections that are currently open for custom protocol traffic. + open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, + /// If `Some`, any dial attempts to this peer are delayed until the given `Instant`. + banned_until: Option, + }, } impl PeerState { @@ -303,8 +297,6 @@ pub enum GenericProtoOut { CustomProtocolClosed { /// Id of the peer we were connected to. peer_id: PeerId, - /// Reason why the substream closed, for debugging purposes. - reason: Cow<'static, str>, }, /// Receives a message on the legacy substream. 
@@ -463,11 +455,11 @@ impl GenericProto { PeerState::Enabled { open } => { debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); self.peerset.dropped(peer_id.clone()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); + debug!(target: "sub-libp2p", "Handler({:?}) <= Close", peer_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, + event: NotifsHandlerIn::Close, }); let banned_until = ban.map(|dur| Instant::now() + dur); *entry.into_mut() = PeerState::Disabled { @@ -477,7 +469,7 @@ impl GenericProto { }, // Incoming => Disabled. - PeerState::Incoming => { + PeerState::Incoming { open, banned_until } => { let inc = if let Some(inc) = self.incoming.iter_mut() .find(|i| i.peer_id == *entry.key() && i.alive) { inc @@ -488,15 +480,16 @@ impl GenericProto { }; inc.alive = false; - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); + debug!(target: "sub-libp2p", "Handler({:?}) <= Close", peer_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, + event: NotifsHandlerIn::Close, }); + // TODO: interaction with `banned_until` above? 
let banned_until = ban.map(|dur| Instant::now() + dur); *entry.into_mut() = PeerState::Disabled { - open: SmallVec::new(), + open, banned_until } }, @@ -664,16 +657,16 @@ impl GenericProto { PeerState::Disabled { open, banned_until: _ } => { debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", occ_entry.key()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); + debug!(target: "sub-libp2p", "Handler({:?}) <= Open", occ_entry.key()); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: occ_entry.key().clone(), handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, + event: NotifsHandlerIn::Open, }); *occ_entry.into_mut() = PeerState::Enabled { open }; }, - PeerState::Incoming => { + PeerState::Incoming { open, .. } => { debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", occ_entry.key()); if let Some(inc) = self.incoming.iter_mut() @@ -683,13 +676,13 @@ impl GenericProto { error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ incoming for incoming peer") } - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); + debug!(target: "sub-libp2p", "Handler({:?}) <= Open", occ_entry.key()); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: occ_entry.key().clone(), handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, + event: NotifsHandlerIn::Open, }); - *occ_entry.into_mut() = PeerState::Enabled { open: SmallVec::new() }; + *occ_entry.into_mut() = PeerState::Enabled { open }; }, st @ PeerState::Enabled { .. 
} => { @@ -748,18 +741,18 @@ impl GenericProto { PeerState::Enabled { open } => { debug!(target: "sub-libp2p", "PSM => Drop({:?}): Disabling connections.", entry.key()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", entry.key()); + debug!(target: "sub-libp2p", "Handler({:?}) <= Close", entry.key()); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: entry.key().clone(), handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, + event: NotifsHandlerIn::Close, }); *entry.into_mut() = PeerState::Disabled { open, banned_until: None } }, - st @ PeerState::Incoming => { + st @ PeerState::Incoming { .. } => { error!(target: "sub-libp2p", "PSM => Drop({:?}): Not enabled (Incoming).", entry.key()); *entry.into_mut() = st; @@ -799,17 +792,25 @@ impl GenericProto { return } - match self.peers.get_mut(&incoming.peer_id) { - Some(state @ PeerState::Incoming) => { + let state = match self.peers.get_mut(&incoming.peer_id) { + Some(s) => s, + None => { + debug_assert!(false); + return; + } + }; + + match mem::replace(state, PeerState::Poisoned) { + PeerState::Incoming { open, .. } => { debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Enabling connections.", index, incoming.peer_id); - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", incoming.peer_id); + debug!(target: "sub-libp2p", "Handler({:?}) <= Open", incoming.peer_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: incoming.peer_id, handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, + event: NotifsHandlerIn::Open, }); - *state = PeerState::Enabled { open: SmallVec::new() }; + *state = PeerState::Enabled { open }; } peer => error!(target: "sub-libp2p", "State mismatch in libp2p: Expected alive incoming. 
Got {:?}.", @@ -832,19 +833,27 @@ impl GenericProto { return } - match self.peers.get_mut(&incoming.peer_id) { - Some(state @ PeerState::Incoming) => { + let state = match self.peers.get_mut(&incoming.peer_id) { + Some(s) => s, + None => { + debug_assert!(false); + return; + } + }; + + match mem::replace(state, PeerState::Poisoned) { + PeerState::Incoming { open, banned_until } => { debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Rejecting connections.", index, incoming.peer_id); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", incoming.peer_id); + debug!(target: "sub-libp2p", "Handler({:?}) <= Close", incoming.peer_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: incoming.peer_id, handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, + event: NotifsHandlerIn::Close, }); *state = PeerState::Disabled { - open: SmallVec::new(), - banned_until: None + open, + banned_until, }; } peer => error!(target: "sub-libp2p", @@ -875,9 +884,9 @@ impl NetworkBehaviour for GenericProto { fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} established.", conn, endpoint, peer_id); - match (self.peers.entry(peer_id.clone()).or_insert(PeerState::Poisoned), endpoint) { - (st @ &mut PeerState::Requested, endpoint) | - (st @ &mut PeerState::PendingRequest { .. }, endpoint) => { + match self.peers.entry(peer_id.clone()).or_insert(PeerState::Poisoned) { + st @ &mut PeerState::Requested | + st @ &mut PeerState::PendingRequest { .. 
} => { debug!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}): Connection was requested by PSM.", peer_id, endpoint @@ -886,38 +895,15 @@ impl NetworkBehaviour for GenericProto { self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Enable + event: NotifsHandlerIn::Open }); } // Note: it may seem weird that "Banned" peers get treated as if they were absent. // This is because the word "Banned" means "temporarily prevent outgoing connections to // this peer", and not "banned" in the sense that we would refuse the peer altogether. - (st @ &mut PeerState::Poisoned, endpoint @ ConnectedPoint::Listener { .. }) | - (st @ &mut PeerState::Banned { .. }, endpoint @ ConnectedPoint::Listener { .. }) => { - let incoming_id = self.next_incoming_index; - self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { - Some(v) => v, - None => { - error!(target: "sub-libp2p", "Overflow in next_incoming_index"); - return - } - }; - debug!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}): Incoming connection", - peer_id, endpoint); - debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", - peer_id, incoming_id); - self.peerset.incoming(peer_id.clone(), incoming_id); - self.incoming.push(IncomingPeer { - peer_id: peer_id.clone(), - alive: true, - incoming_id, - }); - *st = PeerState::Incoming { }; - } - - (st @ &mut PeerState::Poisoned, endpoint) | - (st @ &mut PeerState::Banned { .. }, endpoint) => { + st @ &mut PeerState::Poisoned | + st @ &mut PeerState::Banned { .. 
} => { let banned_until = if let PeerState::Banned { until } = st { Some(*until) } else { @@ -927,37 +913,27 @@ impl NetworkBehaviour for GenericProto { "Libp2p => Connected({},{:?}): Not requested by PSM, disabling.", peer_id, endpoint); *st = PeerState::Disabled { open: SmallVec::new(), banned_until }; - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Disable - }); } - (PeerState::Incoming { .. }, _) => { + PeerState::Incoming { .. } => { debug!(target: "sub-libp2p", "Secondary connection {:?} to {} waiting for PSM decision.", conn, peer_id); }, - (PeerState::Enabled { .. }, _) => { + PeerState::Enabled { .. } => { debug!(target: "sub-libp2p", "Handler({},{:?}) <= Enable secondary connection", peer_id, conn); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Enable + event: NotifsHandlerIn::Open }); } - (PeerState::Disabled { .. }, _) | (PeerState::DisabledPendingEnable { .. }, _) => { + PeerState::Disabled { .. } | PeerState::DisabledPendingEnable { .. } => { debug!(target: "sub-libp2p", "Handler({},{:?}) <= Disable secondary connection", peer_id, conn); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Disable - }); } } } @@ -988,7 +964,6 @@ impl NetworkBehaviour for GenericProto { debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); let event = GenericProtoOut::CustomProtocolClosed { peer_id: peer_id.clone(), - reason: "Disconnected by libp2p".into(), }; self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); @@ -1060,7 +1035,7 @@ impl NetworkBehaviour for GenericProto { // In the incoming state, we don't report "Dropped". Instead we will just ignore the // corresponding Accept/Reject. 
- Some(PeerState::Incoming { }) => { + Some(PeerState::Incoming { .. }) => { if let Some(state) = self.incoming.iter_mut() .find(|i| i.alive && i.peer_id == *peer_id) { @@ -1127,15 +1102,63 @@ impl NetworkBehaviour for GenericProto { event: NotifsHandlerOut, ) { match event { - NotifsHandlerOut::Closed { endpoint, reason } => { + NotifsHandlerOut::OpenDesired => { debug!(target: "sub-libp2p", - "Handler({:?}) => Endpoint {:?} closed for custom protocols: {}", - source, endpoint, reason); + "Handler({:?}) => Open requested from the remote", + source); let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { entry } else { - error!(target: "sub-libp2p", "Closed: State mismatch in the custom protos handler"); + error!(target: "sub-libp2p", "OpenDesired: State mismatch in the custom protos handler"); + debug_assert!(false); + return + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + PeerState::Enabled { open, .. } | + PeerState::DisabledPendingEnable { open, .. } | + PeerState::Disabled { open, .. 
} => { + let incoming_id = self.next_incoming_index; + self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { + Some(v) => v, + None => { + error!(target: "sub-libp2p", "Overflow in next_incoming_index"); + return + } + }; + debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", + source, incoming_id); + self.peerset.incoming(source.clone(), incoming_id); + self.incoming.push(IncomingPeer { + peer_id: source.clone(), + alive: true, + incoming_id, + }); + *entry.into_mut() = PeerState::Incoming { + open, + banned_until: None, // TODO: get from `DisabledPendingEnable` + }; + } + state => { + error!(target: "sub-libp2p", + "Open: Unexpected state in the custom protos handler: {:?}", + state); + return + } + }; + } + + NotifsHandlerOut::CloseDesired => { + debug!(target: "sub-libp2p", + "Handler({:?}) => Closing requested from the remote", + source); + + let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { + entry + } else { + error!(target: "sub-libp2p", "CloseDesired: State mismatch in the custom protos handler"); + debug_assert!(false); return }; @@ -1158,13 +1181,13 @@ impl NetworkBehaviour for GenericProto { // race conditions involving the legacy substream. // Once https://github.com/paritytech/substrate/issues/5670 is done, this // should be changed to stay in the `Enabled` state. - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", source); + debug!(target: "sub-libp2p", "Handler({:?}) <= Close", source); debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); self.peerset.dropped(source.clone()); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: source.clone(), handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, + event: NotifsHandlerIn::Close, }); let last = open.is_empty(); @@ -1182,6 +1205,55 @@ impl NetworkBehaviour for GenericProto { (last, new_notifications_sink) }, + state @ PeerState::Disabled { .. } | + state @ PeerState::DisabledPendingEnable { .. 
} => { + *entry.into_mut() = state; + return; + }, + state => { + error!(target: "sub-libp2p", + "Unexpected state in the custom protos handler: {:?}", + state); + return + } + }; + + if last { + debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); + let event = GenericProtoOut::CustomProtocolClosed { + peer_id: source, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + + } else { + if let Some(new_notifications_sink) = new_notifications_sink { + let event = GenericProtoOut::CustomProtocolReplaced { + peer_id: source, + notifications_sink: new_notifications_sink, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + debug!(target: "sub-libp2p", "Secondary connection closed custom protocol."); + } + } + + NotifsHandlerOut::CloseResult => { + debug!(target: "sub-libp2p", + "Handler({:?}) => Endpoint closed for notifications protocols", + source); + + let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { + entry + } else { + error!(target: "sub-libp2p", "Closed: State mismatch in the custom protos handler"); + return + }; + + let (last, new_notifications_sink) = match mem::replace(entry.get_mut(), PeerState::Poisoned) { + state @ PeerState::Enabled { .. 
} => { + *entry.into_mut() = state; + return; + }, PeerState::Disabled { mut open, banned_until } => { let pos = open.iter().position(|(c, _)| c == &connection); let sink_closed = pos == Some(0); @@ -1256,7 +1328,6 @@ impl NetworkBehaviour for GenericProto { if last { debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); let event = GenericProtoOut::CustomProtocolClosed { - reason, peer_id: source, }; self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); @@ -1273,7 +1344,7 @@ impl NetworkBehaviour for GenericProto { } } - NotifsHandlerOut::Open { endpoint, received_handshake, notifications_sink } => { + NotifsHandlerOut::OpenResultOk { endpoint, received_handshake, notifications_sink } => { debug!(target: "sub-libp2p", "Handler({:?}) => Endpoint {:?} open for custom protocols.", source, endpoint); @@ -1320,6 +1391,41 @@ impl NetworkBehaviour for GenericProto { } } + NotifsHandlerOut::OpenResultErr => { + debug!(target: "sub-libp2p", + "Handler({:?}) => Failed to open substream with remote", + source); + + let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { + entry + } else { + error!(target: "sub-libp2p", "Closed: State mismatch in the custom protos handler"); + return + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + PeerState::Enabled { open } => { + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); + self.peerset.dropped(source.clone()); + + *entry.into_mut() = PeerState::Disabled { + open, + banned_until: None + }; + }, + state @ PeerState::Disabled { .. } | + state @ PeerState::DisabledPendingEnable { .. 
} => { + *entry.into_mut() = state; + }, + state => { + error!(target: "sub-libp2p", + "Unexpected state in the custom protos handler: {:?}", + state); + debug_assert!(false); + } + }; + } + NotifsHandlerOut::CustomMessage { message } => { debug_assert!(self.is_open(&source)); trace!(target: "sub-libp2p", "Handler({:?}) => Message", source); @@ -1410,11 +1516,11 @@ impl NetworkBehaviour for GenericProto { } PeerState::DisabledPendingEnable { timer, open, .. } if *timer == delay_id => { - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable (ban expired)", peer_id); + debug!(target: "sub-libp2p", "Handler({:?}) <= Open (ban expired)", peer_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id, handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, + event: NotifsHandlerIn::Open, }); *peer_state = PeerState::Enabled { open: mem::replace(open, Default::default()) }; } diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index fbfdb1cb6ab0e..2e9ce8b712592 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -22,28 +22,34 @@ //! //! # Usage //! -//! The handler can be in one of the following states: `Initial`, `Enabled`, `Disabled`. +//! From an API perspective, the [`NotifsHandler`] is always in one of the following state: //! -//! The `Initial` state is the state that the handler initially is in. It is a temporary state -//! during which the user must either enable or disable the handler. After that, the handler stays -//! either enabled or disabled. +//! - Closed substreams. This is the initial state. +//! - Closed substreams, but remote desires them to be open. +//! - Open substreams. +//! - Open substreams, but remote desires them to be closed. //! -//! On the wire, we try to open the following substreams: +//! 
The [`NotifsHandler`] can spontaneously switch between these states: //! -//! - One substream for each notification protocol passed as parameter to the -//! `NotifsHandlerProto::new` function. -//! - One "legacy" substream used for anything non-related to gossiping, and used as a fallback -//! in case the notification protocol can't be opened. +//! - "Closed substreams" to "Closed substreams but open desired". When that happens, a +//! [`NotifsHandlerOut::OpenDesired`] is emitted. +//! - "Closed substreams but open desired" to "Closed substreams" (i.e. the remote has cancelled +//! their request). When that happens, a [`NotifsHandlerOut::CloseDesired`] is emitted. +//! - "Open substreams" to "Open substreams but close desired". When that happens, a +//! [`NotifsHandlerOut::CloseDesired`] is emitted. //! -//! When the handler is in the `Enabled` state, we immediately open and try to maintain all the -//! aforementioned substreams. When the handler is in the `Disabled` state, we immediately close -//! (or abort opening) all these substreams. It is intended that in the future we allow states in -//! which some protocols are open and not others. Symmetrically, we allow incoming -//! Substrate-related substreams if and only if we are in the `Enabled` state. +//! The user can instruct the `NotifsHandler` to switch from "closed" to "open" or vice-versa by +//! sending either a [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`]. The `NotifsHandler` +//! must answer with [`NotifsHandlerOut::OpenResultOk`] or [`NotifsHandlerOut::OpenResultErr`], or +//! with [`NotifsHandlerOut::CloseResult`]. //! -//! The user has the choice between sending a message with `SendNotification`, to send a -//! notification, and `SendLegacy`, to send any other kind of message. +//! When a [`NotifsHandlerOut::OpenResultOk`] is emitted, the `NotifsHandler` is now in the open +//! state. When a [`NotifsHandlerOut::OpenResultErr`] or [`NotifsHandlerOut::CloseResult`] is +//! 
emitted, the `NotifsHandler` is now (or remains) in the closed state. //! +//! When a [`NotifsHandlerOut::OpenDesired`] is emitted, the user should always send back either a +//! [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`].If this isn't done, the remote will +//! be left in a pending state. use crate::protocol::generic_proto::{ handler::legacy::{LegacyProtoHandler, LegacyProtoHandlerProto, LegacyProtoHandlerIn, LegacyProtoHandlerOut}, @@ -68,9 +74,9 @@ use futures::{ lock::{Mutex as FuturesMutex, MutexGuard as FuturesMutexGuard}, prelude::* }; -use log::{debug, error}; +use log::error; use parking_lot::{Mutex, RwLock}; -use std::{borrow::Cow, str, sync::Arc, task::{Context, Poll}}; +use std::{borrow::Cow, collections::VecDeque, mem, str, sync::Arc, task::{Context, Poll}}; /// Number of pending notifications in asynchronous contexts. /// See [`NotificationsSink::reserve_notification`] for context. @@ -113,41 +119,59 @@ pub struct NotifsHandler { /// Handler for backwards-compatibility. legacy: LegacyProtoHandler, - /// In the situation where either the legacy substream has been opened or the handshake-bearing - /// notifications protocol is open, but we haven't sent out any [`NotifsHandlerOut::Open`] - /// event yet, this contains the received handshake waiting to be reported through the - /// external API. - pending_handshake: Option>, - /// State of this handler. - enabled: EnabledState, - - /// If we receive inbound substream requests while in initialization mode, - /// we push the corresponding index here and process them when the handler - /// gets enabled/disabled. - pending_in: Vec, - - /// If `Some`, contains the two `Receiver`s connected to the [`NotificationsSink`] that has - /// been sent out. The notifications to send out can be pulled from this receivers. - /// We use two different channels in order to have two different channel sizes, but from the - /// receiving point of view, the two channels are the same. 
- /// The receivers are fused in case the user drops the [`NotificationsSink`] entirely. + state: State, + + /// Events to return in priority from `poll`. + events_queue: VecDeque< + ProtocolsHandlerEvent + >, +} + +/// See the module-level documentation to learn about the meaning of these variants. +#[derive(Debug)] +enum State { + /// Handler is in the "Closed" state. + Closed { + /// When we receive inbound substream requests, we push here the index within + /// [`NotisHandler::in_handlers`], and process them when an `Open` or `Close` request is + /// received. + /// + /// If this is non-empty, a [`NotifsHandlerOut::OpenDesired`] has been emitted. If this + /// transitions from non-empty to empty, a [`NotisHandlerOut::CloseDesired`] or a + /// [`NotisHandlerOut::CloseResult`] is emitted. + pending_in: Vec, + }, + + /// Handler is in the "Closed" state, but has received a [`NotifsHandlerIn::Open`] and is + /// consequently trying to open the various notifications substreams. /// - /// Contains `Some` if and only if it has been reported to the user that the substreams are - /// open. - notifications_sink_rx: Option< - stream::Select< + /// A [`NotifsHandlerOut::OpenResultOk`] or a [`NotifsHandlerOut::OpenResultErr`] event must + /// be emitted when transitionning to respectively [`State::Open`] or [`State::Closed`]. + Opening { + /// In the situation where either the legacy substream has been opened or the + /// handshake-bearing notifications protocol is open, but we haven't sent out any + /// [`NotifsHandlerOut::Open`] event yet, this contains the received handshake waiting to + /// be reported through the external API. + pending_handshake: Option>, + }, + + /// Handler is in the "Open" state. + Open { + /// Contains the two `Receiver`s connected to the [`NotificationsSink`] that has been + /// sent out. The notifications to send out can be pulled from this receivers. 
+ /// We use two different channels in order to have two different channel sizes, but from + /// the receiving point of view, the two channels are the same. + /// The receivers are fused in case the user drops the [`NotificationsSink`] entirely. + notifications_sink_rx: stream::Select< stream::Fuse>, stream::Fuse> - > - >, -} + >, -#[derive(Debug, Clone, PartialEq, Eq)] -enum EnabledState { - Initial, - Enabled, - Disabled, + /// If true, at least one substream has been closed and a + /// [`NotifsHandlerOut::CloseDesired`] message has been sent out. + want_closed: bool, + }, } impl IntoProtocolsHandler for NotifsHandlerProto { @@ -173,10 +197,10 @@ impl IntoProtocolsHandler for NotifsHandlerProto { .collect(), endpoint: connected_point.clone(), legacy: self.legacy.into_handler(remote_peer_id, connected_point), - pending_handshake: None, - enabled: EnabledState::Initial, - pending_in: Vec::new(), - notifications_sink_rx: None, + state: State::Closed { + pending_in: Vec::new(), + }, + events_queue: VecDeque::with_capacity(16), } } } @@ -184,18 +208,27 @@ impl IntoProtocolsHandler for NotifsHandlerProto { /// Event that can be received by a `NotifsHandler`. #[derive(Debug, Clone)] pub enum NotifsHandlerIn { - /// The node should start using custom protocols. - Enable, + /// Instruct the handler to open the notification substreams. + /// + /// Must always be answered by a [`NotifsHandlerOut::OpenResultOk`] or a + /// [`NotifsHandlerOut::OpenResultErr`] event. + /// + /// Importantly, it is forbidden to send a [`NotifsHandlerIn::Open`] while a previous one is + /// already in the fly. It is however possible if a `Close` is still in the fly. + Open, - /// The node should stop using custom protocols. - Disable, + /// Instruct the handler to close the notification substreams, or reject any pending incoming + /// substream request. + /// + /// Must always be answered by a [`NotifsHandlerOut::CloseResult`] event. + Close, } /// Event that can be emitted by a `NotifsHandler`. 
#[derive(Debug)] pub enum NotifsHandlerOut { - /// The connection is open for custom protocols. - Open { + /// Acknowledges a [`NotifsHandlerIn::Open`]. + OpenResultOk { /// The endpoint of the connection that is open for custom protocols. endpoint: ConnectedPoint, /// Handshake that was sent to us. @@ -205,15 +238,29 @@ pub enum NotifsHandlerOut { notifications_sink: NotificationsSink, }, - /// The connection is closed for custom protocols. - Closed { - /// The reason for closing, for diagnostic purposes. - reason: Cow<'static, str>, - /// The endpoint of the connection that closed for custom protocols. - endpoint: ConnectedPoint, - }, + /// Acknowledges a [`NotifsHandlerIn::Open`]. The remote has refused the attempt to open + /// notification substreams. + OpenResultErr, + + /// Acknowledges a [`NotifsHandlerIn::Close`]. + CloseResult, + + /// The remote would like the substreams to be open. Send a [`NotifsHandlerIn::Open`] or a + /// [`NotifsHandlerIn::Close`] in order to either accept or deny this request. If a + /// [`NotifsHandlerIn::Open`] or [`NotifsHandlerIn::Close`] has been sent before and has not + /// yet been acknowledged by a matching [`NotifsHandlerOut`], then you don't need to a send + /// another [`NotifsHandlerIn`]. + OpenDesired, + + /// The remote would like the substreams to be closed. Send a [`NotifsHandlerIn::Close`] in + /// order to close them. If a [`NotifsHandlerIn::Close`] has been sent before and has not yet + /// been acknowledged by a [`NotifsHandlerOut::CloseResult`], then you don't need to a send + /// another one. + CloseDesired, /// Received a non-gossiping message on the legacy substream. + /// + /// Can only happen when the handler is in the open state. CustomMessage { /// Message that has been received. /// @@ -223,6 +270,8 @@ pub enum NotifsHandlerOut { }, /// Received a message on a custom protocol substream. + /// + /// Can only happen when the handler is in the open state. 
Notification { /// Name of the protocol of the message. protocol_name: Cow<'static, str>, @@ -430,44 +479,64 @@ impl ProtocolsHandler for NotifsHandler { fn inject_event(&mut self, message: NotifsHandlerIn) { match message { - NotifsHandlerIn::Enable => { - if let EnabledState::Enabled = self.enabled { - debug!("enabling already-enabled handler"); - } - self.enabled = EnabledState::Enabled; - self.legacy.inject_event(LegacyProtoHandlerIn::Enable); - for (handler, initial_message) in &mut self.out_handlers { - // We create `initial_message` on a separate line to be sure that the lock - // is released as soon as possible. - let initial_message = initial_message.read().clone(); - handler.inject_event(NotifsOutHandlerIn::Enable { - initial_message, - }); - } - for num in self.pending_in.drain(..) { - // We create `handshake_message` on a separate line to be sure - // that the lock is released as soon as possible. - let handshake_message = self.in_handlers[num].1.read().clone(); - self.in_handlers[num].0 - .inject_event(NotifsInHandlerIn::Accept(handshake_message)); + NotifsHandlerIn::Open => { + match &mut self.state { + State::Closed { pending_in } => { + self.legacy.inject_event(LegacyProtoHandlerIn::Enable); + + for (handler, initial_message) in &mut self.out_handlers { + // We create `initial_message` on a separate line to be sure that the + // lock is released as soon as possible. + let initial_message = initial_message.read().clone(); + handler.inject_event(NotifsOutHandlerIn::Enable { + initial_message, + }); + } + + for num in pending_in.drain(..) { + // We create `handshake_message` on a separate line to be sure + // that the lock is released as soon as possible. + let handshake_message = self.in_handlers[num].1.read().clone(); + self.in_handlers[num].0 + .inject_event(NotifsInHandlerIn::Accept(handshake_message)); + } + + self.state = State::Opening { + pending_handshake: None, + }; + }, + State::Opening { .. } | + State::Open { .. 
} => { + // As documented, it is forbidden to send an `Open` while there is already + // one in the fly. + error!(target: "sub-libp2p", "opening already-opened handler"); + }, } }, - NotifsHandlerIn::Disable => { - if let EnabledState::Disabled = self.enabled { - debug!("disabling already-disabled handler"); - } - self.legacy.inject_event(LegacyProtoHandlerIn::Disable); - // The notifications protocols start in the disabled state. If we were in the - // "Initial" state, then we shouldn't disable the notifications protocols again. - if self.enabled != EnabledState::Initial { - for (handler, _) in &mut self.out_handlers { - handler.inject_event(NotifsOutHandlerIn::Disable); - } - } - self.enabled = EnabledState::Disabled; - for num in self.pending_in.drain(..) { - self.in_handlers[num].0.inject_event(NotifsInHandlerIn::Refuse); + + NotifsHandlerIn::Close => { + match &mut self.state { + State::Open { .. } | + State::Opening { .. } => { + self.legacy.inject_event(LegacyProtoHandlerIn::Disable); + for (handler, _) in &mut self.out_handlers { + handler.inject_event(NotifsOutHandlerIn::Disable); + } + + self.state = State::Closed { + pending_in: Vec::new(), + }; + }, + State::Closed { pending_in } => { + for num in pending_in.drain(..) { + self.in_handlers[num].0.inject_event(NotifsInHandlerIn::Refuse); + } + }, } + + self.events_queue.push_back( + ProtocolsHandlerEvent::Custom(NotifsHandlerOut::CloseResult) + ); }, } } @@ -534,7 +603,11 @@ impl ProtocolsHandler for NotifsHandler { ) -> Poll< ProtocolsHandlerEvent > { - if let Some(notifications_sink_rx) = &mut self.notifications_sink_rx { + if let Some(ev) = self.events_queue.pop_front() { + return Poll::Ready(ev); + } + + if let State::Open { notifications_sink_rx, .. } = &mut self.state { 'poll_notifs_sink: loop { // Before we poll the notifications sink receiver, check that all the notification // channels are ready to send a message. 
@@ -589,7 +662,9 @@ impl ProtocolsHandler for NotifsHandler { } } NotificationsSinkMessage::ForceClose => { - return Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::SyncNotificationsClogged)); + return Poll::Ready( + ProtocolsHandlerEvent::Close(NotifsHandlerError::SyncNotificationsClogged) + ); } } } @@ -600,7 +675,7 @@ impl ProtocolsHandler for NotifsHandler { // handshake) is open but the user isn't aware yet of the substreams being open. // When that is the case, neither the legacy substream nor the incoming notifications // substreams should be polled, otherwise there is a risk of receiving messages from them. - if self.pending_handshake.is_none() { + if !matches!(self.state, State::Opening { pending_handshake: Some(_) }) { while let Poll::Ready(ev) = self.legacy.poll(cx) { match ev { ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, .. } => @@ -609,23 +684,32 @@ impl ProtocolsHandler for NotifsHandler { received_handshake, .. }) => { - if self.notifications_sink_rx.is_none() { - debug_assert!(self.pending_handshake.is_none()); - self.pending_handshake = Some(received_handshake); + match &mut self.state { + State::Opening { pending_handshake } => { + debug_assert!(pending_handshake.is_none()); + *pending_handshake = Some(received_handshake); + } + _ => debug_assert!(false), } + cx.waker().wake_by_ref(); return Poll::Pending; }, - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { reason, .. }) => { - // We consciously drop the receivers despite notifications being potentially - // still buffered up. - self.notifications_sink_rx = None; - - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Closed { endpoint: self.endpoint.clone(), reason } - )) + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { .. }) => { + match &mut self.state { + State::Open { want_closed, .. 
} if *want_closed == false => { + *want_closed = true; + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CloseDesired + )); + } + State::Open { .. } => {} + State::Opening { .. } => {} + State::Closed { .. } => debug_assert!(false), + } }, ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => { + debug_assert!(!matches!(self.state, State::Open { .. })); return Poll::Ready(ProtocolsHandlerEvent::Custom( NotifsHandlerOut::CustomMessage { message } )) @@ -638,7 +722,7 @@ impl ProtocolsHandler for NotifsHandler { for (handler_num, (handler, handshake_message)) in self.in_handlers.iter_mut().enumerate() { loop { - let poll = if self.notifications_sink_rx.is_some() { + let poll = if matches!(self.state, State::Open { .. }) { handler.poll(cx) } else { handler.poll_process(cx) @@ -654,26 +738,45 @@ impl ProtocolsHandler for NotifsHandler { error!("Incoming substream handler tried to open a substream"), ProtocolsHandlerEvent::Close(err) => void::unreachable(err), ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(_)) => - match self.enabled { - EnabledState::Initial => self.pending_in.push(handler_num), - EnabledState::Enabled => { + match &mut self.state { + State::Closed { pending_in } => { + let was_empty = pending_in.is_empty(); + pending_in.push(handler_num); + if was_empty { + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenDesired + )); + } + }, + State::Opening { .. } | State::Open { .. } => { // We create `handshake_message` on a separate line to be sure // that the lock is released as soon as possible. 
let handshake_message = handshake_message.read().clone(); handler.inject_event(NotifsInHandlerIn::Accept(handshake_message)) }, - EnabledState::Disabled => - handler.inject_event(NotifsInHandlerIn::Refuse), }, - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed) => {}, + ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed) => { + match &mut self.state { + State::Open { want_closed, .. } if *want_closed == false => { + *want_closed = true; + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CloseDesired + )); + } + State::Open { .. } => {} + State::Opening { .. } => {} + State::Closed { .. } => debug_assert!(false), + } + }, ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(message)) => { - debug_assert!(self.pending_handshake.is_none()); - if self.notifications_sink_rx.is_some() { + if matches!(self.state, State::Open { .. }) { let msg = NotifsHandlerOut::Notification { message, protocol_name: handler.protocol_name().clone(), }; return Poll::Ready(ProtocolsHandlerEvent::Custom(msg)); + } else { + debug_assert!(false); } }, } @@ -682,34 +785,67 @@ impl ProtocolsHandler for NotifsHandler { for (handler_num, (handler, _)) in self.out_handlers.iter_mut().enumerate() { while let Poll::Ready(ev) = handler.poll(cx) { - match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol } => + match (ev, &mut self.state) { + (ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }, _) => return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol: protocol .map_info(|()| handler_num), }), - ProtocolsHandlerEvent::Close(err) => void::unreachable(err), + (ProtocolsHandlerEvent::Close(err), _) => void::unreachable(err), // Opened substream on the handshake-bearing notification protocol. 
- ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { handshake }) - if handler_num == 0 => + ( + ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { handshake }), + State::Opening { pending_handshake } + ) if handler_num == 0 && pending_handshake.is_none() => { - if self.notifications_sink_rx.is_none() && self.pending_handshake.is_none() { - self.pending_handshake = Some(handshake); + *pending_handshake = Some(handshake); + }, + + (ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { handshake }), _) + if handler_num == 0 => debug_assert!(false), + (ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { .. }), _) => {}, + + ( + ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed), + State::Open { want_closed, .. } + ) => { + if *want_closed == false { + *want_closed = true; + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CloseDesired + )); } }, - // Nothing to do in response to other notification substreams being opened - // or closed. - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { .. }) => {}, - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed) => {}, - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Refused) => {}, + // Remote has denied an opening attempt for this notifications protocol. + // This fails the entire opening attempt. + (ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Refused), State::Opening { .. }) | + (ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed), State::Opening { .. 
}) => { + self.legacy.inject_event(LegacyProtoHandlerIn::Disable); + for (handler, _) in &mut self.out_handlers { + handler.inject_event(NotifsOutHandlerIn::Disable); + } + + self.state = State::Closed { + pending_in: Vec::new(), + }; + + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultErr + )); + }, + + + (ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Refused), _) | + (ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed), _) => + debug_assert!(false), } } } - if self.out_handlers.iter().all(|(h, _)| h.is_open() || h.is_refused()) { - if let Some(handshake) = self.pending_handshake.take() { + if let State::Opening { pending_handshake: Some(pending_handshake), .. } = &mut self.state { + if self.out_handlers.iter().all(|(h, _)| h.is_open() || h.is_refused()) { let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); let notifications_sink = NotificationsSink { @@ -719,13 +855,16 @@ impl ProtocolsHandler for NotifsHandler { }), }; - debug_assert!(self.notifications_sink_rx.is_none()); - self.notifications_sink_rx = Some(stream::select(async_rx.fuse(), sync_rx.fuse())); + let pending_handshake = mem::replace(pending_handshake, Vec::new()); + self.state = State::Open { + notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()), + want_closed: false, + }; return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Open { + NotifsHandlerOut::OpenResultOk { endpoint: self.endpoint.clone(), - received_handshake: handshake, + received_handshake: pending_handshake, notifications_sink } )) From 72b99554448b693662645796c9e3557dab533915 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 29 Oct 2020 14:48:10 +0100 Subject: [PATCH 02/39] Refactor PeerState --- .../src/protocol/generic_proto/behaviour.rs | 1150 +++++++++++------ .../protocol/generic_proto/handler/group.rs | 6 + 2 files changed, 744 insertions(+), 412 
deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 29b5088244fba..4479e602827e8 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -168,47 +168,105 @@ enum PeerState { /// The peerset requested that we connect to this peer. We are currently dialing this peer. Requested, - /// We are connected to this peer but the peerset hasn't requested it. + /// We are connected to this peer but the peerset hasn't requested it or has denied it. /// /// The handler is either in the closed state, or a `Close` message has been sent to it and /// hasn't been answered yet. Disabled { - /// The connections that are currently open for custom protocol traffic. - open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, - /// If `Some`, any dial attempts to this peer are delayed until the given `Instant`. + /// If `Some`, any connection request from the peerset to this peer is delayed until the + /// given `Instant`. banned_until: Option, + + /// List of connections that are in the `Closed` state but to which a + /// [`NotifsHandlerIn::Open`] message then a [`NotifsHandlerIn::Close`] message has been + /// sent. + opening_and_closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + + /// List of connections that are in the `Closed` state. + closed: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + + /// List of connections that are in the `Closing` state. + /// A [`NotifsHandlerIn::Close`] message has been sent to the handler, and we are now + /// waiting for the actual closing. + closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, }, - /// We are connected to this peer but we are not opening any Substrate substream. + /// We are connected to this peer. The peerset has requested a connection to this peer, but + /// it is currently in a "banned" phase. 
The state will switch to `Enabled` once the timer + /// expires. /// /// The handler is either in the closed state, or a `Close` message has been sent to it and /// hasn't been answered yet. /// /// The handler will be opened when `timer` fires. DisabledPendingEnable { - /// The connections that are currently open for custom protocol traffic. - open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, /// When to enable this remote. References an entry in `delays`. timer: DelayId, /// When the `timer` will trigger. timer_deadline: Instant, + + /// List of connections that are in the `Closed` state but to which a + /// [`NotifsHandlerIn::Open`] message then a [`NotifsHandlerIn::Close`] message has been + /// sent. + opening_and_closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + + /// List of connections that are in the `Closed` state. + closed: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + + /// List of connections that are in the `Closing` state. + /// A [`NotifsHandlerIn::Close`] message has been sent to the handler, and we are now + /// waiting for the actual closing. + closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, }, - /// We are connected to this peer and the peerset has accepted it. We have sent to the - /// handlers an open message that might or might not have been processed already. + /// We are connected to this peer and the peerset has accepted it. Enabled { - /// The connections that are currently open for custom protocol traffic. + /// List of connections that are in the `Closed` state but to which a + /// [`NotifsHandlerIn::Open`] message then a [`NotifsHandlerIn::Close`] message has been + /// sent. + opening_and_closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + + /// List of connections that are in the `Closed` state but to which a + /// [`NotifsHandlerIn::Open`] message has been sent. 
+ opening: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + + /// List of connections that are in the `Open` state. open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, + + /// List of connections that are in the `Closed` state. + closed: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + + /// List of connections that are in the `Closing` state. + /// A [`NotifsHandlerIn::Close`] message has been sent to the handler, and we are now + /// waiting for the actual closing. + closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, }, - /// We have received an `OpenDesired` from the handler and forwarded that request to the - /// peerset. The connection handlers are waiting for a response, i.e. to be opened or closed - /// based on whether the peerset accepts or rejects the peer. + /// We are connected to this peer. We have received an `OpenDesired` from one of the handlers + /// and forwarded that request to the peerset. The connection handlers are waiting for a + /// response, i.e. to be opened or closed based on whether the peerset accepts or rejects the + /// peer. Incoming { - /// The connections that are currently open for custom protocol traffic. - open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, /// If `Some`, any dial attempts to this peer are delayed until the given `Instant`. banned_until: Option, + + /// List of connections that are in the `Closed` state but to which a + /// [`NotifsHandlerIn::Open`] message then a [`NotifsHandlerIn::Close`] message has been + /// sent. + opening_and_closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + + /// List of connections that are in the `Closed` state, meaning that the remote hasn't + /// requested anything. + closed: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + + /// List of connections that are in the `Closing` state. 
+ /// A [`NotifsHandlerIn::Close`] message has been sent to the handler, and we are now + /// waiting for the actual closing. + closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + + /// List of connections that are in the `OpenDesired` state, meaning that the remote + /// wants to open a substream. + open_desired: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, }, } @@ -223,8 +281,6 @@ impl PeerState { /// that is open for custom protocol traffic. fn get_open(&self) -> Option<&NotificationsSink> { match self { - PeerState::Disabled { open, .. } | - PeerState::DisabledPendingEnable { open, .. } | PeerState::Enabled { open, .. } => if !open.is_empty() { Some(&open[0].1) @@ -235,6 +291,8 @@ impl PeerState { PeerState::Banned { .. } => None, PeerState::PendingRequest { .. } => None, PeerState::Requested => None, + PeerState::Disabled { .. } => None, + PeerState::DisabledPendingEnable { .. } => None, PeerState::Incoming { .. } => None, } } @@ -434,7 +492,9 @@ impl GenericProto { // DisabledPendingEnable => Disabled. PeerState::DisabledPendingEnable { - open, + opening_and_closing, + closed, + closing, timer_deadline, timer: _ } => { @@ -446,30 +506,57 @@ impl GenericProto { timer_deadline }); *entry.into_mut() = PeerState::Disabled { - open, + opening_and_closing, + closed, + closing, banned_until } }, // Enabled => Disabled. 
- PeerState::Enabled { open } => { + PeerState::Enabled { mut opening_and_closing, opening, open, closed, mut closing } => { debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); self.peerset.dropped(peer_id.clone()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Close", peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Close, - }); + + if !open.is_empty() { + debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); + let event = GenericProtoOut::CustomProtocolClosed { + peer_id: peer_id.clone(), + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + + for (connec_id, _) in open { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", peer_id, connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(connec_id), + event: NotifsHandlerIn::Close, + }); + closing.push(connec_id); + } + + for connec_id in opening { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", peer_id, connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(connec_id), + event: NotifsHandlerIn::Close, + }); + opening_and_closing.push(connec_id); + } + let banned_until = ban.map(|dur| Instant::now() + dur); *entry.into_mut() = PeerState::Disabled { - open, + opening_and_closing, + closing, + closed, banned_until } }, // Incoming => Disabled. 
- PeerState::Incoming { open, banned_until } => { + PeerState::Incoming { opening_and_closing, mut closing, closed, open_desired, banned_until } => { let inc = if let Some(inc) = self.incoming.iter_mut() .find(|i| i.peer_id == *entry.key() && i.alive) { inc @@ -480,16 +567,23 @@ impl GenericProto { }; inc.alive = false; - debug!(target: "sub-libp2p", "Handler({:?}) <= Close", peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Close, - }); + + for connec_id in open_desired { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", peer_id, connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(connec_id), + event: NotifsHandlerIn::Close, + }); + closing.push(connec_id); + } + // TODO: interaction with `banned_until` above? let banned_until = ban.map(|dur| Instant::now() + dur); *entry.into_mut() = PeerState::Disabled { - open, + opening_and_closing, + closing, + closed, banned_until } }, @@ -584,7 +678,7 @@ impl GenericProto { /// Function that is called when the peerset wants us to connect to a peer. fn peerset_report_connect(&mut self, peer_id: PeerId) { - let mut occ_entry = match self.peers.entry(peer_id) { + let mut occ_entry = match self.peers.entry(peer_id.clone()) { Entry::Occupied(entry) => entry, Entry::Vacant(entry) => { // If there's no entry in `self.peers`, start dialing. 
@@ -632,7 +726,9 @@ impl GenericProto { }, PeerState::Disabled { - open, + opening_and_closing, + closed, + closing, banned_until: Some(ref banned) } if *banned > now => { let peer_id = occ_entry.key().clone(); @@ -648,25 +744,76 @@ impl GenericProto { }.boxed()); *occ_entry.into_mut() = PeerState::DisabledPendingEnable { - open, + opening_and_closing, + closed, + closing, timer: delay_id, timer_deadline: *banned, }; }, - PeerState::Disabled { open, banned_until: _ } => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", - occ_entry.key()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Open", occ_entry.key()); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: occ_entry.key().clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Open, - }); - *occ_entry.into_mut() = PeerState::Enabled { open }; + PeerState::Disabled { opening_and_closing, mut closed, closing, banned_until } => { + // Choose one connection on which to open the notifications substream. + if !closed.is_empty() { + let chosen_connec = closed.remove(0); + debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", + occ_entry.key()); + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", peer_id, chosen_connec); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(chosen_connec), + event: NotifsHandlerIn::Open, + }); + *occ_entry.into_mut() = PeerState::Enabled { + opening_and_closing, + opening: { + let mut l = SmallVec::new(); + l.push(chosen_connec); + l + }, + open: SmallVec::new(), + closed, + closing, + }; + + } else { + // If no connection is available, switch to "banned" mode in order to try + // again later. + debug!( + target: "sub-libp2p", + "PSM => Connect({:?}): No connection in proper state. 
Delaying.", + occ_entry.key() + ); + + let timer_deadline = { + let base = now + Duration::from_secs(5); + if let Some(banned_until) = banned_until { + cmp::max(base, banned_until) + } else { + base + } + }; + + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + debug_assert!(timer_deadline > now); + let delay = futures_timer::Delay::new(timer_deadline - now); + self.delays.push(async move { + delay.await; + (delay_id, peer_id) + }.boxed()); + + *occ_entry.into_mut() = PeerState::DisabledPendingEnable { + opening_and_closing, + closed, + closing, + timer: delay_id, + timer_deadline, + }; + } }, - PeerState::Incoming { open, .. } => { + PeerState::Incoming { opening_and_closing, closed, open_desired, closing, .. } => { debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", occ_entry.key()); if let Some(inc) = self.incoming.iter_mut() @@ -676,13 +823,24 @@ impl GenericProto { error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ incoming for incoming peer") } - debug!(target: "sub-libp2p", "Handler({:?}) <= Open", occ_entry.key()); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: occ_entry.key().clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Open, - }); - *occ_entry.into_mut() = PeerState::Enabled { open }; + + debug_assert!(!open_desired.is_empty()); + for connec_id in &open_desired { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", occ_entry.key(), *connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: occ_entry.key().clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Open, + }); + } + + *occ_entry.into_mut() = PeerState::Enabled { + closed, + closing, + opening_and_closing, + opening: open_desired, + open: SmallVec::new(), + }; }, st @ PeerState::Enabled { .. 
} => { @@ -690,22 +848,27 @@ impl GenericProto { "PSM => Connect({:?}): Already connected.", occ_entry.key()); *occ_entry.into_mut() = st; + debug_assert!(false); }, st @ PeerState::DisabledPendingEnable { .. } => { warn!(target: "sub-libp2p", "PSM => Connect({:?}): Already pending enabling.", occ_entry.key()); *occ_entry.into_mut() = st; + debug_assert!(false); }, st @ PeerState::Requested { .. } | st @ PeerState::PendingRequest { .. } => { warn!(target: "sub-libp2p", "PSM => Connect({:?}): Duplicate request.", occ_entry.key()); *occ_entry.into_mut() = st; + debug_assert!(false); }, - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", occ_entry.key()), + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", occ_entry.key()); + debug_assert!(false); + }, } } @@ -726,7 +889,9 @@ impl GenericProto { }, PeerState::DisabledPendingEnable { - open, + opening_and_closing, + closed, + closing, timer_deadline, timer: _ } => { @@ -734,21 +899,40 @@ impl GenericProto { "PSM => Drop({:?}): Interrupting pending enabling.", entry.key()); *entry.into_mut() = PeerState::Disabled { - open, + opening_and_closing, + closed, + closing, banned_until: Some(timer_deadline), }; }, - PeerState::Enabled { open } => { + PeerState::Enabled { mut opening_and_closing, closed, mut closing, opening, open } => { debug!(target: "sub-libp2p", "PSM => Drop({:?}): Disabling connections.", entry.key()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Close", entry.key()); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: entry.key().clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Close, - }); + + for connec_id in opening { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", entry.key(), connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: entry.key().clone(), + handler: NotifyHandler::One(connec_id), + event: NotifsHandlerIn::Close, + }); + 
opening_and_closing.push(connec_id); + } + + for (connec_id, _) in open { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", entry.key(), connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: entry.key().clone(), + handler: NotifyHandler::One(connec_id), + event: NotifsHandlerIn::Close, + }); + closing.push(connec_id); + } + *entry.into_mut() = PeerState::Disabled { - open, + opening_and_closing, + closed, + closing, banned_until: None } }, @@ -756,6 +940,7 @@ impl GenericProto { error!(target: "sub-libp2p", "PSM => Drop({:?}): Not enabled (Incoming).", entry.key()); *entry.into_mut() = st; + debug_assert!(!false); }, PeerState::Requested => { // We don't cancel dialing. Libp2p doesn't expose that on purpose, as other @@ -769,8 +954,10 @@ impl GenericProto { *entry.into_mut() = PeerState::Banned { until: timer_deadline } }, - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()), + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()); + debug_assert!(!false); + }, } } @@ -801,16 +988,25 @@ impl GenericProto { }; match mem::replace(state, PeerState::Poisoned) { - PeerState::Incoming { open, .. } => { + PeerState::Incoming { opening_and_closing, closing, closed, open_desired, .. 
} => { debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Enabling connections.", index, incoming.peer_id); - debug!(target: "sub-libp2p", "Handler({:?}) <= Open", incoming.peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: incoming.peer_id, - handler: NotifyHandler::All, - event: NotifsHandlerIn::Open, - }); - *state = PeerState::Enabled { open }; + debug_assert!(!open_desired.is_empty()); + for connec_id in &open_desired { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", incoming.peer_id, *connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: incoming.peer_id.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Open, + }); + } + *state = PeerState::Enabled { + opening_and_closing, + closed, + closing, + opening: open_desired, + open: SmallVec::new(), + }; } peer => error!(target: "sub-libp2p", "State mismatch in libp2p: Expected alive incoming. Got {:?}.", @@ -842,17 +1038,23 @@ impl GenericProto { }; match mem::replace(state, PeerState::Poisoned) { - PeerState::Incoming { open, banned_until } => { + PeerState::Incoming { opening_and_closing, mut closing, closed, open_desired, banned_until } => { debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Rejecting connections.", index, incoming.peer_id); - debug!(target: "sub-libp2p", "Handler({:?}) <= Close", incoming.peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: incoming.peer_id, - handler: NotifyHandler::All, - event: NotifsHandlerIn::Close, - }); + debug_assert!(!open_desired.is_empty()); + for connec_id in open_desired { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", incoming.peer_id, connec_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: incoming.peer_id.clone(), + handler: NotifyHandler::One(connec_id), + event: NotifsHandlerIn::Close, + }); + closing.push(connec_id); + } *state = PeerState::Disabled { - open, + 
opening_and_closing, + closed, + closing, banned_until, }; } @@ -882,8 +1084,6 @@ impl NetworkBehaviour for GenericProto { } fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} established.", - conn, endpoint, peer_id); match self.peers.entry(peer_id.clone()).or_insert(PeerState::Poisoned) { st @ &mut PeerState::Requested | st @ &mut PeerState::PendingRequest { .. } => { @@ -891,7 +1091,18 @@ impl NetworkBehaviour for GenericProto { "Libp2p => Connected({}, {:?}): Connection was requested by PSM.", peer_id, endpoint ); - *st = PeerState::Enabled { open: SmallVec::new() }; + *st = PeerState::Enabled { + opening: { + let mut l = SmallVec::new(); + l.push(*conn); + l + }, + opening_and_closing: SmallVec::new(), + closing: SmallVec::new(), + closed: SmallVec::new(), + open: SmallVec::new(), + }; + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", peer_id, *conn); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), handler: NotifyHandler::One(*conn), @@ -910,150 +1121,233 @@ impl NetworkBehaviour for GenericProto { None }; debug!(target: "sub-libp2p", - "Libp2p => Connected({},{:?}): Not requested by PSM, disabling.", - peer_id, endpoint); - *st = PeerState::Disabled { open: SmallVec::new(), banned_until }; + "Libp2p => Connected({}, {:?}, {:?}): Not requested by PSM, disabling.", + peer_id, endpoint, *conn); + *st = PeerState::Disabled { + opening_and_closing: SmallVec::new(), + closed: { + let mut l = SmallVec::new(); + l.push(*conn); + l + }, + closing: SmallVec::new(), + banned_until + }; } - PeerState::Incoming { .. } => { + PeerState::Incoming { closed, .. } | + PeerState::Disabled { closed, .. } | + PeerState::DisabledPendingEnable { closed, .. } | + PeerState::Enabled { closed, .. 
} => { debug!(target: "sub-libp2p", - "Secondary connection {:?} to {} waiting for PSM decision.", - conn, peer_id); - }, - - PeerState::Enabled { .. } => { - debug!(target: "sub-libp2p", "Handler({},{:?}) <= Enable secondary connection", - peer_id, conn); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Open - }); - } - - PeerState::Disabled { .. } | PeerState::DisabledPendingEnable { .. } => { - debug!(target: "sub-libp2p", "Handler({},{:?}) <= Disable secondary connection", - peer_id, conn); + "Libp2p => Connected({}, {:?}, {:?}): Secondary connection. Leaving closed.", + peer_id, endpoint, *conn); + closed.push(*conn); } } } - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} closed.", - conn, endpoint, peer_id); - match self.peers.get_mut(peer_id) { - Some(PeerState::Disabled { open, .. }) | - Some(PeerState::DisabledPendingEnable { open, .. }) | - Some(PeerState::Enabled { open, .. }) => { - // Check if the "link" to the peer is already considered closed, - // i.e. there is no connection that is open for custom protocols, - // in which case `CustomProtocolClosed` was already emitted. 
- let closed = open.is_empty(); - let sink_closed = open.get(0).map_or(false, |(c, _)| c == conn); - open.retain(|(c, _)| c != conn); - if !closed { - if let Some((_, sink)) = open.get(0) { - if sink_closed { - let event = GenericProtoOut::CustomProtocolReplaced { - peer_id: peer_id.clone(), - notifications_sink: sink.clone(), - }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - } - } else { - debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); - let event = GenericProtoOut::CustomProtocolClosed { - peer_id: peer_id.clone(), - }; - - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - } - } - } - _ => {} - } - } + fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, _endpoint: &ConnectedPoint) { + let mut entry = if let Entry::Occupied(entry) = self.peers.entry(peer_id.clone()) { + entry + } else { + error!(target: "sub-libp2p", "inject_connection_closed: State mismatch in the custom protos handler"); + debug_assert!(false); + return + }; - fn inject_disconnected(&mut self, peer_id: &PeerId) { - match self.peers.remove(peer_id) { - None | Some(PeerState::Requested) | Some(PeerState::PendingRequest { .. }) | - Some(PeerState::Banned { .. }) => + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + PeerState::Requested | + PeerState::PendingRequest { .. } | + PeerState::Banned { .. } => { // This is a serious bug either in this state machine or in libp2p. error!(target: "sub-libp2p", "`inject_disconnected` called for unknown peer {}", - peer_id), + peer_id); + debug_assert!(false); + }, - Some(PeerState::Disabled { open, banned_until, .. 
}) => { - if !open.is_empty() { + PeerState::Disabled { mut opening_and_closing, mut closed, mut closing, banned_until } => { + debug!(target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}): Disabled.", peer_id, *conn); + + if let Some(pos) = opening_and_closing.iter().position(|c| *c == *conn) { + opening_and_closing.remove(pos); + } else if let Some(pos) = closed.iter().position(|c| *c == *conn) { + closed.remove(pos); + } else if let Some(pos) = closing.iter().position(|c| *c == *conn) { + closing.remove(pos); + } else { debug_assert!(false); - error!( - target: "sub-libp2p", - "State mismatch: disconnected from {} with non-empty list of connections", - peer_id - ); + error!(target: "sub-libp2p", + "inject_connection_closed: State mismatch in the custom protos handler"); } - debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was disabled.", peer_id); - if let Some(until) = banned_until { - self.peers.insert(peer_id.clone(), PeerState::Banned { until }); + + if opening_and_closing.is_empty() && closed.is_empty() && closing.is_empty() { + if let Some(until) = banned_until { + *entry.get_mut() = PeerState::Banned { until }; + } else { + entry.remove(); + } + } else { + *entry.get_mut() = PeerState::Disabled { + opening_and_closing, closed, closing, banned_until + }; } - } + }, - Some(PeerState::DisabledPendingEnable { open, timer_deadline, .. 
}) => { - if !open.is_empty() { + PeerState::DisabledPendingEnable { mut opening_and_closing, mut closed, mut closing, timer_deadline, timer } => { + debug!( + target: "sub-libp2p", + "Libp2p => Disconnected({}, {:?}): Disabled but pending enable.", + peer_id, *conn + ); + + if let Some(pos) = opening_and_closing.iter().position(|c| *c == *conn) { + opening_and_closing.remove(pos); + } else if let Some(pos) = closed.iter().position(|c| *c == *conn) { + closed.remove(pos); + } else if let Some(pos) = closing.iter().position(|c| *c == *conn) { + closing.remove(pos); + } else { debug_assert!(false); - error!( - target: "sub-libp2p", - "State mismatch: disconnected from {} with non-empty list of connections", - peer_id - ); + error!(target: "sub-libp2p", + "inject_connection_closed: State mismatch in the custom protos handler"); } - debug!(target: "sub-libp2p", - "Libp2p => Disconnected({}): Was disabled but pending enable.", - peer_id); - debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); - self.peerset.dropped(peer_id.clone()); - self.peers.insert(peer_id.clone(), PeerState::Banned { until: timer_deadline }); - } - Some(PeerState::Enabled { open, .. 
}) => { - if !open.is_empty() { + if opening_and_closing.is_empty() && closed.is_empty() && closing.is_empty() { + debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); + self.peerset.dropped(peer_id.clone()); + *entry.get_mut() = PeerState::Banned { until: timer_deadline }; + + } else { + *entry.get_mut() = PeerState::DisabledPendingEnable { + opening_and_closing, closed, closing, timer_deadline, timer + }; + } + }, + + PeerState::Incoming { mut opening_and_closing, mut closing, mut closed, mut open_desired, banned_until } => { + debug!( + target: "sub-libp2p", + "Libp2p => Disconnected({}, {:?}): OpenDesired.", + peer_id, *conn + ); + + if let Some(pos) = opening_and_closing.iter().position(|c| *c == *conn) { + opening_and_closing.remove(pos); + } else if let Some(pos) = closing.iter().position(|c| *c == *conn) { + closing.remove(pos); + } else if let Some(pos) = closed.iter().position(|c| *c == *conn) { + closed.remove(pos); + } else if let Some(pos) = open_desired.iter().position(|c| *c == *conn) { + open_desired.remove(pos); + + // In the incoming state, we don't report "Dropped". Instead we will just + // ignore the corresponding Accept/Reject. 
+ if let Some(state) = self.incoming.iter_mut() + .find(|i| i.alive && i.peer_id == *peer_id) + { + state.alive = false; + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in incoming \ + corresponding to an incoming state in peers"); + debug_assert!(false); + } + + } else { debug_assert!(false); - error!( - target: "sub-libp2p", - "State mismatch: disconnected from {} with non-empty list of connections", - peer_id - ); + error!(target: "sub-libp2p", + "inject_connection_closed: State mismatch in the custom protos handler"); + } + + if opening_and_closing.is_empty() && closing.is_empty() && closed.is_empty() && + open_desired.is_empty() + { + if let Some(until) = banned_until { + *entry.get_mut() = PeerState::Banned { until }; + } else { + entry.remove(); + } + + } else if open_desired.is_empty() { + *entry.get_mut() = PeerState::Disabled { + opening_and_closing, closed, closing, banned_until + }; + + } else { + *entry.get_mut() = PeerState::Incoming { + opening_and_closing, closing, closed, open_desired, banned_until + }; } - debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was enabled.", peer_id); - debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); - self.peerset.dropped(peer_id.clone()); - let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); - self.peers.insert(peer_id.clone(), PeerState::Banned { - until: Instant::now() + Duration::from_secs(ban_dur) - }); } - // In the incoming state, we don't report "Dropped". Instead we will just ignore the - // corresponding Accept/Reject. - Some(PeerState::Incoming { .. 
}) => { - if let Some(state) = self.incoming.iter_mut() - .find(|i| i.alive && i.peer_id == *peer_id) + PeerState::Enabled { mut opening_and_closing, mut closed, mut closing, mut opening, mut open } => { + debug!( + target: "sub-libp2p", + "Libp2p => Disconnected({}, {:?}): Enabled.", + peer_id, *conn + ); + + if let Some(pos) = opening_and_closing.iter().position(|c| *c == *conn) { + opening_and_closing.remove(pos); + } else if let Some(pos) = closed.iter().position(|c| *c == *conn) { + closed.remove(pos); + } else if let Some(pos) = closing.iter().position(|c| *c == *conn) { + closing.remove(pos); + } else if let Some(pos) = opening.iter().position(|c| *c == *conn) { + opening.remove(pos); + } else if let Some(pos) = open.iter().position(|(c, _)| *c == *conn) { + open.remove(pos); + + if open.is_empty() { + debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); + let event = GenericProtoOut::CustomProtocolClosed { + peer_id: peer_id.clone(), + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + + } else if pos == 0 { + debug!(target: "sub-libp2p", "External API <= Sink replaced({})", peer_id); + let event = GenericProtoOut::CustomProtocolReplaced { + peer_id: peer_id.clone(), + notifications_sink: open[0].1.clone(), + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + + } else { + debug_assert!(false); + error!(target: "sub-libp2p", + "inject_connection_closed: State mismatch in the custom protos handler"); + } + + if opening_and_closing.is_empty() && closed.is_empty() && closing.is_empty() + && opening.is_empty() && open.is_empty() { - debug!(target: "sub-libp2p", - "Libp2p => Disconnected({}): Was in incoming mode with id {:?}.", - peer_id, state.incoming_id); - state.alive = false; + debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); + self.peerset.dropped(peer_id.clone()); + let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); + *entry.get_mut() = PeerState::Banned { + 
until: Instant::now() + Duration::from_secs(ban_dur) + }; + } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in incoming \ - corresponding to an incoming state in peers") + *entry.get_mut() = PeerState::Enabled { + opening_and_closing, closed, closing, opening, open + }; } } - Some(PeerState::Poisoned) => - error!(target: "sub-libp2p", "State of peer {} is poisoned", peer_id), + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of peer {} is poisoned", peer_id); + debug_assert!(false); + }, } } + fn inject_disconnected(&mut self, _peer_id: &PeerId) { + } + fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn error::Error) { trace!(target: "sub-libp2p", "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error); } @@ -1085,8 +1379,10 @@ impl NetworkBehaviour for GenericProto { *entry.into_mut() = st; }, - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id), + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id); + debug_assert!(false); + }, } } else { @@ -1104,8 +1400,8 @@ impl NetworkBehaviour for GenericProto { match event { NotifsHandlerOut::OpenDesired => { debug!(target: "sub-libp2p", - "Handler({:?}) => Open requested from the remote", - source); + "Handler({:?}, {:?}]) => OpenDesired", + source, connection); let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { entry @@ -1116,9 +1412,38 @@ impl NetworkBehaviour for GenericProto { }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { - PeerState::Enabled { open, .. } | - PeerState::DisabledPendingEnable { open, .. } | - PeerState::Disabled { open, .. 
} => { + PeerState::Incoming { opening_and_closing, closing, closed, mut open_desired, banned_until } => { + debug_assert!(!open_desired.is_empty()); + open_desired.push(connection); + *entry.into_mut() = PeerState::Incoming { + opening_and_closing, + closing, + closed, + open_desired, + banned_until, + }; + }, + + PeerState::Enabled { opening_and_closing, closing, closed, mut opening, open } => { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", source, connection); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: source, + handler: NotifyHandler::One(connection), + event: NotifsHandlerIn::Open, + }); + opening.push(connection); + + *entry.into_mut() = PeerState::Enabled { + opening_and_closing, + closing, + closed, + opening, + open, + }; + }, + + PeerState::DisabledPendingEnable { opening_and_closing, closed, closing, .. } | + PeerState::Disabled { opening_and_closing, closed, closing, .. } => { let incoming_id = self.next_incoming_index; self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { Some(v) => v, @@ -1127,6 +1452,7 @@ impl NetworkBehaviour for GenericProto { return } }; + debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", source, incoming_id); self.peerset.incoming(source.clone(), incoming_id); @@ -1135,15 +1461,25 @@ impl NetworkBehaviour for GenericProto { alive: true, incoming_id, }); + *entry.into_mut() = PeerState::Incoming { - open, + opening_and_closing, + closing, + closed, + open_desired: { + let mut l = SmallVec::new(); + l.push(connection); + l + }, banned_until: None, // TODO: get from `DisabledPendingEnable` }; } + state => { error!(target: "sub-libp2p", - "Open: Unexpected state in the custom protos handler: {:?}", + "OpenDesired: Unexpected state in the custom protos handler: {:?}", state); + debug_assert!(false); return } }; @@ -1151,8 +1487,8 @@ impl NetworkBehaviour for GenericProto { NotifsHandlerOut::CloseDesired => { debug!(target: "sub-libp2p", - "Handler({:?}) 
=> Closing requested from the remote", - source); + "Handler({}, {:?}) => CloseDesired", + source, connection); let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { entry @@ -1162,48 +1498,56 @@ impl NetworkBehaviour for GenericProto { return }; - let (last, new_notifications_sink) = match mem::replace(entry.get_mut(), PeerState::Poisoned) { - PeerState::Enabled { mut open } => { - let pos = open.iter().position(|(c, _)| c == &connection); - let sink_closed = pos == Some(0); - if let Some(pos) = pos { - open.remove(pos); + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + PeerState::Enabled { opening_and_closing, mut closing, closed, opening, mut open } => { + let pos = if let Some(pos) = open.iter().position(|(c, _)| *c == connection) { + pos } else { - debug_assert!(false); - error!( - target: "sub-libp2p", - "State mismatch with {}: unknown closed connection", - source - ); - } + debug_assert!(closing.iter().any(|c| *c == connection)); + return; + }; - // TODO: We switch the entire peer state to "disabled" because of possible - // race conditions involving the legacy substream. - // Once https://github.com/paritytech/substrate/issues/5670 is done, this - // should be changed to stay in the `Enabled` state. 
- debug!(target: "sub-libp2p", "Handler({:?}) <= Close", source); - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); - self.peerset.dropped(source.clone()); + open.remove(pos); + + debug!(target: "sub-libp2p", "Handler({}, {:?}) <= Close", source, connection); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: source.clone(), - handler: NotifyHandler::All, + handler: NotifyHandler::One(connection), event: NotifsHandlerIn::Close, }); - - let last = open.is_empty(); - let new_notifications_sink = open.iter().next().and_then(|(_, sink)| - if sink_closed { - Some(sink.clone()) + closing.push(connection); + + if open.is_empty() { + if opening.is_empty() { + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); + self.peerset.dropped(source.clone()); + *entry.into_mut() = PeerState::Disabled { + opening_and_closing, closing, closed, banned_until: None + }; } else { - None - }); + *entry.into_mut() = PeerState::Enabled { + opening_and_closing, closing, closed, opening, open + }; + } - *entry.into_mut() = PeerState::Disabled { - open, - banned_until: None - }; + debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); + let event = GenericProtoOut::CustomProtocolClosed { + peer_id: source, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - (last, new_notifications_sink) + } else if pos == 0 { + debug!(target: "sub-libp2p", "External API <= Sink replaced({:?})", source); + let new_notifications_sink = open[0].1.clone(); + let event = GenericProtoOut::CustomProtocolReplaced { + peer_id: source, + notifications_sink: new_notifications_sink, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + *entry.into_mut() = PeerState::Enabled { + opening_and_closing, closing, closed, opening, open + }; + } }, state @ PeerState::Disabled { .. } | state @ PeerState::DisabledPendingEnable { .. 
} => { @@ -1216,206 +1560,164 @@ impl NetworkBehaviour for GenericProto { state); return } - }; - - if last { - debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); - let event = GenericProtoOut::CustomProtocolClosed { - peer_id: source, - }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - - } else { - if let Some(new_notifications_sink) = new_notifications_sink { - let event = GenericProtoOut::CustomProtocolReplaced { - peer_id: source, - notifications_sink: new_notifications_sink, - }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - } - debug!(target: "sub-libp2p", "Secondary connection closed custom protocol."); } } NotifsHandlerOut::CloseResult => { debug!(target: "sub-libp2p", - "Handler({:?}) => Endpoint closed for notifications protocols", - source); - - let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { - entry - } else { - error!(target: "sub-libp2p", "Closed: State mismatch in the custom protos handler"); - return - }; - - let (last, new_notifications_sink) = match mem::replace(entry.get_mut(), PeerState::Poisoned) { - state @ PeerState::Enabled { .. } => { - *entry.into_mut() = state; - return; - }, - PeerState::Disabled { mut open, banned_until } => { - let pos = open.iter().position(|(c, _)| c == &connection); - let sink_closed = pos == Some(0); - if let Some(pos) = pos { - open.remove(pos); + "Handler({}, {:?}) => Endpoint closed for notifications protocols", + source, connection); + + match self.peers.get_mut(&source) { + Some(PeerState::DisabledPendingEnable { closing, closed, .. }) | + Some(PeerState::Disabled { closing, closed, .. }) | + Some(PeerState::Enabled { closing, closed, .. 
}) => { + if let Some(pos) = closing.iter().position(|c| *c == connection) { + closing.remove(pos); + closed.push(connection); } else { debug_assert!(false); - error!( - target: "sub-libp2p", - "State mismatch with {}: unknown closed connection", - source - ); + error!(target: "sub-libp2p", + "CloseResult: State mismatch in the custom protos handler"); } - - let last = open.is_empty(); - let new_notifications_sink = open.iter().next().and_then(|(_, sink)| - if sink_closed { - Some(sink.clone()) - } else { - None - }); - - *entry.into_mut() = PeerState::Disabled { - open, - banned_until - }; - - (last, new_notifications_sink) }, - PeerState::DisabledPendingEnable { - mut open, - timer, - timer_deadline - } => { - let pos = open.iter().position(|(c, _)| c == &connection); - let sink_closed = pos == Some(0); - if let Some(pos) = pos { - open.remove(pos); - } else { - debug_assert!(false); - error!( - target: "sub-libp2p", - "State mismatch with {}: unknown closed connection", - source - ); - } - - let last = open.is_empty(); - let new_notifications_sink = open.iter().next().and_then(|(_, sink)| - if sink_closed { - Some(sink.clone()) - } else { - None - }); - *entry.into_mut() = PeerState::DisabledPendingEnable { - open, - timer, - timer_deadline - }; - - (last, new_notifications_sink) - }, state => { error!(target: "sub-libp2p", - "Unexpected state in the custom protos handler: {:?}", - state); - return - } - }; - - if last { - debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); - let event = GenericProtoOut::CustomProtocolClosed { - peer_id: source, - }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - - } else { - if let Some(new_notifications_sink) = new_notifications_sink { - let event = GenericProtoOut::CustomProtocolReplaced { - peer_id: source, - notifications_sink: new_notifications_sink, - }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + "CloseResult: Unexpected state in the custom protos 
handler: {:?}", + state); + debug_assert!(false); } - debug!(target: "sub-libp2p", "Secondary connection closed custom protocol."); } } - NotifsHandlerOut::OpenResultOk { endpoint, received_handshake, notifications_sink } => { + NotifsHandlerOut::OpenResultOk { received_handshake, notifications_sink, .. } => { debug!(target: "sub-libp2p", - "Handler({:?}) => Endpoint {:?} open for custom protocols.", - source, endpoint); - - let first = match self.peers.get_mut(&source) { - Some(PeerState::Enabled { ref mut open, .. }) | - Some(PeerState::DisabledPendingEnable { ref mut open, .. }) | - Some(PeerState::Disabled { ref mut open, .. }) => { - let first = open.is_empty(); - if !open.iter().any(|(c, _)| *c == connection) { - open.push((connection, notifications_sink.clone())); + "Handler({}, {:?}) => OpenResultOk", + source, connection); + + match self.peers.get_mut(&source) { + Some(PeerState::Enabled { opening_and_closing, closing, opening, open, .. }) => { + if let Some(pos) = opening.iter().position(|c| *c == connection) { + if open.is_empty() { + debug!(target: "sub-libp2p", "External API <= Open({:?})", source); + let event = GenericProtoOut::CustomProtocolOpen { + peer_id: source, + received_handshake, + notifications_sink: notifications_sink.clone(), + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + opening.remove(pos); + open.push((connection, notifications_sink)); + } else if let Some(pos) = opening_and_closing.iter().position(|c| *c == connection) { + opening_and_closing.remove(pos); + closing.push(connection); } else { - error!( - target: "sub-libp2p", - "State mismatch: connection with {} opened a second time", - source - ); + debug_assert!(false); + error!(target: "sub-libp2p", + "OpenResultOk State mismatch in the custom protos handler"); + } + }, + + Some(PeerState::DisabledPendingEnable { opening_and_closing, closing, .. }) | + Some(PeerState::Disabled { opening_and_closing, closing, .. 
}) => { + if let Some(pos) = opening_and_closing.iter().position(|c| *c == connection) { + opening_and_closing.remove(pos); + closing.push(connection); + } else { + debug_assert!(false); + error!(target: "sub-libp2p", + "OpenResultOk State mismatch in the custom protos handler"); } - first } + state => { error!(target: "sub-libp2p", - "Open: Unexpected state in the custom protos handler: {:?}", + "OpenResultOk: Unexpected state in the custom protos handler: {:?}", state); + debug_assert!(false); return } - }; - - if first { - debug!(target: "sub-libp2p", "External API <= Open({:?})", source); - let event = GenericProtoOut::CustomProtocolOpen { - peer_id: source, - received_handshake, - notifications_sink - }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - - } else { - debug!( - target: "sub-libp2p", - "Handler({:?}) => Secondary connection opened custom protocol", - source - ); } } NotifsHandlerOut::OpenResultErr => { debug!(target: "sub-libp2p", - "Handler({:?}) => Failed to open substream with remote", - source); + "Handler({:?}, {:?}) => Failed to open substream with remote", + source, connection); let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { entry } else { - error!(target: "sub-libp2p", "Closed: State mismatch in the custom protos handler"); + error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler"); return }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { - PeerState::Enabled { open } => { - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); - self.peerset.dropped(source.clone()); + PeerState::Enabled { mut closed, mut closing, mut opening_and_closing, mut opening, open } => { + if let Some(pos) = opening.iter().position(|c| *c == connection) { + opening.remove(pos); + closed.push(connection); + } else if let Some(pos) = opening_and_closing.iter().position(|c| *c == connection) { + opening_and_closing.remove(pos); + closing.push(connection); 
+ } else { + debug_assert!(false); + error!(target: "sub-libp2p", + "OpenResultErr: State mismatch in the custom protos handler"); + } + + if opening.is_empty() && open.is_empty() { + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); + self.peerset.dropped(source.clone()); + + *entry.into_mut() = PeerState::Disabled { + closed, + closing, + opening_and_closing, + banned_until: None + }; + } else { + *entry.into_mut() = PeerState::Enabled { + closed, closing, opening_and_closing, opening, open + }; + } + }, + PeerState::Disabled { closed, mut closing, mut opening_and_closing, banned_until } => { + if let Some(pos) = opening_and_closing.iter().position(|c| *c == connection) { + opening_and_closing.remove(pos); + closing.push(connection); + } else { + debug_assert!(false); + error!(target: "sub-libp2p", + "OpenResultErr: State mismatch in the custom protos handler"); + } *entry.into_mut() = PeerState::Disabled { - open, - banned_until: None + closed, + closing, + opening_and_closing, + banned_until, }; }, - state @ PeerState::Disabled { .. } | - state @ PeerState::DisabledPendingEnable { .. } => { - *entry.into_mut() = state; + PeerState::DisabledPendingEnable { closed, mut closing, mut opening_and_closing, timer, timer_deadline } => { + if let Some(pos) = opening_and_closing.iter().position(|c| *c == connection) { + opening_and_closing.remove(pos); + closing.push(connection); + } else { + debug_assert!(false); + error!(target: "sub-libp2p", + "OpenResultErr: State mismatch in the custom protos handler"); + } + + *entry.into_mut() = PeerState::DisabledPendingEnable { + closed, + closing, + opening_and_closing, + timer, + timer_deadline, + }; }, state => { error!(target: "sub-libp2p", @@ -1515,14 +1817,38 @@ impl NetworkBehaviour for GenericProto { *peer_state = PeerState::Requested; } - PeerState::DisabledPendingEnable { timer, open, .. 
} if *timer == delay_id => { - debug!(target: "sub-libp2p", "Handler({:?}) <= Open (ban expired)", peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler: NotifyHandler::All, - event: NotifsHandlerIn::Open, - }); - *peer_state = PeerState::Enabled { open: mem::replace(open, Default::default()) }; + PeerState::DisabledPendingEnable { timer, opening_and_closing, closed, closing, timer_deadline } + if *timer == delay_id => { + + if !closed.is_empty() { + let chosen_connec = closed.remove(0); + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open (ban expired)", + peer_id, chosen_connec); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(chosen_connec), + event: NotifsHandlerIn::Open, + }); + *peer_state = PeerState::Enabled { + opening_and_closing: mem::replace(opening_and_closing, Default::default()), + opening: { + let mut l = SmallVec::new(); + l.push(chosen_connec); + l + }, + open: SmallVec::new(), + closed: mem::replace(closed, Default::default()), + closing: mem::replace(closing, Default::default()), + }; + } else { + *timer_deadline = Instant::now() + Duration::from_secs(5); + let delay = futures_timer::Delay::new(Duration::from_secs(5)); + let timer = *timer; + self.delays.push(async move { + delay.await; + (timer, peer_id) + }.boxed()); + } } // We intentionally never remove elements from `delays`, and it may diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index 2e9ce8b712592..df14e197ec9fe 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -526,6 +526,12 @@ impl ProtocolsHandler for NotifsHandler { self.state = State::Closed { pending_in: Vec::new(), }; + + if matches!(self.state, State::Opening { .. 
}) { + self.events_queue.push_back( + ProtocolsHandlerEvent::Custom(NotifsHandlerOut::OpenResultErr) + ); + } }, State::Closed { pending_in } => { for num in pending_in.drain(..) { From 4182faff9b118095b5f0882f16632e2b261a51e2 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 30 Oct 2020 11:44:34 +0100 Subject: [PATCH 03/39] Some bugfixes --- .../src/protocol/generic_proto/behaviour.rs | 49 +++++++++++++------ 1 file changed, 35 insertions(+), 14 deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 4479e602827e8..935ed59bb8afb 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -1316,9 +1316,9 @@ impl NetworkBehaviour for GenericProto { } } else { - debug_assert!(false); error!(target: "sub-libp2p", "inject_connection_closed: State mismatch in the custom protos handler"); + debug_assert!(false); } if opening_and_closing.is_empty() && closed.is_empty() && closing.is_empty() @@ -1412,9 +1412,15 @@ impl NetworkBehaviour for GenericProto { }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { - PeerState::Incoming { opening_and_closing, closing, closed, mut open_desired, banned_until } => { + PeerState::Incoming { opening_and_closing, closing, mut closed, mut open_desired, banned_until } => { debug_assert!(!open_desired.is_empty()); - open_desired.push(connection); + if let Some(pos) = closed.iter().position(|c| *c == connection) { + closed.remove(pos); + open_desired.push(connection); + } else { + debug_assert!(opening_and_closing.iter().any(|c| *c == connection)); + } + *entry.into_mut() = PeerState::Incoming { opening_and_closing, closing, @@ -1424,14 +1430,23 @@ impl NetworkBehaviour for GenericProto { }; }, - PeerState::Enabled { opening_and_closing, closing, closed, mut opening, open } => { - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", source, connection); - 
self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: source, - handler: NotifyHandler::One(connection), - event: NotifsHandlerIn::Open, - }); - opening.push(connection); + PeerState::Enabled { opening_and_closing, closing, mut closed, mut opening, open } => { + if let Some(pos) = closed.iter().position(|c| *c == connection) { + closed.remove(pos); + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", source, connection); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: source, + handler: NotifyHandler::One(connection), + event: NotifsHandlerIn::Open, + }); + opening.push(connection); + + } else { + debug_assert!( + opening_and_closing.iter().any(|c| *c == connection) || + opening.iter().any(|c| *c == connection) + ); + } *entry.into_mut() = PeerState::Enabled { opening_and_closing, @@ -1442,8 +1457,14 @@ impl NetworkBehaviour for GenericProto { }; }, - PeerState::DisabledPendingEnable { opening_and_closing, closed, closing, .. } | - PeerState::Disabled { opening_and_closing, closed, closing, .. } => { + PeerState::DisabledPendingEnable { opening_and_closing, mut closed, closing, .. } | + PeerState::Disabled { opening_and_closing, mut closed, closing, .. 
} => { + if let Some(pos) = closed.iter().position(|c| *c == connection) { + closed.remove(pos); + } else { + debug_assert!(opening_and_closing.iter().any(|c| *c == connection)); + } + let incoming_id = self.next_incoming_index; self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { Some(v) => v, @@ -1565,7 +1586,7 @@ impl NetworkBehaviour for GenericProto { NotifsHandlerOut::CloseResult => { debug!(target: "sub-libp2p", - "Handler({}, {:?}) => Endpoint closed for notifications protocols", + "Handler({}, {:?}) => CloseResult", source, connection); match self.peers.get_mut(&source) { From 602689443b3be4e15a6bc426d78d7551dd552491 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 30 Oct 2020 13:56:13 +0100 Subject: [PATCH 04/39] Fix warnings so that CI runs, gmlrlblbl --- client/network/src/protocol/generic_proto/behaviour.rs | 9 +++++++-- .../network/src/protocol/generic_proto/handler/group.rs | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 935ed59bb8afb..df794cda258fe 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -578,8 +578,13 @@ impl GenericProto { closing.push(connec_id); } - // TODO: interaction with `banned_until` above? 
- let banned_until = ban.map(|dur| Instant::now() + dur); + let banned_until = match (banned_until, ban) { + (Some(a), Some(b)) => Some(cmp::max(a, Instant::now() + b)), + (Some(a), None) => Some(a), + (None, Some(b)) => Some(Instant::now() + b), + (None, None) => None, + }; + *entry.into_mut() = PeerState::Disabled { opening_and_closing, closing, diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index df14e197ec9fe..d97a3b9b57623 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -808,7 +808,7 @@ impl ProtocolsHandler for NotifsHandler { *pending_handshake = Some(handshake); }, - (ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { handshake }), _) + (ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { .. }), _) if handler_num == 0 => debug_assert!(false), (ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { .. }), _) => {}, From 856abcf53788dc79c762257c0db737cccdcde1df Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 2 Nov 2020 10:10:26 +0100 Subject: [PATCH 05/39] Bugfixes --- .../src/protocol/generic_proto/behaviour.rs | 286 +++++++++++++----- 1 file changed, 208 insertions(+), 78 deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index df794cda258fe..a047b662fa7f5 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -179,15 +179,16 @@ enum PeerState { /// List of connections that are in the `Closed` state but to which a /// [`NotifsHandlerIn::Open`] message then a [`NotifsHandlerIn::Close`] message has been - /// sent. + /// sent. An `OpenResultOk`/`OpenResultErr` message followed with a `CloseResult` message + /// are expected. 
opening_and_closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, /// List of connections that are in the `Closed` state. closed: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - /// List of connections that are in the `Closing` state. - /// A [`NotifsHandlerIn::Close`] message has been sent to the handler, and we are now - /// waiting for the actual closing. + /// List of connections that are either in the `Open` or the `Closed` state, but to which + /// a [`NotifsHandlerIn::Close`] message has been sent. Waiting for this message to be + /// acknowledged through a [`NotifsHandlerOut::CloseResult`]. closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, }, @@ -207,15 +208,16 @@ enum PeerState { /// List of connections that are in the `Closed` state but to which a /// [`NotifsHandlerIn::Open`] message then a [`NotifsHandlerIn::Close`] message has been - /// sent. + /// sent. An `OpenResultOk`/`OpenResultErr` message followed with a `CloseResult` message + /// are expected. opening_and_closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, /// List of connections that are in the `Closed` state. closed: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - /// List of connections that are in the `Closing` state. - /// A [`NotifsHandlerIn::Close`] message has been sent to the handler, and we are now - /// waiting for the actual closing. + /// List of connections that are either in the `Open` or the `Closed` state, but to which + /// a [`NotifsHandlerIn::Close`] message has been sent. Waiting for this message to be + /// acknowledged through a [`NotifsHandlerOut::CloseResult`]. closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, }, @@ -223,7 +225,8 @@ enum PeerState { Enabled { /// List of connections that are in the `Closed` state but to which a /// [`NotifsHandlerIn::Open`] message then a [`NotifsHandlerIn::Close`] message has been - /// sent. + /// sent. 
An `OpenResultOk`/`OpenResultErr` message followed with a `CloseResult` message + /// are expected. opening_and_closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, /// List of connections that are in the `Closed` state but to which a @@ -231,14 +234,16 @@ enum PeerState { opening: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, /// List of connections that are in the `Open` state. + /// + /// The external API is notified of a channel if and only if `!open.is_empty()`. open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, /// List of connections that are in the `Closed` state. closed: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - /// List of connections that are in the `Closing` state. - /// A [`NotifsHandlerIn::Close`] message has been sent to the handler, and we are now - /// waiting for the actual closing. + /// List of connections that are either in the `Open` or the `Closed` state, but to which + /// a [`NotifsHandlerIn::Close`] message has been sent. Waiting for this message to be + /// acknowledged through a [`NotifsHandlerOut::CloseResult`]. closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, }, @@ -252,19 +257,21 @@ enum PeerState { /// List of connections that are in the `Closed` state but to which a /// [`NotifsHandlerIn::Open`] message then a [`NotifsHandlerIn::Close`] message has been - /// sent. + /// sent. An `OpenResultOk`/`OpenResultErr` message followed with a `CloseResult` message + /// are expected. opening_and_closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, /// List of connections that are in the `Closed` state, meaning that the remote hasn't /// requested anything. closed: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - /// List of connections that are in the `Closing` state. - /// A [`NotifsHandlerIn::Close`] message has been sent to the handler, and we are now - /// waiting for the actual closing. 
+ /// List of connections that are either in the `Open` or the `Closed` state, but to which + /// a [`NotifsHandlerIn::Close`] message has been sent. Waiting for this message to be + /// acknowledged through a [`NotifsHandlerOut::CloseResult`]. closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - /// List of connections that are in the `OpenDesired` state, meaning that the remote + /// List of connections that are in the `Closed` state, but for which a + /// [`NotifsHandlerOut::OpenDesired`] message has been received, meaning that the remote /// wants to open a substream. open_desired: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, }, @@ -514,6 +521,8 @@ impl GenericProto { }, // Enabled => Disabled. + // All open or opening connections are sent a `Close` message. + // If relevant, the external API is instantly notified. PeerState::Enabled { mut opening_and_closing, opening, open, closed, mut closing } => { debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); self.peerset.dropped(peer_id.clone()); @@ -556,6 +565,7 @@ impl GenericProto { }, // Incoming => Disabled. + // Ongoing opening requests from the remote are rejected. PeerState::Incoming { opening_and_closing, mut closing, closed, open_desired, banned_until } => { let inc = if let Some(inc) = self.incoming.iter_mut() .find(|i| i.peer_id == *entry.key() && i.alive) { @@ -683,6 +693,7 @@ impl GenericProto { /// Function that is called when the peerset wants us to connect to a peer. fn peerset_report_connect(&mut self, peer_id: PeerId) { + // If `PeerId` is unknown to us, insert an entry, start dialing, and return early. 
let mut occ_entry = match self.peers.entry(peer_id.clone()) { Entry::Occupied(entry) => entry, Entry::Vacant(entry) => { @@ -701,6 +712,7 @@ impl GenericProto { let now = Instant::now(); match mem::replace(occ_entry.get_mut(), PeerState::Poisoned) { + // Banned (not expired) => PendingRequest PeerState::Banned { ref until } if *until > now => { let peer_id = occ_entry.key().clone(); debug!(target: "sub-libp2p", "PSM => Connect({:?}): Will start to connect at \ @@ -720,6 +732,7 @@ impl GenericProto { }; }, + // Banned (expired) => Requested PeerState::Banned { .. } => { debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", occ_entry.key()); debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", occ_entry.key()); @@ -730,6 +743,7 @@ impl GenericProto { *occ_entry.into_mut() = PeerState::Requested; }, + // Disabled (with non-expired ban) => DisabledPendingEnable PeerState::Disabled { opening_and_closing, closed, @@ -757,8 +771,9 @@ impl GenericProto { }; }, + // Disabled => Enabled PeerState::Disabled { opening_and_closing, mut closed, closing, banned_until } => { - // Choose one connection on which to open the notifications substream. + // The first element of `closed` is chosen to open the notifications substream. if !closed.is_empty() { let chosen_connec = closed.remove(0); debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", @@ -782,8 +797,9 @@ impl GenericProto { }; } else { - // If no connection is available, switch to "banned" mode in order to try - // again later. + // If no connection is available, switch to `DisabledPendingEnable` in order + // to try again later. + debug_assert!(!opening_and_closing.is_empty() || !closing.is_empty()); debug!( target: "sub-libp2p", "PSM => Connect({:?}): No connection in proper state. Delaying.", @@ -818,6 +834,7 @@ impl GenericProto { } }, + // Incoming => Enabled PeerState::Incoming { opening_and_closing, closed, open_desired, closing, .. 
} => { debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", occ_entry.key()); @@ -848,6 +865,7 @@ impl GenericProto { }; }, + // Other states are kept as-is. st @ PeerState::Enabled { .. } => { warn!(target: "sub-libp2p", "PSM => Connect({:?}): Already connected.", @@ -893,6 +911,7 @@ impl GenericProto { *entry.into_mut() = st; }, + // DisabledPendingEnable => Disabled PeerState::DisabledPendingEnable { opening_and_closing, closed, @@ -911,9 +930,18 @@ impl GenericProto { }; }, + // Enabled => Disabled PeerState::Enabled { mut opening_and_closing, closed, mut closing, opening, open } => { debug!(target: "sub-libp2p", "PSM => Drop({:?}): Disabling connections.", entry.key()); + if !open.is_empty() { + debug!(target: "sub-libp2p", "External API <= Closed({})", entry.key()); + let event = GenericProtoOut::CustomProtocolClosed { + peer_id: entry.key().clone(), + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + for connec_id in opening { debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", entry.key(), connec_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { @@ -941,12 +969,8 @@ impl GenericProto { banned_until: None } }, - st @ PeerState::Incoming { .. } => { - error!(target: "sub-libp2p", "PSM => Drop({:?}): Not enabled (Incoming).", - entry.key()); - *entry.into_mut() = st; - debug_assert!(!false); - }, + + // Requested => Ø PeerState::Requested => { // We don't cancel dialing. Libp2p doesn't expose that on purpose, as other // sub-systems (such as the discovery mechanism) may require dialing this peer as @@ -954,11 +978,20 @@ impl GenericProto { debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected.", entry.key()); entry.remove(); }, + + // PendingRequest => Banned PeerState::PendingRequest { timer_deadline, .. 
} => { debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected", entry.key()); *entry.into_mut() = PeerState::Banned { until: timer_deadline } }, + // Invalid state transitions. + st @ PeerState::Incoming { .. } => { + error!(target: "sub-libp2p", "PSM => Drop({:?}): Not enabled (Incoming).", + entry.key()); + *entry.into_mut() = st; + debug_assert!(!false); + }, PeerState::Poisoned => { error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()); debug_assert!(!false); @@ -980,7 +1013,7 @@ impl GenericProto { debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Obsolete incoming, sending back dropped", index, incoming.peer_id); debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", incoming.peer_id); - self.peerset.dropped(incoming.peer_id); + self.peerset.dropped(incoming.peer_id); // TODO: is that correct?! return } @@ -993,10 +1026,12 @@ impl GenericProto { }; match mem::replace(state, PeerState::Poisoned) { + // Incoming => Enabled PeerState::Incoming { opening_and_closing, closing, closed, open_desired, .. } => { debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Enabling connections.", index, incoming.peer_id); debug_assert!(!open_desired.is_empty()); + for connec_id in &open_desired { debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", incoming.peer_id, *connec_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { @@ -1005,6 +1040,7 @@ impl GenericProto { event: NotifsHandlerIn::Open, }); } + *state = PeerState::Enabled { opening_and_closing, closed, @@ -1013,9 +1049,14 @@ impl GenericProto { open: SmallVec::new(), }; } - peer => error!(target: "sub-libp2p", - "State mismatch in libp2p: Expected alive incoming. Got {:?}.", - peer) + + // Any state other than `Incoming` is invalid. + peer => { + error!(target: "sub-libp2p", + "State mismatch in libp2p: Expected alive incoming. 
Got {:?}.", + peer); + debug_assert!(false); + } } } @@ -1043,9 +1084,11 @@ impl GenericProto { }; match mem::replace(state, PeerState::Poisoned) { + // Incoming => Disabled PeerState::Incoming { opening_and_closing, mut closing, closed, open_desired, banned_until } => { debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Rejecting connections.", index, incoming.peer_id); + debug_assert!(!open_desired.is_empty()); for connec_id in open_desired { debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", incoming.peer_id, connec_id); @@ -1056,6 +1099,7 @@ impl GenericProto { }); closing.push(connec_id); } + *state = PeerState::Disabled { opening_and_closing, closed, @@ -1090,12 +1134,20 @@ impl NetworkBehaviour for GenericProto { fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { match self.peers.entry(peer_id.clone()).or_insert(PeerState::Poisoned) { + // Requested | PendingRequest => Enabled st @ &mut PeerState::Requested | st @ &mut PeerState::PendingRequest { .. } => { debug!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}): Connection was requested by PSM.", peer_id, endpoint ); + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", peer_id, *conn); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*conn), + event: NotifsHandlerIn::Open + }); + *st = PeerState::Enabled { opening: { let mut l = SmallVec::new(); @@ -1107,17 +1159,10 @@ impl NetworkBehaviour for GenericProto { closed: SmallVec::new(), open: SmallVec::new(), }; - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", peer_id, *conn); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Open - }); } - // Note: it may seem weird that "Banned" peers get treated as if they were absent. 
- // This is because the word "Banned" means "temporarily prevent outgoing connections to - // this peer", and not "banned" in the sense that we would refuse the peer altogether. + // Poisoned gets inserted above if the entry was missing. + // Ø | Banned => Disabled st @ &mut PeerState::Poisoned | st @ &mut PeerState::Banned { .. } => { let banned_until = if let PeerState::Banned { until } = st { @@ -1140,6 +1185,8 @@ impl NetworkBehaviour for GenericProto { }; } + // In all other states, add this new connection to the list of closed inactive + // connections. PeerState::Incoming { closed, .. } | PeerState::Disabled { closed, .. } | PeerState::DisabledPendingEnable { closed, .. } | @@ -1162,16 +1209,7 @@ impl NetworkBehaviour for GenericProto { }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { - PeerState::Requested | - PeerState::PendingRequest { .. } | - PeerState::Banned { .. } => { - // This is a serious bug either in this state machine or in libp2p. - error!(target: "sub-libp2p", - "`inject_disconnected` called for unknown peer {}", - peer_id); - debug_assert!(false); - }, - + // Disabled => Disabled | Banned | Ø PeerState::Disabled { mut opening_and_closing, mut closed, mut closing, banned_until } => { debug!(target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}): Disabled.", peer_id, *conn); @@ -1200,6 +1238,7 @@ impl NetworkBehaviour for GenericProto { } }, + // DisabledPendingEnable => DisabledPendingEnable | Banned PeerState::DisabledPendingEnable { mut opening_and_closing, mut closed, mut closing, timer_deadline, timer } => { debug!( target: "sub-libp2p", @@ -1231,6 +1270,7 @@ impl NetworkBehaviour for GenericProto { } }, + // Incoming => Incoming | Disabled | Banned | Ø PeerState::Incoming { mut opening_and_closing, mut closing, mut closed, mut open_desired, banned_until } => { debug!( target: "sub-libp2p", @@ -1286,6 +1326,8 @@ impl NetworkBehaviour for GenericProto { } } + // Enabled => Enabled | Banned + // Peers are always banned when 
disconnecting while Enabled. PeerState::Enabled { mut opening_and_closing, mut closed, mut closing, mut opening, mut open } => { debug!( target: "sub-libp2p", @@ -1343,6 +1385,15 @@ impl NetworkBehaviour for GenericProto { } } + PeerState::Requested | + PeerState::PendingRequest { .. } | + PeerState::Banned { .. } => { + // This is a serious bug either in this state machine or in libp2p. + error!(target: "sub-libp2p", + "`inject_disconnected` called for unknown peer {}", + peer_id); + debug_assert!(false); + }, PeerState::Poisoned => { error!(target: "sub-libp2p", "State of peer {} is poisoned", peer_id); debug_assert!(false); @@ -1417,12 +1468,17 @@ impl NetworkBehaviour for GenericProto { }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // Incoming => Incoming PeerState::Incoming { opening_and_closing, closing, mut closed, mut open_desired, banned_until } => { debug_assert!(!open_desired.is_empty()); if let Some(pos) = closed.iter().position(|c| *c == connection) { closed.remove(pos); open_desired.push(connection); } else { + // Connections in `opening_and_closing` are in a Closed phase, and as + // such can emit `OpenDesired` messages. + // Since an `Open` and a `Close` messages have already been sent, + // there is nothing much that can be done about this anyway. debug_assert!(opening_and_closing.iter().any(|c| *c == connection)); } @@ -1447,6 +1503,10 @@ impl NetworkBehaviour for GenericProto { opening.push(connection); } else { + // Connections in `opening_and_closing` `opening` are in a Closed + // phase, and as such can emit `OpenDesired` messages. + // Since an `Open` message haS already been sent, there is nothing + // more to do. debug_assert!( opening_and_closing.iter().any(|c| *c == connection) || opening.iter().any(|c| *c == connection) @@ -1462,43 +1522,105 @@ impl NetworkBehaviour for GenericProto { }; }, - PeerState::DisabledPendingEnable { opening_and_closing, mut closed, closing, .. 
} | - PeerState::Disabled { opening_and_closing, mut closed, closing, .. } => { + // Disabled => Disabled | Incoming + PeerState::Disabled { opening_and_closing, mut closed, closing, banned_until } => { if let Some(pos) = closed.iter().position(|c| *c == connection) { closed.remove(pos); + // Added in `open_desired` below. + + let incoming_id = self.next_incoming_index; + self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { + Some(v) => v, + None => { + error!(target: "sub-libp2p", "Overflow in next_incoming_index"); + return + } + }; + + debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", + source, incoming_id); + self.peerset.incoming(source.clone(), incoming_id); + self.incoming.push(IncomingPeer { + peer_id: source.clone(), + alive: true, + incoming_id, + }); + + *entry.into_mut() = PeerState::Incoming { + opening_and_closing, + closing, + closed, + open_desired: { + let mut l = SmallVec::new(); + l.push(connection); + l + }, + banned_until, + }; + } else { + // Connections in `opening_and_closing` `opening` are in a Closed + // phase, and as such can emit `OpenDesired` messages. + // We ignore them. debug_assert!(opening_and_closing.iter().any(|c| *c == connection)); + *entry.into_mut() = PeerState::Disabled { + opening_and_closing, + closed, + closing, + banned_until, + }; } + } - let incoming_id = self.next_incoming_index; - self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { - Some(v) => v, - None => { - error!(target: "sub-libp2p", "Overflow in next_incoming_index"); - return - } - }; + // DisabledPendingEnable => DisabledPendingEnable | Incoming + PeerState::DisabledPendingEnable { opening_and_closing, mut closed, closing, timer, timer_deadline } => { + if let Some(pos) = closed.iter().position(|c| *c == connection) { + closed.remove(pos); + // Added in `open_desired` below. 
+ + let incoming_id = self.next_incoming_index; + self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { + Some(v) => v, + None => { + error!(target: "sub-libp2p", "Overflow in next_incoming_index"); + return + } + }; - debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", - source, incoming_id); - self.peerset.incoming(source.clone(), incoming_id); - self.incoming.push(IncomingPeer { - peer_id: source.clone(), - alive: true, - incoming_id, - }); + debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", + source, incoming_id); + self.peerset.incoming(source.clone(), incoming_id); + self.incoming.push(IncomingPeer { + peer_id: source.clone(), + alive: true, + incoming_id, + }); - *entry.into_mut() = PeerState::Incoming { - opening_and_closing, - closing, - closed, - open_desired: { - let mut l = SmallVec::new(); - l.push(connection); - l - }, - banned_until: None, // TODO: get from `DisabledPendingEnable` - }; + *entry.into_mut() = PeerState::Incoming { + opening_and_closing, + closing, + closed, + open_desired: { + let mut l = SmallVec::new(); + l.push(connection); + l + }, + banned_until: Some(timer_deadline), + }; + + } else { + // Connections in `opening_and_closing` `opening` are in a Closed + // phase, and as such can emit `OpenDesired` messages. + // We ignore them. 
+ debug_assert!(opening_and_closing.iter().any(|c| *c == connection)); + *entry.into_mut() = PeerState::DisabledPendingEnable { + opening_and_closing, + closed, + closing, + timer, + timer_deadline, + }; + } } state => { @@ -1525,6 +1647,7 @@ impl NetworkBehaviour for GenericProto { }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // Enabled => Enabled | Disabled PeerState::Enabled { opening_and_closing, mut closing, closed, opening, mut open } => { let pos = if let Some(pos) = open.iter().position(|(c, _)| *c == connection) { pos @@ -1543,6 +1666,7 @@ impl NetworkBehaviour for GenericProto { }); closing.push(connection); + // `open` wasn't empty before. if open.is_empty() { if opening.is_empty() { debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); @@ -1575,6 +1699,9 @@ impl NetworkBehaviour for GenericProto { }; } }, + + // All connections in `Disabled` and `DisabledPendingEnable` have been sent a + // `Close` message already, and as such ignore any `CloseDesired` message. state @ PeerState::Disabled { .. } | state @ PeerState::DisabledPendingEnable { .. } => { *entry.into_mut() = state; @@ -1595,6 +1722,7 @@ impl NetworkBehaviour for GenericProto { source, connection); match self.peers.get_mut(&source) { + // Move the connection from `closing` to `closed`. Some(PeerState::DisabledPendingEnable { closing, closed, .. }) | Some(PeerState::Disabled { closing, closed, .. }) | Some(PeerState::Enabled { closing, closed, .. 
}) => { @@ -1682,6 +1810,8 @@ impl NetworkBehaviour for GenericProto { match mem::replace(entry.get_mut(), PeerState::Poisoned) { PeerState::Enabled { mut closed, mut closing, mut opening_and_closing, mut opening, open } => { + debug_assert!(!opening.is_empty() || !open.is_empty()); + if let Some(pos) = opening.iter().position(|c| *c == connection) { opening.remove(pos); closed.push(connection); From e6f55690fa516a7c6242886844bbe030d01aadbd Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 2 Nov 2020 11:04:17 +0100 Subject: [PATCH 06/39] Update docs --- .../src/protocol/generic_proto/behaviour.rs | 41 +++++++++---------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index a047b662fa7f5..6f391acc289a0 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -47,24 +47,29 @@ use wasm_timer::Instant; /// The role of the `GenericProto` is to synchronize the following components: /// /// - The libp2p swarm that opens new connections and reports disconnects. -/// - The connection handler (see `handler.rs`) that handles individual connections. +/// - The connection handler (see `group.rs`) that handles individual connections. /// - The peerset manager (PSM) that requests links to peers to be established or broken. /// - The external API, that requires knowledge of the links that have been established. /// -/// Each connection handler can be in four different states: Enabled+Open, Enabled+Closed, -/// Disabled+Open, or Disabled+Closed. The Enabled/Disabled component must be in sync with the -/// peerset manager. For example, if the peerset manager requires a disconnection, we disable the -/// connection handlers of that peer. The Open/Closed component must be in sync with the external -/// API. 
+/// In the state machine below, each `PeerId` is attributed one of these states: /// -/// However, a connection handler for a peer only exists if we are actually connected to that peer. -/// What this means is that there are six possible states for each peer: Disconnected, Dialing -/// (trying to connect), Enabled+Open, Enabled+Closed, Disabled+Open, Disabled+Closed. -/// Most notably, the Dialing state must correspond to a "link established" state in the peerset -/// manager. In other words, the peerset manager doesn't differentiate whether we are dialing a -/// peer or connected to it. +/// - No open connection, but requested by the peerset. Currently dialing. +/// - Has open TCP connection(s) unbeknownst to the peerset. No substream is open. +/// - Has open TCP connection(s), acknowledged by the peerset. +/// - Notifications substreams are open on at least one connection, and external +/// API has been notified. +/// - Notifications substreams aren't open. +/// - Has open TCP connection(s) and remote would like to open substreams. Peerset has +/// been asked to attribute an inbound slot. /// -/// There may be multiple connections to a peer. However, the status of a peer on +/// In addition to these states, there also exists a "banning" system. If we fail to dial a peer, +/// we "ban" it for a few seconds. If the PSM requests connecting to a peer that is currently +/// "banned", the next dialing attempt is delayed until after the ban expires. However, the PSM +/// will still consider the peer to be connected. This "ban" is thus not a ban in a strict sense: +/// if a "banned" peer tries to connect, the connection is accepted. A ban only delays dialing +/// attempts. +/// +/// There may be multiple connections to a peer. The status of a peer on /// the API of this behaviour and towards the peerset manager is aggregated in /// the following way: /// @@ -78,9 +83,9 @@ use wasm_timer::Instant; /// in terms of potential reordering and dropped messages. 
Messages can /// be received on any connection. /// 3. The behaviour reports `GenericProtoOut::CustomProtocolOpen` when the -/// first connection reports `NotifsHandlerOut::Open`. +/// first connection reports `NotifsHandlerOut::OpenResultOk`. /// 4. The behaviour reports `GenericProtoOut::CustomProtocolClosed` when the -/// last connection reports `NotifsHandlerOut::Closed`. +/// last connection reports `NotifsHandlerOut::ClosedResult`. /// /// In this way, the number of actual established connections to the peer is /// an implementation detail of this behaviour. Note that, in practice and at @@ -88,12 +93,6 @@ use wasm_timer::Instant; /// and only as a result of simultaneous dialing. However, the implementation /// accommodates for any number of connections. /// -/// Additionally, there also exists a "banning" system. If we fail to dial a peer, we "ban" it for -/// a few seconds. If the PSM requests connecting to a peer that is currently "banned", the next -/// dialing attempt is delayed until after the ban expires. However, the PSM will still consider -/// the peer to be connected. This "ban" is thus not a ban in a strict sense: If a "banned" peer -/// tries to connect, the connection is accepted. A ban only delays dialing attempts. -/// pub struct GenericProto { /// `PeerId` of the local node. 
local_peer_id: PeerId, From be64e75c2a0893911bf1d64dc66ff9fa8bea6f65 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 3 Nov 2020 10:48:41 +0100 Subject: [PATCH 07/39] Apply suggestions from code review Co-authored-by: Roman Borschel --- client/network/src/protocol/generic_proto/behaviour.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 6f391acc289a0..b982ccb7f1cf0 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -141,7 +141,7 @@ struct DelayId(u64); /// State of a peer we're connected to. /// -/// The various variants correspond to the state that are relevant to the peerset. +/// The variants correspond to the state of the peer w.r.t. the peerset. #[derive(Debug)] enum PeerState { /// State is poisoned. This is a temporary state for a peer and we should always switch back @@ -1389,7 +1389,7 @@ impl NetworkBehaviour for GenericProto { PeerState::Banned { .. } => { // This is a serious bug either in this state machine or in libp2p. error!(target: "sub-libp2p", - "`inject_disconnected` called for unknown peer {}", + "`inject_connection_closed` called for unknown peer {}", peer_id); debug_assert!(false); }, @@ -1502,7 +1502,7 @@ impl NetworkBehaviour for GenericProto { opening.push(connection); } else { - // Connections in `opening_and_closing` `opening` are in a Closed + // Connections in `opening_and_closing` and `opening` are in a Closed // phase, and as such can emit `OpenDesired` messages. // Since an `Open` message haS already been sent, there is nothing // more to do. @@ -1558,7 +1558,7 @@ impl NetworkBehaviour for GenericProto { }; } else { - // Connections in `opening_and_closing` `opening` are in a Closed + // Connections in `opening_and_closing` are in a Closed // phase, and as such can emit `OpenDesired` messages. 
// We ignore them. debug_assert!(opening_and_closing.iter().any(|c| *c == connection)); @@ -1608,7 +1608,7 @@ impl NetworkBehaviour for GenericProto { }; } else { - // Connections in `opening_and_closing` `opening` are in a Closed + // Connections in `opening_and_closing` are in a Closed // phase, and as such can emit `OpenDesired` messages. // We ignore them. debug_assert!(opening_and_closing.iter().any(|c| *c == connection)); From b0caa7c8f11eab21a0201937c0f03696c77be757 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 3 Nov 2020 11:26:06 +0100 Subject: [PATCH 08/39] Clean up Banned state --- .../src/protocol/generic_proto/behaviour.rs | 115 ++++++++++++++---- 1 file changed, 89 insertions(+), 26 deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index b982ccb7f1cf0..7e093477e3226 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -152,8 +152,10 @@ enum PeerState { /// The peer misbehaved. If the PSM wants us to connect to this peer, we will add an artificial /// delay to the connection. Banned { + /// When the ban expires. For clean-up purposes. References an entry in `delays`. + timer: DelayId, /// Until when the peer is banned. - until: Instant, + timer_deadline: Instant, }, /// The peerset requested that we connect to this peer. We are currently not connected. 
@@ -712,22 +714,13 @@ impl GenericProto { match mem::replace(occ_entry.get_mut(), PeerState::Poisoned) { // Banned (not expired) => PendingRequest - PeerState::Banned { ref until } if *until > now => { + PeerState::Banned { ref timer, ref timer_deadline } if *timer_deadline > now => { let peer_id = occ_entry.key().clone(); debug!(target: "sub-libp2p", "PSM => Connect({:?}): Will start to connect at \ - until {:?}", peer_id, until); - - let delay_id = self.next_delay_id; - self.next_delay_id.0 += 1; - let delay = futures_timer::Delay::new(*until - now); - self.delays.push(async move { - delay.await; - (delay_id, peer_id) - }.boxed()); - + until {:?}", peer_id, timer_deadline); *occ_entry.into_mut() = PeerState::PendingRequest { - timer: delay_id, - timer_deadline: *until, + timer: *timer, + timer_deadline: *timer_deadline, }; }, @@ -979,9 +972,9 @@ impl GenericProto { }, // PendingRequest => Banned - PeerState::PendingRequest { timer_deadline, .. } => { + PeerState::PendingRequest { timer, timer_deadline } => { debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected", entry.key()); - *entry.into_mut() = PeerState::Banned { until: timer_deadline } + *entry.into_mut() = PeerState::Banned { timer, timer_deadline } }, // Invalid state transitions. @@ -1164,8 +1157,8 @@ impl NetworkBehaviour for GenericProto { // Ø | Banned => Disabled st @ &mut PeerState::Poisoned | st @ &mut PeerState::Banned { .. } => { - let banned_until = if let PeerState::Banned { until } = st { - Some(*until) + let banned_until = if let PeerState::Banned { timer_deadline, .. 
} = st { + Some(*timer_deadline) } else { None }; @@ -1226,7 +1219,24 @@ impl NetworkBehaviour for GenericProto { if opening_and_closing.is_empty() && closed.is_empty() && closing.is_empty() { if let Some(until) = banned_until { - *entry.get_mut() = PeerState::Banned { until }; + let now = Instant::now(); + if until > now { + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(until - now); + let peer_id = peer_id.clone(); + self.delays.push(async move { + delay.await; + (delay_id, peer_id) + }.boxed()); + + *entry.get_mut() = PeerState::Banned { + timer: delay_id, + timer_deadline: until, + }; + } else { + entry.remove(); + } } else { entry.remove(); } @@ -1260,7 +1270,7 @@ impl NetworkBehaviour for GenericProto { if opening_and_closing.is_empty() && closed.is_empty() && closing.is_empty() { debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); self.peerset.dropped(peer_id.clone()); - *entry.get_mut() = PeerState::Banned { until: timer_deadline }; + *entry.get_mut() = PeerState::Banned { timer, timer_deadline }; } else { *entry.get_mut() = PeerState::DisabledPendingEnable { @@ -1308,7 +1318,24 @@ impl NetworkBehaviour for GenericProto { open_desired.is_empty() { if let Some(until) = banned_until { - *entry.get_mut() = PeerState::Banned { until }; + let now = Instant::now(); + if until > now { + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(until - now); + let peer_id = peer_id.clone(); + self.delays.push(async move { + delay.await; + (delay_id, peer_id) + }.boxed()); + + *entry.get_mut() = PeerState::Banned { + timer: delay_id, + timer_deadline: until, + }; + } else { + entry.remove(); + } } else { entry.remove(); } @@ -1373,8 +1400,19 @@ impl NetworkBehaviour for GenericProto { debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); self.peerset.dropped(peer_id.clone()); let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); + + 
let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(Duration::from_secs(ban_dur)); + let peer_id = peer_id.clone(); + self.delays.push(async move { + delay.await; + (delay_id, peer_id) + }.boxed()); + *entry.get_mut() = PeerState::Banned { - until: Instant::now() + Duration::from_secs(ban_dur) + timer: delay_id, + timer_deadline: Instant::now() + Duration::from_secs(ban_dur), }; } else { @@ -1417,13 +1455,33 @@ impl NetworkBehaviour for GenericProto { }, // "Basic" situation: we failed to reach a peer that the peerset requested. - PeerState::Requested | PeerState::PendingRequest { .. } => { + st @ PeerState::Requested | + st @ PeerState::PendingRequest { .. } => { debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); + self.peerset.dropped(peer_id.clone()); + + let now = Instant::now(); + let ban_duration = match st { + PeerState::PendingRequest { timer_deadline, .. } if timer_deadline > now => + cmp::max(timer_deadline - now, Duration::from_secs(5)), + _ => Duration::from_secs(5) + }; + + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(ban_duration); + let peer_id = peer_id.clone(); + self.delays.push(async move { + delay.await; + (delay_id, peer_id) + }.boxed()); + *entry.into_mut() = PeerState::Banned { - until: Instant::now() + Duration::from_secs(5) + timer: delay_id, + timer_deadline: now + ban_duration, }; - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); - self.peerset.dropped(peer_id.clone()) }, // We can still get dial failures even if we are already connected to the peer, @@ -1963,6 +2021,11 @@ impl NetworkBehaviour for GenericProto { }; match peer_state { + PeerState::Banned { timer, .. 
} if *timer == delay_id => { + debug!(target: "sub-libp2p", "Libp2p <= Clean up ban of {:?} from the state", peer_id); + self.peers.remove(&peer_id); + } + PeerState::PendingRequest { timer, .. } if *timer == delay_id => { debug!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired", peer_id); self.events.push_back(NetworkBehaviourAction::DialPeer { From 9cb238827f2011dbcd9249193c367607fe66b43e Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 4 Nov 2020 12:37:12 +0100 Subject: [PATCH 09/39] Refactor connections state --- .../src/protocol/generic_proto/behaviour.rs | 948 ++++++++---------- 1 file changed, 435 insertions(+), 513 deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 7e093477e3226..f882c72f3377c 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -178,19 +178,8 @@ enum PeerState { /// given `Instant`. banned_until: Option, - /// List of connections that are in the `Closed` state but to which a - /// [`NotifsHandlerIn::Open`] message then a [`NotifsHandlerIn::Close`] message has been - /// sent. An `OpenResultOk`/`OpenResultErr` message followed with a `CloseResult` message - /// are expected. - opening_and_closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - - /// List of connections that are in the `Closed` state. - closed: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - - /// List of connections that are either in the `Open` or the `Closed` state, but to which - /// a [`NotifsHandlerIn::Close`] message has been sent. Waiting for this message to be - /// acknowledged through a [`NotifsHandlerOut::CloseResult`]. - closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + /// List of connections with this peer, and their state. 
+ connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, }, /// We are connected to this peer. The peerset has requested a connection to this peer, but @@ -207,45 +196,14 @@ enum PeerState { /// When the `timer` will trigger. timer_deadline: Instant, - /// List of connections that are in the `Closed` state but to which a - /// [`NotifsHandlerIn::Open`] message then a [`NotifsHandlerIn::Close`] message has been - /// sent. An `OpenResultOk`/`OpenResultErr` message followed with a `CloseResult` message - /// are expected. - opening_and_closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - - /// List of connections that are in the `Closed` state. - closed: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - - /// List of connections that are either in the `Open` or the `Closed` state, but to which - /// a [`NotifsHandlerIn::Close`] message has been sent. Waiting for this message to be - /// acknowledged through a [`NotifsHandlerOut::CloseResult`]. - closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + /// List of connections with this peer, and their state. + connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, }, /// We are connected to this peer and the peerset has accepted it. Enabled { - /// List of connections that are in the `Closed` state but to which a - /// [`NotifsHandlerIn::Open`] message then a [`NotifsHandlerIn::Close`] message has been - /// sent. An `OpenResultOk`/`OpenResultErr` message followed with a `CloseResult` message - /// are expected. - opening_and_closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - - /// List of connections that are in the `Closed` state but to which a - /// [`NotifsHandlerIn::Open`] message has been sent. - opening: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - - /// List of connections that are in the `Open` state. 
- /// - /// The external API is notified of a channel if and only if `!open.is_empty()`. - open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, - - /// List of connections that are in the `Closed` state. - closed: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - - /// List of connections that are either in the `Open` or the `Closed` state, but to which - /// a [`NotifsHandlerIn::Close`] message has been sent. Waiting for this message to be - /// acknowledged through a [`NotifsHandlerOut::CloseResult`]. - closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + /// List of connections with this peer, and their state. + connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, }, /// We are connected to this peer. We have received an `OpenDesired` from one of the handlers @@ -256,25 +214,8 @@ enum PeerState { /// If `Some`, any dial attempts to this peer are delayed until the given `Instant`. banned_until: Option, - /// List of connections that are in the `Closed` state but to which a - /// [`NotifsHandlerIn::Open`] message then a [`NotifsHandlerIn::Close`] message has been - /// sent. An `OpenResultOk`/`OpenResultErr` message followed with a `CloseResult` message - /// are expected. - opening_and_closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - - /// List of connections that are in the `Closed` state, meaning that the remote hasn't - /// requested anything. - closed: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - - /// List of connections that are either in the `Open` or the `Closed` state, but to which - /// a [`NotifsHandlerIn::Close`] message has been sent. Waiting for this message to be - /// acknowledged through a [`NotifsHandlerOut::CloseResult`]. 
- closing: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - - /// List of connections that are in the `Closed` state, but for which a - /// [`NotifsHandlerOut::OpenDesired`] message has been received, meaning that the remote - /// wants to open a substream. - open_desired: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + /// List of connections with this peer, and their state. + connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, }, } @@ -289,12 +230,13 @@ impl PeerState { /// that is open for custom protocol traffic. fn get_open(&self) -> Option<&NotificationsSink> { match self { - PeerState::Enabled { open, .. } => - if !open.is_empty() { - Some(&open[0].1) - } else { - None - } + PeerState::Enabled { connections, .. } => connections + .iter() + .filter_map(|(_, s)| match s { + ConnectionState::Open(s) => Some(s), + _ => None, + }) + .next(), PeerState::Poisoned => None, PeerState::Banned { .. } => None, PeerState::PendingRequest { .. } => None, @@ -320,6 +262,37 @@ impl PeerState { } } +/// State of the handler of a single connection visible from this state machine. +#[derive(Debug)] +enum ConnectionState { + /// Connection is in the `Closed` state, meaning that the remote hasn't requested anything. + Closed, + + /// Connection is either in the `Open` or the `Closed` state, but a + /// [`NotifsHandlerIn::Close`] message has been sent. Waiting for this message to be + /// acknowledged through a [`NotifsHandlerOut::CloseResult`]. + Closing, + + /// Connection is in the `Closed` state but a [`NotifsHandlerIn::Open`] message has been sent. + /// An `OpenResultOk`/`OpenResultErr` message is expected. + Opening, + + /// Connection is in the `Closed` state but a [`NotifsHandlerIn::Open`] message then a + /// [`NotifsHandlerIn::Close`] message has been sent. An `OpenResultOk`/`OpenResultErr` message + /// followed with a `CloseResult` message are expected. 
+ OpeningAndClosing, + + /// Connection is in the `Closed` state, but a [`NotifsHandlerOut::OpenDesired`] message has + /// been received, meaning that the remote wants to open a substream. + OpenDesired, + + /// Connection is in the `Open` state. + /// + /// The external API is notified of a channel with this peer if any of its connection is in + /// this state. + Open(NotificationsSink), +} + /// State of an "incoming" message sent to the peer set manager. #[derive(Debug)] struct IncomingPeer { @@ -500,9 +473,7 @@ impl GenericProto { // DisabledPendingEnable => Disabled. PeerState::DisabledPendingEnable { - opening_and_closing, - closed, - closing, + connections, timer_deadline, timer: _ } => { @@ -514,9 +485,7 @@ impl GenericProto { timer_deadline }); *entry.into_mut() = PeerState::Disabled { - opening_and_closing, - closed, - closing, + connections, banned_until } }, @@ -524,11 +493,11 @@ impl GenericProto { // Enabled => Disabled. // All open or opening connections are sent a `Close` message. // If relevant, the external API is instantly notified. 
- PeerState::Enabled { mut opening_and_closing, opening, open, closed, mut closing } => { + PeerState::Enabled { mut connections } => { debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); self.peerset.dropped(peer_id.clone()); - if !open.is_empty() { + if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) { debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); let event = GenericProtoOut::CustomProtocolClosed { peer_id: peer_id.clone(), @@ -536,38 +505,43 @@ impl GenericProto { self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); } - for (connec_id, _) in open { - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", peer_id, connec_id); + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::Open(_))) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", peer_id, *connec_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), - handler: NotifyHandler::One(connec_id), + handler: NotifyHandler::One(*connec_id), event: NotifsHandlerIn::Close, }); - closing.push(connec_id); + *connec_state = ConnectionState::Closing; } - for connec_id in opening { - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", peer_id, connec_id); + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::Opening)) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", peer_id, *connec_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), - handler: NotifyHandler::One(connec_id), + handler: NotifyHandler::One(*connec_id), event: NotifsHandlerIn::Close, }); - opening_and_closing.push(connec_id); + *connec_state = ConnectionState::OpeningAndClosing; } + debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_)))); + debug_assert!(!connections.iter().any(|(_, s)| matches!(s, 
ConnectionState::Opening))); + let banned_until = ban.map(|dur| Instant::now() + dur); *entry.into_mut() = PeerState::Disabled { - opening_and_closing, - closing, - closed, + connections, banned_until } }, // Incoming => Disabled. // Ongoing opening requests from the remote are rejected. - PeerState::Incoming { opening_and_closing, mut closing, closed, open_desired, banned_until } => { + PeerState::Incoming { mut connections, banned_until } => { let inc = if let Some(inc) = self.incoming.iter_mut() .find(|i| i.peer_id == *entry.key() && i.alive) { inc @@ -579,14 +553,16 @@ impl GenericProto { inc.alive = false; - for connec_id in open_desired { - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", peer_id, connec_id); + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesired)) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", peer_id, *connec_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), - handler: NotifyHandler::One(connec_id), + handler: NotifyHandler::One(*connec_id), event: NotifsHandlerIn::Close, }); - closing.push(connec_id); + *connec_state = ConnectionState::Closing; } let banned_until = match (banned_until, ban) { @@ -596,10 +572,9 @@ impl GenericProto { (None, None) => None, }; + debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesired))); *entry.into_mut() = PeerState::Disabled { - opening_and_closing, - closing, - closed, + connections, banned_until } }, @@ -737,9 +712,7 @@ impl GenericProto { // Disabled (with non-expired ban) => DisabledPendingEnable PeerState::Disabled { - opening_and_closing, - closed, - closing, + connections, banned_until: Some(ref banned) } if *banned > now => { let peer_id = occ_entry.key().clone(); @@ -755,43 +728,35 @@ impl GenericProto { }.boxed()); *occ_entry.into_mut() = PeerState::DisabledPendingEnable { - opening_and_closing, - closed, - closing, + connections, 
timer: delay_id, timer_deadline: *banned, }; }, // Disabled => Enabled - PeerState::Disabled { opening_and_closing, mut closed, closing, banned_until } => { + PeerState::Disabled { mut connections, banned_until } => { // The first element of `closed` is chosen to open the notifications substream. - if !closed.is_empty() { - let chosen_connec = closed.remove(0); + if let Some((connec_id, connec_state)) = connections.iter_mut() + .find(|(_, s)| matches!(s, ConnectionState::Closed)) + { debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", occ_entry.key()); - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", peer_id, chosen_connec); + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", peer_id, *connec_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), - handler: NotifyHandler::One(chosen_connec), + handler: NotifyHandler::One(*connec_id), event: NotifsHandlerIn::Open, }); - *occ_entry.into_mut() = PeerState::Enabled { - opening_and_closing, - opening: { - let mut l = SmallVec::new(); - l.push(chosen_connec); - l - }, - open: SmallVec::new(), - closed, - closing, - }; + *connec_state = ConnectionState::Opening; + *occ_entry.into_mut() = PeerState::Enabled { connections }; } else { // If no connection is available, switch to `DisabledPendingEnable` in order // to try again later. - debug_assert!(!opening_and_closing.is_empty() || !closing.is_empty()); + debug_assert!(connections.iter().any(|(_, s)| { + matches!(s, ConnectionState::OpeningAndClosing | ConnectionState::Closing) + })); debug!( target: "sub-libp2p", "PSM => Connect({:?}): No connection in proper state. 
Delaying.", @@ -817,9 +782,7 @@ impl GenericProto { }.boxed()); *occ_entry.into_mut() = PeerState::DisabledPendingEnable { - opening_and_closing, - closed, - closing, + connections, timer: delay_id, timer_deadline, }; @@ -827,7 +790,7 @@ impl GenericProto { }, // Incoming => Enabled - PeerState::Incoming { opening_and_closing, closed, open_desired, closing, .. } => { + PeerState::Incoming { mut connections, .. } => { debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", occ_entry.key()); if let Some(inc) = self.incoming.iter_mut() @@ -838,23 +801,20 @@ impl GenericProto { incoming for incoming peer") } - debug_assert!(!open_desired.is_empty()); - for connec_id in &open_desired { + debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesired))); + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesired)) + { debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", occ_entry.key(), *connec_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: occ_entry.key().clone(), handler: NotifyHandler::One(*connec_id), event: NotifsHandlerIn::Open, }); + *connec_state = ConnectionState::Opening; } - *occ_entry.into_mut() = PeerState::Enabled { - closed, - closing, - opening_and_closing, - opening: open_desired, - open: SmallVec::new(), - }; + *occ_entry.into_mut() = PeerState::Enabled { connections }; }, // Other states are kept as-is. 
@@ -904,29 +864,21 @@ impl GenericProto { }, // DisabledPendingEnable => Disabled - PeerState::DisabledPendingEnable { - opening_and_closing, - closed, - closing, - timer_deadline, - timer: _ - } => { + PeerState::DisabledPendingEnable { connections, timer_deadline, timer: _ } => { debug!(target: "sub-libp2p", "PSM => Drop({:?}): Interrupting pending enabling.", entry.key()); *entry.into_mut() = PeerState::Disabled { - opening_and_closing, - closed, - closing, + connections, banned_until: Some(timer_deadline), }; }, // Enabled => Disabled - PeerState::Enabled { mut opening_and_closing, closed, mut closing, opening, open } => { + PeerState::Enabled { mut connections } => { debug!(target: "sub-libp2p", "PSM => Drop({:?}): Disabling connections.", entry.key()); - if !open.is_empty() { + if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) { debug!(target: "sub-libp2p", "External API <= Closed({})", entry.key()); let event = GenericProtoOut::CustomProtocolClosed { peer_id: entry.key().clone(), @@ -934,32 +886,31 @@ impl GenericProto { self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); } - for connec_id in opening { - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", entry.key(), connec_id); + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::Opening)) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", entry.key(), *connec_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: entry.key().clone(), - handler: NotifyHandler::One(connec_id), + handler: NotifyHandler::One(*connec_id), event: NotifsHandlerIn::Close, }); - opening_and_closing.push(connec_id); + *connec_state = ConnectionState::OpeningAndClosing; } - for (connec_id, _) in open { - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", entry.key(), connec_id); + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, 
ConnectionState::Open(_))) + { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", entry.key(), *connec_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: entry.key().clone(), - handler: NotifyHandler::One(connec_id), + handler: NotifyHandler::One(*connec_id), event: NotifsHandlerIn::Close, }); - closing.push(connec_id); + *connec_state = ConnectionState::Closing; } - *entry.into_mut() = PeerState::Disabled { - opening_and_closing, - closed, - closing, - banned_until: None - } + *entry.into_mut() = PeerState::Disabled { connections, banned_until: None } }, // Requested => Ø @@ -1019,27 +970,24 @@ impl GenericProto { match mem::replace(state, PeerState::Poisoned) { // Incoming => Enabled - PeerState::Incoming { opening_and_closing, closing, closed, open_desired, .. } => { + PeerState::Incoming { mut connections, .. } => { debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Enabling connections.", index, incoming.peer_id); - debug_assert!(!open_desired.is_empty()); - for connec_id in &open_desired { + debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesired))); + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesired)) + { debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", incoming.peer_id, *connec_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: incoming.peer_id.clone(), handler: NotifyHandler::One(*connec_id), event: NotifsHandlerIn::Open, }); + *connec_state = ConnectionState::Opening; } - *state = PeerState::Enabled { - opening_and_closing, - closed, - closing, - opening: open_desired, - open: SmallVec::new(), - }; + *state = PeerState::Enabled { connections }; } // Any state other than `Incoming` is invalid. 
@@ -1077,27 +1025,24 @@ impl GenericProto { match mem::replace(state, PeerState::Poisoned) { // Incoming => Disabled - PeerState::Incoming { opening_and_closing, mut closing, closed, open_desired, banned_until } => { + PeerState::Incoming { mut connections, banned_until } => { debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Rejecting connections.", index, incoming.peer_id); - debug_assert!(!open_desired.is_empty()); - for connec_id in open_desired { + debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesired))); + for (connec_id, connec_state) in connections.iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesired)) + { debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", incoming.peer_id, connec_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: incoming.peer_id.clone(), - handler: NotifyHandler::One(connec_id), + handler: NotifyHandler::One(*connec_id), event: NotifsHandlerIn::Close, }); - closing.push(connec_id); + *connec_state = ConnectionState::Closing; } - *state = PeerState::Disabled { - opening_and_closing, - closed, - closing, - banned_until, - }; + *state = PeerState::Disabled { connections, banned_until }; } peer => error!(target: "sub-libp2p", "State mismatch in libp2p: Expected alive incoming. Got {:?}.", @@ -1140,17 +1085,9 @@ impl NetworkBehaviour for GenericProto { event: NotifsHandlerIn::Open }); - *st = PeerState::Enabled { - opening: { - let mut l = SmallVec::new(); - l.push(*conn); - l - }, - opening_and_closing: SmallVec::new(), - closing: SmallVec::new(), - closed: SmallVec::new(), - open: SmallVec::new(), - }; + let mut connections = SmallVec::new(); + connections.push((*conn, ConnectionState::Opening)); + *st = PeerState::Enabled { connections }; } // Poisoned gets inserted above if the entry was missing. 
@@ -1165,28 +1102,22 @@ impl NetworkBehaviour for GenericProto { debug!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}, {:?}): Not requested by PSM, disabling.", peer_id, endpoint, *conn); - *st = PeerState::Disabled { - opening_and_closing: SmallVec::new(), - closed: { - let mut l = SmallVec::new(); - l.push(*conn); - l - }, - closing: SmallVec::new(), - banned_until - }; + + let mut connections = SmallVec::new(); + connections.push((*conn, ConnectionState::Closed)); + *st = PeerState::Disabled { connections, banned_until }; } // In all other states, add this new connection to the list of closed inactive // connections. - PeerState::Incoming { closed, .. } | - PeerState::Disabled { closed, .. } | - PeerState::DisabledPendingEnable { closed, .. } | - PeerState::Enabled { closed, .. } => { + PeerState::Incoming { connections, .. } | + PeerState::Disabled { connections, .. } | + PeerState::DisabledPendingEnable { connections, .. } | + PeerState::Enabled { connections, .. } => { debug!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}, {:?}): Secondary connection. 
Leaving closed.", peer_id, endpoint, *conn); - closed.push(*conn); + connections.push((*conn, ConnectionState::Closed)); } } } @@ -1202,22 +1133,18 @@ impl NetworkBehaviour for GenericProto { match mem::replace(entry.get_mut(), PeerState::Poisoned) { // Disabled => Disabled | Banned | Ø - PeerState::Disabled { mut opening_and_closing, mut closed, mut closing, banned_until } => { + PeerState::Disabled { mut connections, banned_until } => { debug!(target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}): Disabled.", peer_id, *conn); - if let Some(pos) = opening_and_closing.iter().position(|c| *c == *conn) { - opening_and_closing.remove(pos); - } else if let Some(pos) = closed.iter().position(|c| *c == *conn) { - closed.remove(pos); - } else if let Some(pos) = closing.iter().position(|c| *c == *conn) { - closing.remove(pos); + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { + connections.remove(pos); } else { debug_assert!(false); error!(target: "sub-libp2p", "inject_connection_closed: State mismatch in the custom protos handler"); } - if opening_and_closing.is_empty() && closed.is_empty() && closing.is_empty() { + if connections.is_empty() { if let Some(until) = banned_until { let now = Instant::now(); if until > now { @@ -1241,71 +1168,61 @@ impl NetworkBehaviour for GenericProto { entry.remove(); } } else { - *entry.get_mut() = PeerState::Disabled { - opening_and_closing, closed, closing, banned_until - }; + *entry.get_mut() = PeerState::Disabled { connections, banned_until }; } }, // DisabledPendingEnable => DisabledPendingEnable | Banned - PeerState::DisabledPendingEnable { mut opening_and_closing, mut closed, mut closing, timer_deadline, timer } => { + PeerState::DisabledPendingEnable { mut connections, timer_deadline, timer } => { debug!( target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}): Disabled but pending enable.", peer_id, *conn ); - if let Some(pos) = opening_and_closing.iter().position(|c| *c == *conn) { - 
opening_and_closing.remove(pos); - } else if let Some(pos) = closed.iter().position(|c| *c == *conn) { - closed.remove(pos); - } else if let Some(pos) = closing.iter().position(|c| *c == *conn) { - closing.remove(pos); + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { + connections.remove(pos); } else { debug_assert!(false); error!(target: "sub-libp2p", "inject_connection_closed: State mismatch in the custom protos handler"); } - if opening_and_closing.is_empty() && closed.is_empty() && closing.is_empty() { + if connections.is_empty() { debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); self.peerset.dropped(peer_id.clone()); *entry.get_mut() = PeerState::Banned { timer, timer_deadline }; } else { *entry.get_mut() = PeerState::DisabledPendingEnable { - opening_and_closing, closed, closing, timer_deadline, timer + connections, timer_deadline, timer }; } }, // Incoming => Incoming | Disabled | Banned | Ø - PeerState::Incoming { mut opening_and_closing, mut closing, mut closed, mut open_desired, banned_until } => { + PeerState::Incoming { mut connections, banned_until } => { debug!( target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}): OpenDesired.", peer_id, *conn ); - if let Some(pos) = opening_and_closing.iter().position(|c| *c == *conn) { - opening_and_closing.remove(pos); - } else if let Some(pos) = closing.iter().position(|c| *c == *conn) { - closing.remove(pos); - } else if let Some(pos) = closed.iter().position(|c| *c == *conn) { - closed.remove(pos); - } else if let Some(pos) = open_desired.iter().position(|c| *c == *conn) { - open_desired.remove(pos); - - // In the incoming state, we don't report "Dropped". Instead we will just - // ignore the corresponding Accept/Reject. 
- if let Some(state) = self.incoming.iter_mut() - .find(|i| i.alive && i.peer_id == *peer_id) - { - state.alive = false; - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in incoming \ - corresponding to an incoming state in peers"); - debug_assert!(false); + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { + let (_, state) = connections.remove(pos); + + if let ConnectionState::OpenDesired = state { + // In the incoming state, we don't report "Dropped". Instead we will just + // ignore the corresponding Accept/Reject. + if let Some(state) = self.incoming.iter_mut() + .find(|i| i.alive && i.peer_id == *peer_id) + { + state.alive = false; + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ + incoming corresponding to an incoming state in peers"); + debug_assert!(false); + } } } else { @@ -1314,9 +1231,7 @@ impl NetworkBehaviour for GenericProto { "inject_connection_closed: State mismatch in the custom protos handler"); } - if opening_and_closing.is_empty() && closing.is_empty() && closed.is_empty() && - open_desired.is_empty() - { + if connections.is_empty() { if let Some(until) = banned_until { let now = Instant::now(); if until > now { @@ -1340,52 +1255,53 @@ impl NetworkBehaviour for GenericProto { entry.remove(); } - } else if open_desired.is_empty() { - *entry.get_mut() = PeerState::Disabled { - opening_and_closing, closed, closing, banned_until - }; + } else if !connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesired)) { + // If no connection is `OpenDesired` anymore, switch to `Disabled`. + *entry.get_mut() = PeerState::Disabled { connections, banned_until }; } else { - *entry.get_mut() = PeerState::Incoming { - opening_and_closing, closing, closed, open_desired, banned_until - }; + *entry.get_mut() = PeerState::Incoming { connections, banned_until }; } } // Enabled => Enabled | Banned // Peers are always banned when disconnecting while Enabled. 
- PeerState::Enabled { mut opening_and_closing, mut closed, mut closing, mut opening, mut open } => { + PeerState::Enabled { mut connections } => { debug!( target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}): Enabled.", peer_id, *conn ); - if let Some(pos) = opening_and_closing.iter().position(|c| *c == *conn) { - opening_and_closing.remove(pos); - } else if let Some(pos) = closed.iter().position(|c| *c == *conn) { - closed.remove(pos); - } else if let Some(pos) = closing.iter().position(|c| *c == *conn) { - closing.remove(pos); - } else if let Some(pos) = opening.iter().position(|c| *c == *conn) { - opening.remove(pos); - } else if let Some(pos) = open.iter().position(|(c, _)| *c == *conn) { - open.remove(pos); - - if open.is_empty() { - debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); - let event = GenericProtoOut::CustomProtocolClosed { - peer_id: peer_id.clone(), - }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - - } else if pos == 0 { - debug!(target: "sub-libp2p", "External API <= Sink replaced({})", peer_id); - let event = GenericProtoOut::CustomProtocolReplaced { - peer_id: peer_id.clone(), - notifications_sink: open[0].1.clone(), - }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { + let (_, state) = connections.remove(pos); + if let ConnectionState::Open(_) = state { + if let Some((replacement_pos, replacement_sink)) = connections + .iter() + .enumerate() + .filter_map(|(num, (_, s))| { + match s { + ConnectionState::Open(s) => Some((num, s.clone())), + _ => None + } + }) + .next() + { + if pos <= replacement_pos { + debug!(target: "sub-libp2p", "External API <= Sink replaced({})", peer_id); + let event = GenericProtoOut::CustomProtocolReplaced { + peer_id: peer_id.clone(), + notifications_sink: replacement_sink, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + } else { + 
debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); + let event = GenericProtoOut::CustomProtocolClosed { + peer_id: peer_id.clone(), + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } } } else { @@ -1394,9 +1310,9 @@ impl NetworkBehaviour for GenericProto { debug_assert!(false); } - if opening_and_closing.is_empty() && closed.is_empty() && closing.is_empty() - && opening.is_empty() && open.is_empty() - { + // TODO: need to pick another connection as a replacement + + if connections.is_empty() { debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); self.peerset.dropped(peer_id.clone()); let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); @@ -1416,9 +1332,7 @@ impl NetworkBehaviour for GenericProto { }; } else { - *entry.get_mut() = PeerState::Enabled { - opening_and_closing, closed, closing, opening, open - }; + *entry.get_mut() = PeerState::Enabled { connections }; } } @@ -1526,157 +1440,157 @@ impl NetworkBehaviour for GenericProto { match mem::replace(entry.get_mut(), PeerState::Poisoned) { // Incoming => Incoming - PeerState::Incoming { opening_and_closing, closing, mut closed, mut open_desired, banned_until } => { - debug_assert!(!open_desired.is_empty()); - if let Some(pos) = closed.iter().position(|c| *c == connection) { - closed.remove(pos); - open_desired.push(connection); + PeerState::Incoming { mut connections, banned_until } => { + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::OpenDesired))); + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { + if let ConnectionState::Closed = *connec_state { + *connec_state = ConnectionState::OpenDesired; + } else { + // Connections in `OpeningAndClosing` state are in a Closed phase, + // and as such can emit `OpenDesired` messages. + // Since an `Open` and a `Close` messages have already been sent, + // there is nothing much that can be done about this anyway. 
+ debug_assert!(matches!(
+ connec_state,
+ ConnectionState::OpeningAndClosing
+ ));
+ }
 } else {
- // Connections in `opening_and_closing` are in a Closed phase, and as
- // such can emit `OpenDesired` messages.
- // Since an `Open` and a `Close` messages have already been sent,
- // there is nothing much that can be done about this anyway.
- debug_assert!(opening_and_closing.iter().any(|c| *c == connection));
+ error!(
+ target: "sub-libp2p",
+ "OpenDesired: State mismatch in the custom protos handler"
+ );
+ debug_assert!(false);
 }
- *entry.into_mut() = PeerState::Incoming {
- opening_and_closing,
- closing,
- closed,
- open_desired,
- banned_until,
- };
+ *entry.into_mut() = PeerState::Incoming { connections, banned_until };
 },
- PeerState::Enabled { opening_and_closing, closing, mut closed, mut opening, open } => {
- if let Some(pos) = closed.iter().position(|c| *c == connection) {
- closed.remove(pos);
- debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", source, connection);
- self.events.push_back(NetworkBehaviourAction::NotifyHandler {
- peer_id: source,
- handler: NotifyHandler::One(connection),
- event: NotifsHandlerIn::Open,
- });
- opening.push(connection);
-
+ PeerState::Enabled { mut connections } => {
+ if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) {
+ if let ConnectionState::Closed = *connec_state {
+ debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", source, connection);
+ self.events.push_back(NetworkBehaviourAction::NotifyHandler {
+ peer_id: source,
+ handler: NotifyHandler::One(connection),
+ event: NotifsHandlerIn::Open,
+ });
+ *connec_state = ConnectionState::Opening;
+ } else {
+ // Connections in `OpeningAndClosing` and `Opening` are in a Closed
+ // phase, and as such can emit `OpenDesired` messages.
+ // Since an `Open` message has already been sent, there is nothing
+ // more to do.
+ debug_assert!(matches!( + connec_state, + ConnectionState::OpenDesired | ConnectionState::Opening + )); + } } else { - // Connections in `opening_and_closing` and `opening` are in a Closed - // phase, and as such can emit `OpenDesired` messages. - // Since an `Open` message haS already been sent, there is nothing - // more to do. - debug_assert!( - opening_and_closing.iter().any(|c| *c == connection) || - opening.iter().any(|c| *c == connection) + error!( + target: "sub-libp2p", + "OpenDesired: State mismatch in the custom protos handler" ); + debug_assert!(false); } - *entry.into_mut() = PeerState::Enabled { - opening_and_closing, - closing, - closed, - opening, - open, - }; + *entry.into_mut() = PeerState::Enabled { connections }; }, // Disabled => Disabled | Incoming - PeerState::Disabled { opening_and_closing, mut closed, closing, banned_until } => { - if let Some(pos) = closed.iter().position(|c| *c == connection) { - closed.remove(pos); - // Added in `open_desired` below. - - let incoming_id = self.next_incoming_index; - self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { - Some(v) => v, - None => { - error!(target: "sub-libp2p", "Overflow in next_incoming_index"); - return - } - }; + PeerState::Disabled { mut connections, banned_until } => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { + if let ConnectionState::Closed = *connec_state { + *connec_state = ConnectionState::OpenDesired; + + let incoming_id = self.next_incoming_index; + self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { + Some(v) => v, + None => { + error!(target: "sub-libp2p", "Overflow in next_incoming_index"); + return + } + }; - debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", - source, incoming_id); - self.peerset.incoming(source.clone(), incoming_id); - self.incoming.push(IncomingPeer { - peer_id: source.clone(), - alive: true, - incoming_id, - }); - - *entry.into_mut() = 
PeerState::Incoming { - opening_and_closing, - closing, - closed, - open_desired: { - let mut l = SmallVec::new(); - l.push(connection); - l - }, - banned_until, - }; + debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", + source, incoming_id); + self.peerset.incoming(source.clone(), incoming_id); + self.incoming.push(IncomingPeer { + peer_id: source.clone(), + alive: true, + incoming_id, + }); + + *entry.into_mut() = PeerState::Incoming { connections, banned_until }; + } else { + // Connections in `OpeningAndClosing` are in a Closed phase, and + // as such can emit `OpenDesired` messages. + // We ignore them. + debug_assert!(matches!( + connec_state, + ConnectionState::OpeningAndClosing + )); + } } else { - // Connections in `opening_and_closing` are in a Closed - // phase, and as such can emit `OpenDesired` messages. - // We ignore them. - debug_assert!(opening_and_closing.iter().any(|c| *c == connection)); - *entry.into_mut() = PeerState::Disabled { - opening_and_closing, - closed, - closing, - banned_until, - }; + error!( + target: "sub-libp2p", + "OpenDesired: State mismatch in the custom protos handler" + ); + debug_assert!(false); } } // DisabledPendingEnable => DisabledPendingEnable | Incoming - PeerState::DisabledPendingEnable { opening_and_closing, mut closed, closing, timer, timer_deadline } => { - if let Some(pos) = closed.iter().position(|c| *c == connection) { - closed.remove(pos); - // Added in `open_desired` below. 
- - let incoming_id = self.next_incoming_index; - self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { - Some(v) => v, - None => { - error!(target: "sub-libp2p", "Overflow in next_incoming_index"); - return - } - }; + PeerState::DisabledPendingEnable { mut connections, timer, timer_deadline } => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { + if let ConnectionState::Closed = *connec_state { + *connec_state = ConnectionState::OpenDesired; + + let incoming_id = self.next_incoming_index; + self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { + Some(v) => v, + None => { + error!(target: "sub-libp2p", "Overflow in next_incoming_index"); + return + } + }; - debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", - source, incoming_id); - self.peerset.incoming(source.clone(), incoming_id); - self.incoming.push(IncomingPeer { - peer_id: source.clone(), - alive: true, - incoming_id, - }); - - *entry.into_mut() = PeerState::Incoming { - opening_and_closing, - closing, - closed, - open_desired: { - let mut l = SmallVec::new(); - l.push(connection); - l - }, - banned_until: Some(timer_deadline), - }; + debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", + source, incoming_id); + self.peerset.incoming(source.clone(), incoming_id); + self.incoming.push(IncomingPeer { + peer_id: source.clone(), + alive: true, + incoming_id, + }); + + *entry.into_mut() = PeerState::Incoming { + connections, + banned_until: Some(timer_deadline), + }; + } else { + // Connections in `OpeningAndClosing` are in a Closed phase, and + // as such can emit `OpenDesired` messages. + // We ignore them. 
+ debug_assert!(matches!( + connec_state, + ConnectionState::OpeningAndClosing + )); + *entry.into_mut() = PeerState::DisabledPendingEnable { + connections, + timer, + timer_deadline, + }; + } } else { - // Connections in `opening_and_closing` are in a Closed - // phase, and as such can emit `OpenDesired` messages. - // We ignore them. - debug_assert!(opening_and_closing.iter().any(|c| *c == connection)); - *entry.into_mut() = PeerState::DisabledPendingEnable { - opening_and_closing, - closed, - closing, - timer, - timer_deadline, - }; + error!( + target: "sub-libp2p", + "OpenDesired: State mismatch in the custom protos handler" + ); + debug_assert!(false); } } @@ -1705,15 +1619,22 @@ impl NetworkBehaviour for GenericProto { match mem::replace(entry.get_mut(), PeerState::Poisoned) { // Enabled => Enabled | Disabled - PeerState::Enabled { opening_and_closing, mut closing, closed, opening, mut open } => { - let pos = if let Some(pos) = open.iter().position(|(c, _)| *c == connection) { + PeerState::Enabled { mut connections } => { + let pos = if let Some(pos) = connections.iter().position(|(c, _)| *c == connection) { pos } else { - debug_assert!(closing.iter().any(|c| *c == connection)); + error!(target: "sub-libp2p", + "CloseDesired: State mismatch in the custom protos handler"); + debug_assert!(false); return; }; - open.remove(pos); + if matches!(connections[pos].1, ConnectionState::Closing) { + return; + } + + debug_assert!(matches!(connections[pos].1, ConnectionState::Open(_))); + connections[pos].1 = ConnectionState::Closing; debug!(target: "sub-libp2p", "Handler({}, {:?}) <= Close", source, connection); self.events.push_back(NetworkBehaviourAction::NotifyHandler { @@ -1721,20 +1642,38 @@ impl NetworkBehaviour for GenericProto { handler: NotifyHandler::One(connection), event: NotifsHandlerIn::Close, }); - closing.push(connection); - // `open` wasn't empty before. 
- if open.is_empty() {
- if opening.is_empty() {
+ if let Some((replacement_pos, replacement_sink)) = connections
+ .iter()
+ .enumerate()
+ .filter_map(|(num, (_, s))| {
+ match s {
+ ConnectionState::Open(s) => Some((num, s.clone())),
+ _ => None
+ }
+ })
+ .next()
+ {
+ if pos <= replacement_pos {
+ debug!(target: "sub-libp2p", "External API <= Sink replaced({:?})", source);
+ let event = GenericProtoOut::CustomProtocolReplaced {
+ peer_id: source,
+ notifications_sink: replacement_sink,
+ };
+ self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
+ *entry.into_mut() = PeerState::Enabled { connections, };
+ }
+
+ } else {
+ // List of open connections wasn't empty before but now it is.
+ if !connections.iter().any(|(_, s)| matches!(s, ConnectionState::Opening)) {
 debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source);
 self.peerset.dropped(source.clone());
 *entry.into_mut() = PeerState::Disabled {
- opening_and_closing, closing, closed, banned_until: None
+ connections, banned_until: None
 };
 } else {
- *entry.into_mut() = PeerState::Enabled {
- opening_and_closing, closing, closed, opening, open
- };
+ *entry.into_mut() = PeerState::Enabled { connections };
 }
 debug!(target: "sub-libp2p", "External API <= Closed({:?})", source);
@@ -1742,18 +1681,6 @@ impl NetworkBehaviour for GenericProto {
 peer_id: source,
 };
 self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
-
- } else if pos == 0 {
- debug!(target: "sub-libp2p", "External API <= Sink replaced({:?})", source);
- let new_notifications_sink = open[0].1.clone();
- let event = GenericProtoOut::CustomProtocolReplaced {
- peer_id: source,
- notifications_sink: new_notifications_sink,
- };
- self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
- *entry.into_mut() = PeerState::Enabled {
- opening_and_closing, closing, closed, opening, open
- };
 }
 },
@@ -1779,13 +1706,12 @@ impl NetworkBehaviour for GenericProto {
 source, connection);
 match 
self.peers.get_mut(&source) { - // Move the connection from `closing` to `closed`. - Some(PeerState::DisabledPendingEnable { closing, closed, .. }) | - Some(PeerState::Disabled { closing, closed, .. }) | - Some(PeerState::Enabled { closing, closed, .. }) => { - if let Some(pos) = closing.iter().position(|c| *c == connection) { - closing.remove(pos); - closed.push(connection); + // Move the connection from `Closing` to `Closed`. + Some(PeerState::DisabledPendingEnable { connections, .. }) | + Some(PeerState::Disabled { connections, .. }) | + Some(PeerState::Enabled { connections, .. }) => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| *c == connection && matches!(s, ConnectionState::Closing)) { + *connec_state = ConnectionState::Closed; } else { debug_assert!(false); error!(target: "sub-libp2p", @@ -1808,9 +1734,13 @@ impl NetworkBehaviour for GenericProto { source, connection); match self.peers.get_mut(&source) { - Some(PeerState::Enabled { opening_and_closing, closing, opening, open, .. }) => { - if let Some(pos) = opening.iter().position(|c| *c == connection) { - if open.is_empty() { + Some(PeerState::Enabled { connections, .. 
}) => { + let any_open = connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))); + + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::Opening)) + { + if !any_open { debug!(target: "sub-libp2p", "External API <= Open({:?})", source); let event = GenericProtoOut::CustomProtocolOpen { peer_id: source, @@ -1819,11 +1749,11 @@ impl NetworkBehaviour for GenericProto { }; self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); } - opening.remove(pos); - open.push((connection, notifications_sink)); - } else if let Some(pos) = opening_and_closing.iter().position(|c| *c == connection) { - opening_and_closing.remove(pos); - closing.push(connection); + *connec_state = ConnectionState::Open(notifications_sink); + } else if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::OpeningAndClosing)) + { + *connec_state = ConnectionState::Closing; } else { debug_assert!(false); error!(target: "sub-libp2p", @@ -1831,11 +1761,12 @@ impl NetworkBehaviour for GenericProto { } }, - Some(PeerState::DisabledPendingEnable { opening_and_closing, closing, .. }) | - Some(PeerState::Disabled { opening_and_closing, closing, .. }) => { - if let Some(pos) = opening_and_closing.iter().position(|c| *c == connection) { - opening_and_closing.remove(pos); - closing.push(connection); + Some(PeerState::DisabledPendingEnable { connections, .. }) | + Some(PeerState::Disabled { connections, .. 
}) => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::OpeningAndClosing)) + { + *connec_state = ConnectionState::Closing; } else { debug_assert!(false); error!(target: "sub-libp2p", @@ -1866,58 +1797,56 @@ impl NetworkBehaviour for GenericProto { }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { - PeerState::Enabled { mut closed, mut closing, mut opening_and_closing, mut opening, open } => { - debug_assert!(!opening.is_empty() || !open.is_empty()); - - if let Some(pos) = opening.iter().position(|c| *c == connection) { - opening.remove(pos); - closed.push(connection); - } else if let Some(pos) = opening_and_closing.iter().position(|c| *c == connection) { - opening_and_closing.remove(pos); - closing.push(connection); + PeerState::Enabled { mut connections } => { + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::Opening)) + { + *connec_state = ConnectionState::Closed; + } else if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::OpeningAndClosing)) + { + *connec_state = ConnectionState::Closing; } else { debug_assert!(false); error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler"); } - if opening.is_empty() && open.is_empty() { + if !connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_))) + { debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); self.peerset.dropped(source.clone()); *entry.into_mut() = PeerState::Disabled { - closed, - closing, - opening_and_closing, + connections, banned_until: None }; } else { - *entry.into_mut() = PeerState::Enabled { - closed, closing, opening_and_closing, opening, open - }; + *entry.into_mut() = 
PeerState::Enabled { connections }; } }, - PeerState::Disabled { closed, mut closing, mut opening_and_closing, banned_until } => { - if let Some(pos) = opening_and_closing.iter().position(|c| *c == connection) { - opening_and_closing.remove(pos); - closing.push(connection); + PeerState::Disabled { mut connections, banned_until } => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::OpeningAndClosing)) + { + *connec_state = ConnectionState::Closing; } else { debug_assert!(false); error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler"); } - *entry.into_mut() = PeerState::Disabled { - closed, - closing, - opening_and_closing, - banned_until, - }; + *entry.into_mut() = PeerState::Disabled { connections, banned_until }; }, - PeerState::DisabledPendingEnable { closed, mut closing, mut opening_and_closing, timer, timer_deadline } => { - if let Some(pos) = opening_and_closing.iter().position(|c| *c == connection) { - opening_and_closing.remove(pos); - closing.push(connection); + PeerState::DisabledPendingEnable { mut connections, timer, timer_deadline } => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| + *c == connection && matches!(s, ConnectionState::OpeningAndClosing)) + { + *connec_state = ConnectionState::Closing; } else { debug_assert!(false); error!(target: "sub-libp2p", @@ -1925,9 +1854,7 @@ impl NetworkBehaviour for GenericProto { } *entry.into_mut() = PeerState::DisabledPendingEnable { - closed, - closing, - opening_and_closing, + connections, timer, timer_deadline, }; @@ -2035,28 +1962,23 @@ impl NetworkBehaviour for GenericProto { *peer_state = PeerState::Requested; } - PeerState::DisabledPendingEnable { timer, opening_and_closing, closed, closing, timer_deadline } - if *timer == delay_id => { - - if !closed.is_empty() { - let chosen_connec = closed.remove(0); + PeerState::DisabledPendingEnable { connections, timer, 
timer_deadline }
+ if *timer == delay_id =>
+ {
+ // The first connection found in the `Closed` state is chosen to open the
+ // notifications substream.
+ if let Some((connec_id, connec_state)) = connections.iter_mut()
+ .find(|(_, s)| matches!(s, ConnectionState::Closed))
+ {
 debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open (ban expired)",
- peer_id, chosen_connec);
+ peer_id, *connec_id);
 self.events.push_back(NetworkBehaviourAction::NotifyHandler {
 peer_id: peer_id.clone(),
- handler: NotifyHandler::One(chosen_connec),
+ handler: NotifyHandler::One(*connec_id),
 event: NotifsHandlerIn::Open,
 });
+ *connec_state = ConnectionState::Opening;
 *peer_state = PeerState::Enabled {
- opening_and_closing: mem::replace(opening_and_closing, Default::default()),
- opening: {
- let mut l = SmallVec::new();
- l.push(chosen_connec);
- l
- },
- open: SmallVec::new(),
- closed: mem::replace(closed, Default::default()),
- closing: mem::replace(closing, Default::default()),
+ connections: mem::replace(connections, Default::default()),
 };
 } else {
 *timer_deadline = Instant::now() + Duration::from_secs(5);

From b045aa6fc132009f97b256d2180e06197f8c396a Mon Sep 17 00:00:00 2001
From: Pierre Krieger
Date: Wed, 4 Nov 2020 12:59:28 +0100
Subject: [PATCH 10/39] Fix possibility of Enabled with no Opening or Open
 connection

---
 .../network/src/protocol/generic_proto/behaviour.rs | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs
index f882c72f3377c..e2e896933aac1 100644
--- a/client/network/src/protocol/generic_proto/behaviour.rs
+++ b/client/network/src/protocol/generic_proto/behaviour.rs
@@ -1310,8 +1310,6 @@ impl NetworkBehaviour for GenericProto {
 debug_assert!(false);
 }
 
- // TODO: need to pick another connection as a replacement
-
 if connections.is_empty() {
 debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id);
 self.peerset.dropped(peer_id.clone());
 @@ 
-1331,6 +1329,17 @@ impl NetworkBehaviour for GenericProto { timer_deadline: Instant::now() + Duration::from_secs(ban_dur), }; + } else if !connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_))) + { + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); + self.peerset.dropped(peer_id.clone()); + + *entry.get_mut() = PeerState::Disabled { + connections, + banned_until: None + }; + } else { *entry.get_mut() = PeerState::Enabled { connections }; } From 5e9e6add5c831832ee171a4acde008d25730c2b7 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 4 Nov 2020 13:03:21 +0100 Subject: [PATCH 11/39] Line width --- client/network/src/protocol/generic_proto/behaviour.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index e2e896933aac1..0a72596401bee 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -1719,7 +1719,10 @@ impl NetworkBehaviour for GenericProto { Some(PeerState::DisabledPendingEnable { connections, .. }) | Some(PeerState::Disabled { connections, .. }) | Some(PeerState::Enabled { connections, .. }) => { - if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| *c == connection && matches!(s, ConnectionState::Closing)) { + if let Some((_, connec_state)) = connections + .iter_mut() + .find(|(c, s)| *c == connection && matches!(s, ConnectionState::Closing)) + { *connec_state = ConnectionState::Closed; } else { debug_assert!(false); From 7b8636bea53e89d6e13f55c95849d674436c1886 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 4 Nov 2020 14:45:11 +0100 Subject: [PATCH 12/39] Add some debug_asserts! 
and fix TODO --- .../src/protocol/generic_proto/behaviour.rs | 31 ++++++++++++++++--- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 0a72596401bee..b72ee1b24af12 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -865,6 +865,7 @@ impl GenericProto { // DisabledPendingEnable => Disabled PeerState::DisabledPendingEnable { connections, timer_deadline, timer: _ } => { + debug_assert!(!connections.is_empty()); debug!(target: "sub-libp2p", "PSM => Drop({:?}): Interrupting pending enabling.", entry.key()); @@ -878,6 +879,9 @@ impl GenericProto { PeerState::Enabled { mut connections } => { debug!(target: "sub-libp2p", "PSM => Drop({:?}): Disabling connections.", entry.key()); + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) { debug!(target: "sub-libp2p", "External API <= Closed({})", entry.key()); let event = GenericProtoOut::CustomProtocolClosed { @@ -953,10 +957,16 @@ impl GenericProto { }; if !incoming.alive { - debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Obsolete incoming, - sending back dropped", index, incoming.peer_id); - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", incoming.peer_id); - self.peerset.dropped(incoming.peer_id); // TODO: is that correct?! + debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Obsolete incoming", + index, incoming.peer_id); + match self.peers.get_mut(&incoming.peer_id) { + Some(PeerState::DisabledPendingEnable { .. }) | + Some(PeerState::Enabled { .. 
}) => {} + _ => { + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", incoming.peer_id); + self.peerset.dropped(incoming.peer_id); + }, + } return } @@ -1273,6 +1283,9 @@ impl NetworkBehaviour for GenericProto { peer_id, *conn ); + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { let (_, state) = connections.remove(pos); if let ConnectionState::Open(_) = state { @@ -1477,6 +1490,9 @@ impl NetworkBehaviour for GenericProto { }, PeerState::Enabled { mut connections } => { + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { if let ConnectionState::Closed = *connec_state { debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", source, connection); @@ -1629,6 +1645,9 @@ impl NetworkBehaviour for GenericProto { match mem::replace(entry.get_mut(), PeerState::Poisoned) { // Enabled => Enabled | Disabled PeerState::Enabled { mut connections } => { + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + let pos = if let Some(pos) = connections.iter().position(|(c, _)| *c == connection) { pos } else { @@ -1747,6 +1766,8 @@ impl NetworkBehaviour for GenericProto { match self.peers.get_mut(&source) { Some(PeerState::Enabled { connections, .. 
}) => { + debug_assert!(connections.iter().any(|(_, s)| + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); let any_open = connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))); if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| @@ -1811,7 +1832,7 @@ impl NetworkBehaviour for GenericProto { match mem::replace(entry.get_mut(), PeerState::Poisoned) { PeerState::Enabled { mut connections } => { debug_assert!(connections.iter().any(|(_, s)| - matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| *c == connection && matches!(s, ConnectionState::Opening)) From 29cb9cf97243cf56772c71388f735beeeb948dc5 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 5 Nov 2020 14:05:13 +0100 Subject: [PATCH 13/39] Refactor legacy handler --- .../protocol/generic_proto/handler/group.rs | 6 +- .../protocol/generic_proto/handler/legacy.rs | 420 ++++-------------- .../protocol/generic_proto/upgrade/legacy.rs | 13 +- 3 files changed, 85 insertions(+), 354 deletions(-) diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index d97a3b9b57623..a62728ee648b4 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -482,8 +482,6 @@ impl ProtocolsHandler for NotifsHandler { NotifsHandlerIn::Open => { match &mut self.state { State::Closed { pending_in } => { - self.legacy.inject_event(LegacyProtoHandlerIn::Enable); - for (handler, initial_message) in &mut self.out_handlers { // We create `initial_message` on a separate line to be sure that the // lock is released as soon as possible. @@ -518,7 +516,7 @@ impl ProtocolsHandler for NotifsHandler { match &mut self.state { State::Open { .. } | State::Opening { .. 
} => { - self.legacy.inject_event(LegacyProtoHandlerIn::Disable); + self.legacy.inject_event(LegacyProtoHandlerIn::Close); for (handler, _) in &mut self.out_handlers { handler.inject_event(NotifsOutHandlerIn::Disable); } @@ -828,7 +826,7 @@ impl ProtocolsHandler for NotifsHandler { // This fails the entire opening attempt. (ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Refused), State::Opening { .. }) | (ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed), State::Opening { .. }) => { - self.legacy.inject_event(LegacyProtoHandlerIn::Disable); + self.legacy.inject_event(LegacyProtoHandlerIn::Close); for (handler, _) in &mut self.out_handlers { handler.inject_event(NotifsOutHandlerIn::Disable); } diff --git a/client/network/src/protocol/generic_proto/handler/legacy.rs b/client/network/src/protocol/generic_proto/handler/legacy.rs index 404093553785c..58d533aa5a51d 100644 --- a/client/network/src/protocol/generic_proto/handler/legacy.rs +++ b/client/network/src/protocol/generic_proto/handler/legacy.rs @@ -15,10 +15,10 @@ // along with Substrate. If not, see . 
use crate::protocol::generic_proto::upgrade::{RegisteredProtocol, RegisteredProtocolEvent, RegisteredProtocolSubstream}; + use bytes::BytesMut; use futures::prelude::*; -use futures_timer::Delay; -use libp2p::core::{ConnectedPoint, PeerId, Endpoint}; +use libp2p::core::{ConnectedPoint, PeerId}; use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade}; use libp2p::swarm::{ ProtocolsHandler, ProtocolsHandlerEvent, @@ -28,64 +28,25 @@ use libp2p::swarm::{ SubstreamProtocol, NegotiatedSubstream, }; -use log::{debug, error}; -use smallvec::{smallvec, SmallVec}; -use std::{borrow::Cow, collections::VecDeque, convert::Infallible, error, fmt, io, mem}; -use std::{pin::Pin, task::{Context, Poll}, time::Duration}; +use smallvec::SmallVec; +use std::{collections::VecDeque, convert::Infallible, error, fmt, io}; +use std::{pin::Pin, task::{Context, Poll}}; -/// Implements the `IntoProtocolsHandler` trait of libp2p. -/// -/// Every time a connection with a remote starts, an instance of this struct is created and -/// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a `LegacyProtoHandler`. It then handles all communications that are specific -/// to Substrate on that single connection. -/// -/// Note that there can be multiple instance of this struct simultaneously for same peer, -/// if there are multiple established connections to the peer. -/// -/// ## State of the handler -/// -/// There are six possible states for the handler: -/// -/// - Enabled and open, which is a normal operation. -/// - Enabled and closed, in which case it will try to open substreams. -/// - Disabled and open, in which case it will try to close substreams. -/// - Disabled and closed, in which case the handler is idle. The connection will be -/// garbage-collected after a few seconds if nothing more happens. -/// - Initializing and open. -/// - Initializing and closed, which is the state the handler starts in. 
-/// -/// The Init/Enabled/Disabled state is entirely controlled by the user by sending `Enable` or -/// `Disable` messages to the handler. The handler itself never transitions automatically between -/// these states. For example, if the handler reports a network misbehaviour, it will close the -/// substreams but it is the role of the user to send a `Disabled` event if it wants the connection -/// to close. Otherwise, the handler will try to reopen substreams. -/// -/// The handler starts in the "Initializing" state and must be transitionned to Enabled or Disabled -/// as soon as possible. -/// -/// The Open/Closed state is decided by the handler and is reported with the `CustomProtocolOpen` -/// and `CustomProtocolClosed` events. The `CustomMessage` event can only be generated if the -/// handler is open. -/// -/// ## How it works +/// Handler for the legacy substream. /// -/// When the handler is created, it is initially in the `Init` state and waits for either a -/// `Disable` or an `Enable` message from the outer layer. At any time, the outer layer is free to -/// toggle the handler between the disabled and enabled states. +/// The so-called legacy substream is a deprecated way of establishing a Substrate-specific +/// substream in an active connection. /// -/// When the handler switches to "enabled", it opens a substream and negotiates the protocol named -/// `/substrate/xxx`, where `xxx` is chosen by the user and depends on the chain. +/// Pro-actively opening a legacy substream is no longer supported. Only accepting incoming legacy +/// substreams is possible. As part of the protocol, only the dialing side of a connection +/// (emphasis *connection*, not substream) is allowed to open a legacy substream. /// -/// For backwards compatibility reasons, when we switch to "enabled" for the first time (while we -/// are still in "init" mode) and we are the connection listener, we don't open a substream. 
+/// # Usage /// -/// In order the handle the situation where both the remote and us get enabled at the same time, -/// we tolerate multiple substreams open at the same time. Messages are transmitted on an arbitrary -/// substream. The endpoints don't try to agree on a single substream. -/// -/// We consider that we are now "closed" if the remote closes all the existing substreams. -/// Re-opening it can then be performed by closing all active substream and re-opening one. +/// The handler can spontaneously generate `CustomProtocolOpen` and `CustomProtocolClosed` events +/// if the remote opens or closes the substream. Send a `Close` message in order to shut down any +/// active substream. After `Close` has beent sent, a `CustomProtocolClosed` event will be sent +/// back in the near future. /// pub struct LegacyProtoHandlerProto { /// Configuration for the protocol upgrade to negotiate. @@ -108,14 +69,11 @@ impl IntoProtocolsHandler for LegacyProtoHandlerProto { self.protocol.clone() } - fn into_handler(self, remote_peer_id: &PeerId, _: &ConnectedPoint) -> Self::Handler { + fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { LegacyProtoHandler { protocol: self.protocol, - remote_peer_id: remote_peer_id.clone(), - state: ProtocolState::Init { - substreams: SmallVec::new(), - init_deadline: Delay::new(Duration::from_secs(20)) - }, + substreams: SmallVec::new(), + shutdown: SmallVec::new(), events_queue: VecDeque::new(), } } @@ -126,12 +84,11 @@ pub struct LegacyProtoHandler { /// Configuration for the protocol upgrade to negotiate. protocol: RegisteredProtocol, - /// State of the communications with the remote. - state: ProtocolState, + /// The substreams where bidirectional communications happen. + substreams: SmallVec<[RegisteredProtocolSubstream; 4]>, - /// Identifier of the node we're talking to. Used only for logging purposes and shouldn't have - /// any influence on the behaviour. 
- remote_peer_id: PeerId, + /// Contains substreams which are being shut down. + shutdown: SmallVec<[RegisteredProtocolSubstream; 4]>, /// Queue of events to send to the outside. /// @@ -142,62 +99,11 @@ pub struct LegacyProtoHandler { >, } -/// State of the handler. -enum ProtocolState { - /// Waiting for the behaviour to tell the handler whether it is enabled or disabled. - Init { - /// List of substreams opened by the remote but that haven't been processed yet. - /// For each substream, also includes the handshake message that we have received. - substreams: SmallVec<[(RegisteredProtocolSubstream, Vec); 6]>, - /// Deadline after which the initialization is abnormally long. - init_deadline: Delay, - }, - - /// Handler is ready to accept incoming substreams. - /// If we are in this state, we haven't sent any `CustomProtocolOpen` yet. - Opening, - - /// Normal operating mode. Contains the substreams that are open. - /// If we are in this state, we have sent a `CustomProtocolOpen` message to the outside. - Normal { - /// The substreams where bidirectional communications happen. - substreams: SmallVec<[RegisteredProtocolSubstream; 4]>, - /// Contains substreams which are being shut down. - shutdown: SmallVec<[RegisteredProtocolSubstream; 4]>, - }, - - /// We are disabled. Contains substreams that are being closed. - /// If we are in this state, either we have sent a `CustomProtocolClosed` message to the - /// outside or we have never sent any `CustomProtocolOpen` in the first place. - Disabled { - /// List of substreams to shut down. - shutdown: SmallVec<[RegisteredProtocolSubstream; 6]>, - - /// If true, we should reactivate the handler after all the substreams in `shutdown` have - /// been closed. - /// - /// Since we don't want to mix old and new substreams, we wait for all old substreams to - /// be closed before opening any new one. 
- reenable: bool, - }, - - /// In this state, we don't care about anything anymore and need to kill the connection as soon - /// as possible. - KillAsap, - - /// We sometimes temporarily switch to this state during processing. If we are in this state - /// at the beginning of a method, that means something bad happened in the source code. - Poisoned, -} - /// Event that can be received by a `LegacyProtoHandler`. #[derive(Debug)] pub enum LegacyProtoHandlerIn { - /// The node should start using custom protocols. - Enable, - - /// The node should stop using custom protocols. - Disable, + /// The handler should close any existing substream. + Close, } /// Event that can be emitted by a `LegacyProtoHandler`. @@ -213,10 +119,7 @@ pub enum LegacyProtoHandlerOut { }, /// Closed a custom protocol with the remote. - CustomProtocolClosed { - /// Reason why the substream closed, for diagnostic purposes. - reason: Cow<'static, str>, - }, + CustomProtocolClosed, /// Receives a message on a custom protocol substream. CustomMessage { @@ -226,179 +129,49 @@ pub enum LegacyProtoHandlerOut { } impl LegacyProtoHandler { - /// Enables the handler. - fn enable(&mut self) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", - self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { substreams: mut incoming, .. } => { - if incoming.is_empty() { - ProtocolState::Opening - } else { - let event = LegacyProtoHandlerOut::CustomProtocolOpen { - version: incoming[0].0.protocol_version(), - received_handshake: mem::replace(&mut incoming[0].1, Vec::new()), - }; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::Normal { - substreams: incoming.into_iter().map(|(s, _)| s).collect(), - shutdown: SmallVec::new() - } - } - } - - st @ ProtocolState::KillAsap => st, - st @ ProtocolState::Opening { .. 
} => st, - st @ ProtocolState::Normal { .. } => st, - ProtocolState::Disabled { shutdown, .. } => { - ProtocolState::Disabled { shutdown, reenable: true } - } - } - } - - /// Disables the handler. - fn disable(&mut self) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", - self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { substreams: shutdown, .. } => { - let mut shutdown = shutdown.into_iter().map(|(s, _)| s).collect::>(); - for s in &mut shutdown { - s.shutdown(); - } - ProtocolState::Disabled { shutdown, reenable: false } - } - - ProtocolState::Opening { .. } | ProtocolState::Normal { .. } => - // At the moment, if we get disabled while things were working, we kill the entire - // connection in order to force a reset of the state. - // This is obviously an extremely shameful way to do things, but at the time of - // the writing of this comment, the networking works very poorly and a solution - // needs to be found. - ProtocolState::KillAsap, - - ProtocolState::Disabled { shutdown, .. } => - ProtocolState::Disabled { shutdown, reenable: false }, - - ProtocolState::KillAsap => ProtocolState::KillAsap, - }; - } - /// Polls the state for events. Optionally returns an event to produce. 
#[must_use] fn poll_state(&mut self, cx: &mut Context) -> Option> { - match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", - self.remote_peer_id); - self.state = ProtocolState::Poisoned; - None - } - - ProtocolState::Init { substreams, mut init_deadline } => { - match Pin::new(&mut init_deadline).poll(cx) { - Poll::Ready(()) => { - error!(target: "sub-libp2p", "Handler initialization process is too long \ - with {:?}", self.remote_peer_id); - self.state = ProtocolState::KillAsap; - }, - Poll::Pending => { - self.state = ProtocolState::Init { substreams, init_deadline }; + shutdown_list(&mut self.shutdown, cx); + + for n in (0..self.substreams.len()).rev() { + let mut substream = self.substreams.swap_remove(n); + match Pin::new(&mut substream).poll_next(cx) { + Poll::Pending => self.substreams.push(substream), + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => { + let event = LegacyProtoHandlerOut::CustomMessage { + message + }; + self.substreams.push(substream); + return Some(ProtocolsHandlerEvent::Custom(event)); + }, + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) => { + self.shutdown.push(substream); + if self.substreams.is_empty() { + let event = LegacyProtoHandlerOut::CustomProtocolClosed; + return Some(ProtocolsHandlerEvent::Custom(event)); } } - - None - } - - ProtocolState::Opening => { - self.state = ProtocolState::Opening; - None - } - - ProtocolState::Normal { mut substreams, mut shutdown } => { - for n in (0..substreams.len()).rev() { - let mut substream = substreams.swap_remove(n); - match Pin::new(&mut substream).poll_next(cx) { - Poll::Pending => substreams.push(substream), - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => { - let event = LegacyProtoHandlerOut::CustomMessage { - message - }; - substreams.push(substream); - self.state = ProtocolState::Normal { substreams, shutdown }; - 
return Some(ProtocolsHandlerEvent::Custom(event)); - }, - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) => { - shutdown.push(substream); - if substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed { - reason: "Legacy substream clogged".into(), - }; - self.state = ProtocolState::Disabled { - shutdown: shutdown.into_iter().collect(), - reenable: true - }; - return Some(ProtocolsHandlerEvent::Custom(event)); - } - } - Poll::Ready(None) => { - shutdown.push(substream); - if substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed { - reason: "All substreams have been closed by the remote".into(), - }; - self.state = ProtocolState::Disabled { - shutdown: shutdown.into_iter().collect(), - reenable: true - }; - return Some(ProtocolsHandlerEvent::Custom(event)); - } - } - Poll::Ready(Some(Err(err))) => { - if substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed { - reason: format!("Error on the last substream: {:?}", err).into(), - }; - self.state = ProtocolState::Disabled { - shutdown: shutdown.into_iter().collect(), - reenable: true - }; - return Some(ProtocolsHandlerEvent::Custom(event)); - } else { - debug!(target: "sub-libp2p", "Error on extra substream: {:?}", err); - } - } + Poll::Ready(None) => { + self.shutdown.push(substream); + if self.substreams.is_empty() { + let event = LegacyProtoHandlerOut::CustomProtocolClosed; + return Some(ProtocolsHandlerEvent::Custom(event)); } } - - // This code is reached is none if and only if none of the substreams are in a ready state. - self.state = ProtocolState::Normal { substreams, shutdown }; - None - } - - ProtocolState::Disabled { mut shutdown, reenable } => { - shutdown_list(&mut shutdown, cx); - // If `reenable` is `true`, that means we should open the substreams system again - // after all the substreams are closed. 
- if reenable && shutdown.is_empty() { - self.state = ProtocolState::Opening; - } else { - self.state = ProtocolState::Disabled { shutdown, reenable }; + Poll::Ready(Some(Err(err))) => { + if self.substreams.is_empty() { + let event = LegacyProtoHandlerOut::CustomProtocolClosed; + return Some(ProtocolsHandlerEvent::Custom(event)); + } else { + log::debug!(target: "sub-libp2p", "Error on extra substream: {:?}", err); + } } - None } - - ProtocolState::KillAsap => None, } + + None } } @@ -417,50 +190,18 @@ impl ProtocolsHandler for LegacyProtoHandler { fn inject_fully_negotiated_inbound( &mut self, - (mut substream, received_handshake): >::Output, + (substream, received_handshake): >::Output, (): () ) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", - self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { mut substreams, init_deadline } => { - if substream.endpoint() == Endpoint::Dialer { - error!(target: "sub-libp2p", "Opened dialing substream with {:?} before \ - initialization", self.remote_peer_id); - } - substreams.push((substream, received_handshake)); - ProtocolState::Init { substreams, init_deadline } - } - - ProtocolState::Opening { .. } => { - let event = LegacyProtoHandlerOut::CustomProtocolOpen { - version: substream.protocol_version(), - received_handshake, - }; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::Normal { - substreams: smallvec![substream], - shutdown: SmallVec::new() - } - } - - ProtocolState::Normal { substreams: mut existing, shutdown } => { - existing.push(substream); - ProtocolState::Normal { substreams: existing, shutdown } - } - - ProtocolState::Disabled { mut shutdown, .. 
} => { - substream.shutdown(); - shutdown.push(substream); - ProtocolState::Disabled { shutdown, reenable: false } - } + if self.substreams.is_empty() { + let event = LegacyProtoHandlerOut::CustomProtocolOpen { + version: substream.protocol_version(), + received_handshake, + }; + self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); + } - ProtocolState::KillAsap => ProtocolState::KillAsap, - }; + self.substreams.push(substream); } fn inject_fully_negotiated_outbound( @@ -472,9 +213,17 @@ impl ProtocolsHandler for LegacyProtoHandler { } fn inject_event(&mut self, message: LegacyProtoHandlerIn) { - match message { - LegacyProtoHandlerIn::Disable => self.disable(), - LegacyProtoHandlerIn::Enable => self.enable(), + // Only the `Close` message exists at the moment. + let LegacyProtoHandlerIn::Close = message; + + if !self.substreams.is_empty() { + let event = LegacyProtoHandlerOut::CustomProtocolClosed; + self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); + } + + for mut substream in self.substreams.drain() { + substream.shutdown(); + self.shutdown.push(substream); } } @@ -487,10 +236,10 @@ impl ProtocolsHandler for LegacyProtoHandler { } fn connection_keep_alive(&self) -> KeepAlive { - match self.state { - ProtocolState::Init { .. } | ProtocolState::Normal { .. } => KeepAlive::Yes, - ProtocolState::Opening { .. } | ProtocolState::Disabled { .. } | - ProtocolState::Poisoned | ProtocolState::KillAsap => KeepAlive::No, + if self.substreams.is_empty() { + KeepAlive::No + } else { + KeepAlive::Yes } } @@ -505,11 +254,6 @@ impl ProtocolsHandler for LegacyProtoHandler { return Poll::Ready(event) } - // Kill the connection if needed. - if let ProtocolState::KillAsap = self.state { - return Poll::Ready(ProtocolsHandlerEvent::Close(ConnectionKillError)); - } - // Process all the substreams. 
if let Some(event) = self.poll_state(cx) { return Poll::Ready(event) diff --git a/client/network/src/protocol/generic_proto/upgrade/legacy.rs b/client/network/src/protocol/generic_proto/upgrade/legacy.rs index 1b2b97253d1ae..d425754a85e94 100644 --- a/client/network/src/protocol/generic_proto/upgrade/legacy.rs +++ b/client/network/src/protocol/generic_proto/upgrade/legacy.rs @@ -20,7 +20,7 @@ use crate::config::ProtocolId; use bytes::BytesMut; use futures::prelude::*; use futures_codec::Framed; -use libp2p::core::{Endpoint, UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade::ProtocolName}; +use libp2p::core::{UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade::ProtocolName}; use parking_lot::RwLock; use std::{collections::VecDeque, io, pin::Pin, sync::Arc, vec::IntoIter as VecIntoIter}; use std::task::{Context, Poll}; @@ -85,9 +85,6 @@ impl Clone for RegisteredProtocol { pub struct RegisteredProtocolSubstream { /// If true, we are in the process of closing the sink. is_closing: bool, - /// Whether the local node opened this substream (dialer), or we received this substream from - /// the remote (listener). - endpoint: Endpoint, /// Buffer of packets to send. send_queue: VecDeque, /// If true, we should call `poll_complete` on the inner sink. @@ -107,12 +104,6 @@ impl RegisteredProtocolSubstream { self.protocol_version } - /// Returns whether the local node opened this substream (dialer), or we received this - /// substream from the remote (listener). - pub fn endpoint(&self) -> Endpoint { - self.endpoint - } - /// Starts a graceful shutdown process on this substream. /// /// Note that "graceful" means that we sent a closing message. 
We don't wait for any @@ -262,7 +253,6 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, Ok((RegisteredProtocolSubstream { is_closing: false, - endpoint: Endpoint::Listener, send_queue: VecDeque::new(), requires_poll_flush: false, inner: framed.fuse(), @@ -301,7 +291,6 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, Ok((RegisteredProtocolSubstream { is_closing: false, - endpoint: Endpoint::Dialer, send_queue: VecDeque::new(), requires_poll_flush: false, inner: framed.fuse(), From d035be151399616015bdb248cf4aab7fdfc262c5 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 6 Nov 2020 13:11:10 +0100 Subject: [PATCH 14/39] Rewrite group.rs entirely [part 1] --- .../src/protocol/generic_proto/behaviour.rs | 39 +- .../src/protocol/generic_proto/handler.rs | 2 - .../protocol/generic_proto/handler/group.rs | 662 ++++++++++-------- .../generic_proto/handler/notif_in.rs | 293 -------- .../generic_proto/handler/notif_out.rs | 444 ------------ 5 files changed, 389 insertions(+), 1051 deletions(-) delete mode 100644 client/network/src/protocol/generic_proto/handler/notif_in.rs delete mode 100644 client/network/src/protocol/generic_proto/handler/notif_out.rs diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index b72ee1b24af12..2ea6a5c99ccf5 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -1218,29 +1218,34 @@ impl NetworkBehaviour for GenericProto { peer_id, *conn ); - if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { - let (_, state) = connections.remove(pos); - - if let ConnectionState::OpenDesired = state { - // In the incoming state, we don't report "Dropped". Instead we will just - // ignore the corresponding Accept/Reject. 
- if let Some(state) = self.incoming.iter_mut() - .find(|i| i.alive && i.peer_id == *peer_id) - { - state.alive = false; - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ - incoming corresponding to an incoming state in peers"); - debug_assert!(false); - } - } + debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesired))); + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { + connections.remove(pos); } else { debug_assert!(false); error!(target: "sub-libp2p", "inject_connection_closed: State mismatch in the custom protos handler"); } + let no_desired_left = !connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesired)); + + // If no connection is `OpenDesired` anymore, clean up the peerset incoming + // request. + if no_desired_left { + // In the incoming state, we don't report "Dropped". Instead we will just + // ignore the corresponding Accept/Reject. + if let Some(state) = self.incoming.iter_mut() + .find(|i| i.alive && i.peer_id == *peer_id) + { + state.alive = false; + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ + incoming corresponding to an incoming state in peers"); + debug_assert!(false); + } + } + if connections.is_empty() { if let Some(until) = banned_until { let now = Instant::now(); @@ -1265,7 +1270,7 @@ impl NetworkBehaviour for GenericProto { entry.remove(); } - } else if !connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesired)) { + } else if no_desired_left { // If no connection is `OpenDesired` anymore, switch to `Disabled`. 
*entry.get_mut() = PeerState::Disabled { connections, banned_until }; diff --git a/client/network/src/protocol/generic_proto/handler.rs b/client/network/src/protocol/generic_proto/handler.rs index 5845130a7db87..fdf73498c82bf 100644 --- a/client/network/src/protocol/generic_proto/handler.rs +++ b/client/network/src/protocol/generic_proto/handler.rs @@ -23,5 +23,3 @@ pub use self::legacy::ConnectionKillError as LegacyConnectionKillError; mod group; mod legacy; -mod notif_in; -mod notif_out; diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index a62728ee648b4..6d5ba68ffabaa 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -50,17 +50,21 @@ //! When a [`NotifsHandlerOut::OpenDesired`] is emitted, the user should always send back either a //! [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`].If this isn't done, the remote will //! be left in a pending state. +//! +//! It is illegal to send a [`NotifsHandlerIn::Open`] before a previously-emitted +//! [`NotifsHandlerIn::Open`] has gotten an answer. 
use crate::protocol::generic_proto::{ - handler::legacy::{LegacyProtoHandler, LegacyProtoHandlerProto, LegacyProtoHandlerIn, LegacyProtoHandlerOut}, - handler::notif_in::{NotifsInHandlerProto, NotifsInHandler, NotifsInHandlerIn, NotifsInHandlerOut}, - handler::notif_out::{NotifsOutHandlerProto, NotifsOutHandler, NotifsOutHandlerIn, NotifsOutHandlerOut}, - upgrade::{NotificationsIn, NotificationsOut, NotificationsHandshakeError, RegisteredProtocol, UpgradeCollec}, + handler::legacy::{LegacyProtoHandler, LegacyProtoHandlerProto, LegacyProtoHandlerIn}, + upgrade::{ + NotificationsIn, NotificationsOut, NotificationsInSubstream, NotificationsOutSubstream, + NotificationsHandshakeError, RegisteredProtocol, UpgradeCollec + }, }; use bytes::BytesMut; use libp2p::core::{either::EitherOutput, ConnectedPoint, PeerId}; -use libp2p::core::upgrade::{UpgradeError, SelectUpgrade, InboundUpgrade, OutboundUpgrade}; +use libp2p::core::upgrade::{SelectUpgrade, InboundUpgrade, OutboundUpgrade}; use libp2p::swarm::{ ProtocolsHandler, ProtocolsHandlerEvent, IntoProtocolsHandler, @@ -76,14 +80,25 @@ use futures::{ }; use log::error; use parking_lot::{Mutex, RwLock}; -use std::{borrow::Cow, collections::VecDeque, mem, str, sync::Arc, task::{Context, Poll}}; +use std::{borrow::Cow, collections::VecDeque, cmp, mem, pin::Pin, str, sync::Arc, task::{Context, Poll}, time::Duration}; +use wasm_timer::Instant; /// Number of pending notifications in asynchronous contexts. /// See [`NotificationsSink::reserve_notification`] for context. const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8; + /// Number of pending notifications in synchronous contexts. const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048; +/// Maximum duration to open a substream and receive the handshake message. After that, we +/// consider that we failed to open the substream. 
+const OPEN_TIMEOUT: Duration = Duration::from_secs(10); + +/// After successfully establishing a connection with the remote, we keep the connection open for +/// at least this amount of time in order to give the rest of the code the chance to notify us to +/// open substreams. +const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5); + /// Implements the `IntoProtocolsHandler` trait of libp2p. /// /// Every time a connection with a remote starts, an instance of this struct is created and @@ -92,12 +107,13 @@ const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048; /// /// See the documentation at the module level for more information. pub struct NotifsHandlerProto { - /// Prototypes for handlers for inbound substreams, and the message we respond with in the + /// Prototypes for upgrades for inbound substreams, and the message we respond with in the /// handshake. - in_handlers: Vec<(NotifsInHandlerProto, Arc>>)>, + in_protocols: Vec<(NotificationsIn, Arc>>)>, - /// Prototypes for handlers for outbound substreams, and the initial handshake message we send. - out_handlers: Vec<(NotifsOutHandlerProto, Arc>>)>, + /// Name of protocols available for outbound substreams, and the initial handshake message we + /// send. + out_protocols: Vec<(Cow<'static, str>, Arc>>)>, /// Prototype for handler for backwards-compatibility. legacy: LegacyProtoHandlerProto, @@ -107,11 +123,16 @@ pub struct NotifsHandlerProto { /// /// See the documentation at the module level for more information. pub struct NotifsHandler { - /// Handlers for inbound substreams, and the message we respond with in the handshake. - in_handlers: Vec<(NotifsInHandler, Arc>>)>, + /// Prototypes for upgrades for inbound substreams, and the message we respond with in the + /// handshake. + in_protocols: Vec<(NotificationsIn, Arc>>)>, - /// Handlers for outbound substreams, and the initial handshake message we send. 
- out_handlers: Vec<(NotifsOutHandler, Arc>>)>, + /// Name of protocols available for outbound substreams, and the initial handshake message we + /// send. + out_protocols: Vec<(Cow<'static, str>, Arc>>)>, + + /// When the connection with the remote has been successfully established. + when_connection_open: Instant, /// Whether we are the connection dialer or listener. endpoint: ConnectedPoint, @@ -129,18 +150,25 @@ pub struct NotifsHandler { } /// See the module-level documentation to learn about the meaning of these variants. -#[derive(Debug)] enum State { /// Handler is in the "Closed" state. Closed { - /// When we receive inbound substream requests, we push here the index within - /// [`NotisHandler::in_handlers`], and process them when an `Open` or `Close` request is - /// received. + /// Vec of the same length as [`NotifsHandler::out_protocols`]. For each protocol, contains + /// a boolean indicating whether an outgoing substream is still in the process of being + /// opened. + pending_opening: Vec, + }, + + /// Handler is in the "Closed" state. A [`NotifsHandlerOut::OpenDesired`] has been emitted. + OpenDesired { + /// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains + /// a substream opened by the remote and that hasn't been accepted/rejected yet. /// - /// If this is non-empty, a [`NotifsHandlerOut::OpenDesired`] has been emitted. If this - /// transitions from non-empty to empty, a [`NotisHandlerOut::CloseDesired`] or a - /// [`NotisHandlerOut::CloseResult`] is emitted. - pending_in: Vec, + /// Must always contain at least one `Some`. + in_substreams: Vec>>, + + /// See [`State::Closed::pending_opening`]. + pending_opening: Vec, }, /// Handler is in the "Closed" state, but has received a [`NotifsHandlerIn::Open`] and is @@ -154,6 +182,20 @@ enum State { /// [`NotifsHandlerOut::Open`] event yet, this contains the received handshake waiting to /// be reported through the external API. 
pending_handshake: Option>, + + /// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains + /// a substream opened by the remote and that has been accepted. + /// + /// Contrary to [`State::OpenDesired::in_substreams`], it is possible for this to contain + /// only `None`s. + in_substreams: Vec>>, + + /// Vec of the same length as [`NotifsHandler::out_protocols`]. For each protocol, contains + /// an outbound substream that has been accepted by the remote. + /// + /// Items that contain `None` mean that a substream is still being opened. In other words, + /// this `Vec` is kind of a mirror version of [`State::Closed::pending_opening`]. + out_substreams: Vec>>, }, /// Handler is in the "Open" state. @@ -168,8 +210,24 @@ enum State { stream::Fuse> >, - /// If true, at least one substream has been closed and a - /// [`NotifsHandlerOut::CloseDesired`] message has been sent out. + /// Vec of the same length as [`NotifsHandler::out_protocols`]. For each protocol, contains + /// an outbound substream that has been accepted by the remote. + /// + /// On transition to [`State::Open`], all the elements must be `Some`. Elements are + /// switched to `None` only if the remote closes substreams, in which case `want_closed` + /// must be true. + out_substreams: Vec>>, + + /// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains + /// a substream opened by the remote and that has been accepted. + /// + /// Contrary to [`State::OpenDesired::in_substreams`], it is possible for this to contain + /// only `None`s. + in_substreams: Vec>>, + + /// If true, at least one substream in [`State::Open::out_substreams`] has been closed or + /// reset by the remote and a [`NotifsHandlerOut::CloseDesired`] message has been sent + /// out. 
want_closed: bool, }, } @@ -178,27 +236,24 @@ impl IntoProtocolsHandler for NotifsHandlerProto { type Handler = NotifsHandler; fn inbound_protocol(&self) -> SelectUpgrade, RegisteredProtocol> { - let in_handlers = self.in_handlers.iter() - .map(|(h, _)| h.inbound_protocol()) + let in_protocols = self.in_protocols.iter() + .map(|(h, _)| h.clone()) .collect::>(); - SelectUpgrade::new(in_handlers, self.legacy.inbound_protocol()) + SelectUpgrade::new(in_protocols, self.legacy.inbound_protocol()) } fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { + let num_out_proto = self.out_protocols.len(); + NotifsHandler { - in_handlers: self.in_handlers - .into_iter() - .map(|(proto, msg)| (proto.into_handler(remote_peer_id, connected_point), msg)) - .collect(), - out_handlers: self.out_handlers - .into_iter() - .map(|(proto, msg)| (proto.into_handler(remote_peer_id, connected_point), msg)) - .collect(), + in_protocols: self.in_protocols, + out_protocols: self.out_protocols, endpoint: connected_point.clone(), + when_connection_open: Instant::now(), legacy: self.legacy.into_handler(remote_peer_id, connected_point), state: State::Closed { - pending_in: Vec::new(), + pending_opening: (0..num_out_proto).map(|_| false).collect(), }, events_queue: VecDeque::with_capacity(16), } @@ -417,21 +472,19 @@ impl NotifsHandlerProto { let list = list.into(); assert!(!list.is_empty()); - let out_handlers = list + let out_protocols = list .clone() .into_iter() - .map(|(proto_name, initial_message)| { - (NotifsOutHandlerProto::new(proto_name), initial_message) - }).collect(); + .collect(); - let in_handlers = list.clone() + let in_protocols = list.clone() .into_iter() - .map(|(proto_name, msg)| (NotifsInHandlerProto::new(proto_name), msg)) + .map(|(proto_name, msg)| (NotificationsIn::new(proto_name), msg)) .collect(); NotifsHandlerProto { - in_handlers, - out_handlers, + in_protocols, + out_protocols, legacy: LegacyProtoHandlerProto::new(legacy), } 
} @@ -443,16 +496,16 @@ impl ProtocolsHandler for NotifsHandler { type Error = NotifsHandlerError; type InboundProtocol = SelectUpgrade, RegisteredProtocol>; type OutboundProtocol = NotificationsOut; - // Index within the `out_handlers` + // Index within the `out_protocols`. type OutboundOpenInfo = usize; type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { - let in_handlers = self.in_handlers.iter() - .map(|(h, _)| h.listen_protocol().into_upgrade().1) + let in_protocols = self.in_protocols.iter() + .map(|(h, _)| h.clone()) .collect::>(); - let proto = SelectUpgrade::new(in_handlers, self.legacy.listen_protocol().into_upgrade().1); + let proto = SelectUpgrade::new(in_protocols, self.legacy.listen_protocol().into_upgrade().1); SubstreamProtocol::new(proto, ()) } @@ -462,8 +515,50 @@ impl ProtocolsHandler for NotifsHandler { (): () ) { match out { - EitherOutput::First((out, num)) => - self.in_handlers[num].0.inject_fully_negotiated_inbound(out, ()), + EitherOutput::First(((_remote_handshake, mut proto), num)) => { + match &mut self.state { + State::Closed { pending_opening } => { + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenDesired + )); + + let mut in_substreams = (0..self.in_protocols.len()) + .map(|_| None) + .collect::>(); + in_substreams[num] = Some(proto); + self.state = State::OpenDesired { + in_substreams, + pending_opening: mem::replace(pending_opening, Vec::new()), + }; + }, + State::OpenDesired { in_substreams, .. } => { + if in_substreams[num].is_some() { + // If a substream already exists, silently drop the new one. + // Note that we drop the substream, which will send an equivalent to a + // TCP "RST" to the remote and force-close the substream. It might + // seem like an unclean way to get rid of a substream. However, keep + // in mind that it is invalid for the remote to open multiple such + // substreams, and therefore sending a "RST" is the most correct thing + // to do. 
+ return; + } + in_substreams[num] = Some(proto); + }, + State::Opening { in_substreams, .. } | + State::Open { in_substreams, .. } => { + if in_substreams[num].is_some() { + // Same remark as above. + return; + } + + // We create `handshake_message` on a separate line to be sure + // that the lock is released as soon as possible. + let handshake_message = self.in_protocols[num].1.read().clone(); + proto.send_handshake(handshake_message); + in_substreams[num] = Some(proto); + }, + }; + } EitherOutput::Second(out) => self.legacy.inject_fully_negotiated_inbound(out, ()), } @@ -471,36 +566,108 @@ impl ProtocolsHandler for NotifsHandler { fn inject_fully_negotiated_outbound( &mut self, - out: >::Output, + (handshake, substream): >::Output, num: Self::OutboundOpenInfo ) { - self.out_handlers[num].0.inject_fully_negotiated_outbound(out, ()) + match &mut self.state { + State::Closed { pending_opening } | + State::OpenDesired { pending_opening, .. } => { + debug_assert!(pending_opening[num]); + pending_opening[num] = false; + } + State::Open { .. 
} => { + error!(target: "sub-libp2p", "☎️ State mismatch in notifications handler"); + debug_assert!(false); + } + State::Opening { pending_handshake, in_substreams, out_substreams } => { + debug_assert!(out_substreams[num].is_none()); + out_substreams[num] = Some(substream); + + if num == 0 { + debug_assert!(pending_handshake.is_none()); + *pending_handshake = Some(handshake); + } + + if !out_substreams.iter().any(|s| s.is_none()) { + let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); + let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); + let notifications_sink = NotificationsSink { + inner: Arc::new(NotificationsSinkInner { + async_channel: FuturesMutex::new(async_tx), + sync_channel: Mutex::new(sync_tx), + }), + }; + + debug_assert!(pending_handshake.is_some()); + let pending_handshake = pending_handshake.take().unwrap_or_default(); + + self.state = State::Open { + notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()), + out_substreams: mem::replace(out_substreams, Vec::new()), + in_substreams: mem::replace(in_substreams, Vec::new()), + want_closed: false, + }; + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultOk { + endpoint: self.endpoint.clone(), + received_handshake: pending_handshake, + notifications_sink + } + )); + } + } + } } fn inject_event(&mut self, message: NotifsHandlerIn) { match message { NotifsHandlerIn::Open => { match &mut self.state { - State::Closed { pending_in } => { - for (handler, initial_message) in &mut self.out_handlers { - // We create `initial_message` on a separate line to be sure that the - // lock is released as soon as possible. - let initial_message = initial_message.read().clone(); - handler.inject_event(NotifsOutHandlerIn::Enable { - initial_message, + State::Closed { .. } | State::OpenDesired { .. 
} => { + let (pending_opening, mut in_substreams) = match &mut self.state { + State::Closed { pending_opening } => (pending_opening, None), + State::OpenDesired { pending_opening, in_substreams } => + (pending_opening, Some(mem::replace(in_substreams, Vec::new()))), + _ => unreachable!() + }; + + for (n, is_pending) in pending_opening.iter().enumerate() { + if *is_pending { + continue; + } + + let proto = NotificationsOut::new( + self.out_protocols[n].0.clone(), + self.out_protocols[n].1.read().clone() + ); + + self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(proto, n) + .with_timeout(OPEN_TIMEOUT), }); } - for num in pending_in.drain(..) { - // We create `handshake_message` on a separate line to be sure - // that the lock is released as soon as possible. - let handshake_message = self.in_handlers[num].1.read().clone(); - self.in_handlers[num].0 - .inject_event(NotifsInHandlerIn::Accept(handshake_message)); + if let Some(in_substreams) = in_substreams.as_mut() { + for (num, substream) in in_substreams.iter_mut().enumerate() { + let substream = match substream.as_mut() { + Some(s) => s, + None => continue, + }; + + let handshake_message = self.in_protocols[num].1.read().clone(); + substream.send_handshake(handshake_message); + } } self.state = State::Opening { pending_handshake: None, + in_substreams: if let Some(in_substreams) = in_substreams { + in_substreams + } else { + (0..self.in_protocols.len()).map(|_| None).collect() + }, + out_substreams: (0..self.out_protocols.len()).map(|_| None).collect(), }; }, State::Opening { .. } | @@ -517,12 +684,9 @@ impl ProtocolsHandler for NotifsHandler { State::Open { .. } | State::Opening { .. 
} => { self.legacy.inject_event(LegacyProtoHandlerIn::Close); - for (handler, _) in &mut self.out_handlers { - handler.inject_event(NotifsOutHandlerIn::Disable); - } self.state = State::Closed { - pending_in: Vec::new(), + pending_opening: Vec::new(), }; if matches!(self.state, State::Opening { .. }) { @@ -531,11 +695,13 @@ impl ProtocolsHandler for NotifsHandler { ); } }, - State::Closed { pending_in } => { - for num in pending_in.drain(..) { - self.in_handlers[num].0.inject_event(NotifsInHandlerIn::Refuse); - } - }, + State::OpenDesired { pending_opening, .. } => { + // TODO: close in_substreams in a clean way + self.state = State::Closed { + pending_opening: mem::replace(pending_opening, Vec::new()), + }; + } + State::Closed { .. } => {}, } self.events_queue.push_back( @@ -550,55 +716,40 @@ impl ProtocolsHandler for NotifsHandler { num: usize, err: ProtocolsHandlerUpgrErr ) { - match err { - ProtocolsHandlerUpgrErr::Timeout => - self.out_handlers[num].0.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Timeout - ), - ProtocolsHandlerUpgrErr::Timer => - self.out_handlers[num].0.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Timer - ), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => - self.out_handlers[num].0.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) - ), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) => - self.out_handlers[num].0.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) - ), - } - } - - fn connection_keep_alive(&self) -> KeepAlive { - // Iterate over each handler and return the maximum value. + match &mut self.state { + State::Closed { pending_opening } | State::OpenDesired { pending_opening, .. } => { + debug_assert!(pending_opening[num]); + pending_opening[num] = false; + } - let mut ret = self.legacy.connection_keep_alive(); - if ret.is_yes() { - return KeepAlive::Yes; - } + State::Opening { .. 
} => { + // TODO: close already-open substreams in a clean way? + let mut pending_opening = (0..self.out_protocols.len()) + .map(|_| true) + .collect::>(); + pending_opening[num] = false; + self.state = State::Closed { + pending_opening, + }; - for (handler, _) in &self.in_handlers { - let val = handler.connection_keep_alive(); - if val.is_yes() { - return KeepAlive::Yes; + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultErr + )); } - if ret < val { ret = val; } - } - for (handler, _) in &self.out_handlers { - let val = handler.connection_keep_alive(); - if val.is_yes() { - return KeepAlive::Yes; - } - if ret < val { ret = val; } + // No substream is being open when already `Open`. + State::Open { .. } => debug_assert!(false), } + } - ret + fn connection_keep_alive(&self) -> KeepAlive { + let legacy = self.legacy.connection_keep_alive(); + match self.state { + State::Closed { .. } => + cmp::max(legacy, KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME)), + State::OpenDesired { .. } | State::Opening { .. } | State::Open { .. } => + KeepAlive::Yes, + } } fn poll( @@ -611,7 +762,91 @@ impl ProtocolsHandler for NotifsHandler { return Poll::Ready(ev); } - if let State::Open { notifications_sink_rx, .. } = &mut self.state { + // Poll inbound substreams. + // Inbound substreams being closed is always tolerated, except for the `OpenDesired` state + // which might need to be switched back to `Closed`. + match &mut self.state { + State::Closed { .. } => {} + State::Open { in_substreams, .. 
} => { + for (num, substream) in in_substreams.iter_mut().enumerate() { + match substream.as_mut().map(|s| Stream::poll_next(Pin::new(s), cx)) { + None | Some(Poll::Pending) => continue, + Some(Poll::Ready(Some(Ok(message)))) => { + let event = NotifsHandlerOut::Notification { + message, + protocol_name: self.in_protocols[num].0.protocol_name().clone(), + }; + return Poll::Ready(ProtocolsHandlerEvent::Custom(event)) + }, + // TODO: close in a clean way? + Some(Poll::Ready(None)) | Some(Poll::Ready(Some(Err(_)))) => + *substream = None, + } + } + } + + State::OpenDesired { in_substreams, .. } | + State::Opening { in_substreams, .. } => { + for substream in in_substreams { + match substream.as_mut().map(|s| NotificationsInSubstream::poll_process(Pin::new(s), cx)) { + None | Some(Poll::Pending) => continue, + Some(Poll::Ready(Ok(void))) => match void {}, + Some(Poll::Ready(Err(_))) => *substream = None, + } + } + } + } + + // Since the previous block might have closed inbound substreams, make sure that we can + // stay in `OpenDesired` state. + if let State::OpenDesired { in_substreams, pending_opening } = &mut self.state { + if !in_substreams.iter().any(|s| s.is_some()) { + self.state = State::Closed { + pending_opening: mem::replace(pending_opening, Vec::new()), + }; + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CloseDesired + )) + } + } + + // Poll outbound substreams. + match &mut self.state { + State::Open { out_substreams, .. } | + State::Opening { out_substreams, .. } => { + let mut any_closed = false; + + for substream in out_substreams.iter_mut() { + match substream.as_mut().map(|s| Sink::poll_flush(Pin::new(s), cx)) { + None | Some(Poll::Pending) | Some(Poll::Ready(Ok(()))) => continue, + Some(Poll::Ready(Err(_))) => {} + }; + + // Reached if the substream has been closed. + *substream = None; + any_closed = true; + } + + if any_closed { + if let State::Open { want_closed, .. 
} = &mut self.state { + if !*want_closed { + *want_closed = true; + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsHandlerOut::CloseDesired)); + } + } else if let State::Opening { out_substreams, .. } = &mut self.state { + // TODO: dispose of `in_substreams` in a clean way + let pending_opening = out_substreams.iter().map(|s| s.is_none()).collect(); + self.state = State::Closed { pending_opening }; + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsHandlerOut::OpenResultErr)); + } + } + } + + State::Closed { .. } | + State::OpenDesired { .. } => {} + } + + if let State::Open { notifications_sink_rx, out_substreams, .. } = &mut self.state { 'poll_notifs_sink: loop { // Before we poll the notifications sink receiver, check that all the notification // channels are ready to send a message. @@ -619,13 +854,14 @@ impl ProtocolsHandler for NotifsHandler { // protocol, in which case each sink should wait only for its corresponding handler // to be ready, and not all handlers // see https://github.com/paritytech/substrate/issues/5670 - for (out_handler, _) in &mut self.out_handlers { - match out_handler.poll_ready(cx) { - Poll::Ready(_) => {}, - Poll::Pending => break 'poll_notifs_sink, + for substream in out_substreams.iter_mut() { + match substream.as_mut().map(|s| s.poll_ready_unpin(cx)) { + None | Some(Poll::Ready(_)) => {}, + Some(Poll::Pending) => break 'poll_notifs_sink } } + // Now that all substreams are ready for a message, grab what to send. 
let message = match notifications_sink_rx.poll_next_unpin(cx) { Poll::Ready(Some(msg)) => msg, Poll::Ready(None) | Poll::Pending => break, @@ -636,28 +872,13 @@ impl ProtocolsHandler for NotifsHandler { protocol_name, message } => { - let mut found_any_with_name = false; - - for (handler, _) in &mut self.out_handlers { - if *handler.protocol_name() == protocol_name { - found_any_with_name = true; - if handler.is_open() { - handler.send_or_discard(message); - continue 'poll_notifs_sink; - } + if let Some(pos) = self.out_protocols.iter().position(|(n, _)| *n == protocol_name) { + if let Some(substream) = out_substreams[pos].as_mut() { + let _ = substream.start_send_unpin(message); + continue 'poll_notifs_sink; } - } - // This code can be reached via the following scenarios: - // - // - User tried to send a notification on a non-existing protocol. This - // most likely relates to https://github.com/paritytech/substrate/issues/6827 - // - User tried to send a notification to a peer we're not or no longer - // connected to. This happens in a normal scenario due to the racy nature - // of connections and disconnections, and is benign. - // - // We print a warning in the former condition. - if !found_any_with_name { + } else { log::warn!( target: "sub-libp2p", "Tried to send a notification on non-registered protocol: {:?}", @@ -674,7 +895,8 @@ impl ProtocolsHandler for NotifsHandler { } } - // If `self.pending_handshake` is `Some`, we are in a state where the handshake-bearing + // TODO: legacy substream + /*// If `self.pending_handshake` is `Some`, we are in a state where the handshake-bearing // substream (either the legacy substream or the one special-cased as providing the // handshake) is open but the user isn't aware yet of the substreams being open. 
// When that is the case, neither the legacy substream nor the incoming notifications @@ -693,6 +915,7 @@ impl ProtocolsHandler for NotifsHandler { debug_assert!(pending_handshake.is_none()); *pending_handshake = Some(received_handshake); } + // TODO: wrong _ => debug_assert!(false), } @@ -722,158 +945,7 @@ impl ProtocolsHandler for NotifsHandler { return Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::Legacy(err))), } } - } - - for (handler_num, (handler, handshake_message)) in self.in_handlers.iter_mut().enumerate() { - loop { - let poll = if matches!(self.state, State::Open { .. }) { - handler.poll(cx) - } else { - handler.poll_process(cx) - }; - - let ev = match poll { - Poll::Ready(e) => e, - Poll::Pending => break, - }; - - match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { .. } => - error!("Incoming substream handler tried to open a substream"), - ProtocolsHandlerEvent::Close(err) => void::unreachable(err), - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(_)) => - match &mut self.state { - State::Closed { pending_in } => { - let was_empty = pending_in.is_empty(); - pending_in.push(handler_num); - if was_empty { - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::OpenDesired - )); - } - }, - State::Opening { .. } | State::Open { .. } => { - // We create `handshake_message` on a separate line to be sure - // that the lock is released as soon as possible. - let handshake_message = handshake_message.read().clone(); - handler.inject_event(NotifsInHandlerIn::Accept(handshake_message)) - }, - }, - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed) => { - match &mut self.state { - State::Open { want_closed, .. } if *want_closed == false => { - *want_closed = true; - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::CloseDesired - )); - } - State::Open { .. } => {} - State::Opening { .. } => {} - State::Closed { .. 
} => debug_assert!(false), - } - }, - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(message)) => { - if matches!(self.state, State::Open { .. }) { - let msg = NotifsHandlerOut::Notification { - message, - protocol_name: handler.protocol_name().clone(), - }; - return Poll::Ready(ProtocolsHandlerEvent::Custom(msg)); - } else { - debug_assert!(false); - } - }, - } - } - } - - for (handler_num, (handler, _)) in self.out_handlers.iter_mut().enumerate() { - while let Poll::Ready(ev) = handler.poll(cx) { - match (ev, &mut self.state) { - (ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }, _) => - return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: protocol - .map_info(|()| handler_num), - }), - (ProtocolsHandlerEvent::Close(err), _) => void::unreachable(err), - - // Opened substream on the handshake-bearing notification protocol. - ( - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { handshake }), - State::Opening { pending_handshake } - ) if handler_num == 0 && pending_handshake.is_none() => - { - *pending_handshake = Some(handshake); - }, - - (ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { .. }), _) - if handler_num == 0 => debug_assert!(false), - (ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { .. }), _) => {}, - - ( - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed), - State::Open { want_closed, .. } - ) => { - if *want_closed == false { - *want_closed = true; - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::CloseDesired - )); - } - }, - - // Remote has denied an opening attempt for this notifications protocol. - // This fails the entire opening attempt. - (ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Refused), State::Opening { .. }) | - (ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed), State::Opening { .. 
}) => { - self.legacy.inject_event(LegacyProtoHandlerIn::Close); - for (handler, _) in &mut self.out_handlers { - handler.inject_event(NotifsOutHandlerIn::Disable); - } - - self.state = State::Closed { - pending_in: Vec::new(), - }; - - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::OpenResultErr - )); - }, - - - (ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Refused), _) | - (ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed), _) => - debug_assert!(false), - } - } - } - - if let State::Opening { pending_handshake: Some(pending_handshake), .. } = &mut self.state { - if self.out_handlers.iter().all(|(h, _)| h.is_open() || h.is_refused()) { - let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); - let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); - let notifications_sink = NotificationsSink { - inner: Arc::new(NotificationsSinkInner { - async_channel: FuturesMutex::new(async_tx), - sync_channel: Mutex::new(sync_tx), - }), - }; - - let pending_handshake = mem::replace(pending_handshake, Vec::new()); - self.state = State::Open { - notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()), - want_closed: false, - }; - - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::OpenResultOk { - endpoint: self.endpoint.clone(), - received_handshake: pending_handshake, - notifications_sink - } - )) - } - } + }*/ Poll::Pending } diff --git a/client/network/src/protocol/generic_proto/handler/notif_in.rs b/client/network/src/protocol/generic_proto/handler/notif_in.rs deleted file mode 100644 index d3b505e0de3e2..0000000000000 --- a/client/network/src/protocol/generic_proto/handler/notif_in.rs +++ /dev/null @@ -1,293 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for ingoing -//! substreams for a single gossiping protocol. -//! -//! > **Note**: Each instance corresponds to a single protocol. In order to support multiple -//! > protocols, you need to create multiple instances and group them. -//! - -use crate::protocol::generic_proto::upgrade::{NotificationsIn, NotificationsInSubstream}; -use bytes::BytesMut; -use futures::prelude::*; -use libp2p::core::{ConnectedPoint, PeerId}; -use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade}; -use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, -}; -use log::{error, warn}; -use std::{borrow::Cow, collections::VecDeque, fmt, pin::Pin, task::{Context, Poll}}; - -/// Implements the `IntoProtocolsHandler` trait of libp2p. -/// -/// Every time a connection with a remote starts, an instance of this struct is created and -/// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a [`NotifsInHandler`]. -pub struct NotifsInHandlerProto { - /// Configuration for the protocol upgrade to negotiate. 
- in_protocol: NotificationsIn, -} - -/// The actual handler once the connection has been established. -pub struct NotifsInHandler { - /// Configuration for the protocol upgrade to negotiate for inbound substreams. - in_protocol: NotificationsIn, - - /// Substream that is open with the remote. - substream: Option>, - - /// If the substream is opened and closed rapidly, we can emit several `OpenRequest` and - /// `Closed` messages in a row without the handler having time to respond with `Accept` or - /// `Refuse`. - /// - /// In order to keep the state consistent, we increment this variable every time an - /// `OpenRequest` is emitted and decrement it every time an `Accept` or `Refuse` is received. - pending_accept_refuses: usize, - - /// Queue of events to send to the outside. - /// - /// This queue is only ever modified to insert elements at the back, or remove the first - /// element. - events_queue: VecDeque>, -} - -/// Event that can be received by a `NotifsInHandler`. -#[derive(Debug, Clone)] -pub enum NotifsInHandlerIn { - /// Can be sent back as a response to an `OpenRequest`. Contains the status message to send - /// to the remote. - /// - /// After sending this to the handler, the substream is now considered open and `Notif` events - /// can be received. - Accept(Vec), - - /// Can be sent back as a response to an `OpenRequest`. - Refuse, -} - -/// Event that can be emitted by a `NotifsInHandler`. -#[derive(Debug)] -pub enum NotifsInHandlerOut { - /// The remote wants to open a substream. Contains the initial message sent by the remote - /// when the substream has been opened. - /// - /// Every time this event is emitted, a corresponding `Accepted` or `Refused` **must** be sent - /// back even if a `Closed` is received. - OpenRequest(Vec), - - /// The notifications substream has been closed by the remote. In order to avoid race - /// conditions, this does **not** cancel any previously-sent `OpenRequest`. 
- Closed, - - /// Received a message on the notifications substream. - /// - /// Can only happen after an `Accept` and before a `Closed`. - Notif(BytesMut), -} - -impl NotifsInHandlerProto { - /// Builds a new `NotifsInHandlerProto`. - pub fn new( - protocol_name: impl Into> - ) -> Self { - NotifsInHandlerProto { - in_protocol: NotificationsIn::new(protocol_name), - } - } -} - -impl IntoProtocolsHandler for NotifsInHandlerProto { - type Handler = NotifsInHandler; - - fn inbound_protocol(&self) -> NotificationsIn { - self.in_protocol.clone() - } - - fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { - NotifsInHandler { - in_protocol: self.in_protocol, - substream: None, - pending_accept_refuses: 0, - events_queue: VecDeque::new(), - } - } -} - -impl NotifsInHandler { - /// Returns the name of the protocol that we accept. - pub fn protocol_name(&self) -> &Cow<'static, str> { - self.in_protocol.protocol_name() - } - - /// Equivalent to the `poll` method of `ProtocolsHandler`, except that it is guaranteed to - /// never generate [`NotifsInHandlerOut::Notif`]. - /// - /// Use this method in situations where it is not desirable to receive events but still - /// necessary to drive any potential incoming handshake or request. 
- pub fn poll_process( - &mut self, - cx: &mut Context - ) -> Poll< - ProtocolsHandlerEvent - > { - if let Some(event) = self.events_queue.pop_front() { - return Poll::Ready(event) - } - - match self.substream.as_mut().map(|s| NotificationsInSubstream::poll_process(Pin::new(s), cx)) { - None | Some(Poll::Pending) => {}, - Some(Poll::Ready(Ok(v))) => match v {}, - Some(Poll::Ready(Err(_))) => { - self.substream = None; - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); - }, - } - - Poll::Pending - } -} - -impl ProtocolsHandler for NotifsInHandler { - type InEvent = NotifsInHandlerIn; - type OutEvent = NotifsInHandlerOut; - type Error = void::Void; - type InboundProtocol = NotificationsIn; - type OutboundProtocol = DeniedUpgrade; - type OutboundOpenInfo = (); - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(self.in_protocol.clone(), ()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - (msg, proto): >::Output, - (): () - ) { - // If a substream already exists, we drop it and replace it with the new incoming one. - if self.substream.is_some() { - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); - } - - // Note that we drop the existing substream, which will send an equivalent to a TCP "RST" - // to the remote and force-close the substream. It might seem like an unclean way to get - // rid of a substream. However, keep in mind that it is invalid for the remote to open - // multiple such substreams, and therefore sending a "RST" is not an incorrect thing to do. 
- self.substream = Some(proto); - - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(msg))); - self.pending_accept_refuses = self.pending_accept_refuses - .checked_add(1) - .unwrap_or_else(|| { - error!(target: "sub-libp2p", "Overflow in pending_accept_refuses"); - usize::max_value() - }); - } - - fn inject_fully_negotiated_outbound( - &mut self, - out: >::Output, - _: Self::OutboundOpenInfo - ) { - // We never emit any outgoing substream. - void::unreachable(out) - } - - fn inject_event(&mut self, message: NotifsInHandlerIn) { - self.pending_accept_refuses = match self.pending_accept_refuses.checked_sub(1) { - Some(v) => v, - None => { - error!( - target: "sub-libp2p", - "Inconsistent state: received Accept/Refuse when no pending request exists" - ); - return; - } - }; - - // If we send multiple `OpenRequest`s in a row, we will receive back multiple - // `Accept`/`Refuse` messages. All of them are obsolete except the last one. - if self.pending_accept_refuses != 0 { - return; - } - - match (message, self.substream.as_mut()) { - (NotifsInHandlerIn::Accept(message), Some(sub)) => sub.send_handshake(message), - (NotifsInHandlerIn::Accept(_), None) => {}, - (NotifsInHandlerIn::Refuse, _) => self.substream = None, - } - } - - fn inject_dial_upgrade_error(&mut self, _: (), _: ProtocolsHandlerUpgrErr) { - error!(target: "sub-libp2p", "Received dial upgrade error in inbound-only handler"); - } - - fn connection_keep_alive(&self) -> KeepAlive { - if self.substream.is_some() { - KeepAlive::Yes - } else { - KeepAlive::No - } - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll< - ProtocolsHandlerEvent - > { - // Flush the events queue if necessary. 
- if let Some(event) = self.events_queue.pop_front() { - return Poll::Ready(event) - } - - match self.substream.as_mut().map(|s| Stream::poll_next(Pin::new(s), cx)) { - None | Some(Poll::Pending) => {}, - Some(Poll::Ready(Some(Ok(msg)))) => { - if self.pending_accept_refuses != 0 { - warn!( - target: "sub-libp2p", - "Bad state in inbound-only handler: notif before accepting substream" - ); - } - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(msg))) - }, - Some(Poll::Ready(None)) | Some(Poll::Ready(Some(Err(_)))) => { - self.substream = None; - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); - }, - } - - Poll::Pending - } -} - -impl fmt::Debug for NotifsInHandler { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("NotifsInHandler") - .field("substream_open", &self.substream.is_some()) - .finish() - } -} diff --git a/client/network/src/protocol/generic_proto/handler/notif_out.rs b/client/network/src/protocol/generic_proto/handler/notif_out.rs deleted file mode 100644 index 414e62c0d135f..0000000000000 --- a/client/network/src/protocol/generic_proto/handler/notif_out.rs +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! 
Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for outgoing -//! substreams of a single gossiping protocol. -//! -//! > **Note**: Each instance corresponds to a single protocol. In order to support multiple -//! > protocols, you need to create multiple instances and group them. -//! - -use crate::protocol::generic_proto::upgrade::{NotificationsOut, NotificationsOutSubstream, NotificationsHandshakeError}; -use futures::prelude::*; -use libp2p::core::{ConnectedPoint, PeerId}; -use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade}; -use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, -}; -use log::{debug, warn, error}; -use std::{ - borrow::Cow, collections::VecDeque, fmt, mem, pin::Pin, task::{Context, Poll, Waker}, - time::Duration -}; -use wasm_timer::Instant; - -/// Maximum duration to open a substream and receive the handshake message. After that, we -/// consider that we failed to open the substream. -const OPEN_TIMEOUT: Duration = Duration::from_secs(10); -/// After successfully establishing a connection with the remote, we keep the connection open for -/// at least this amount of time in order to give the rest of the code the chance to notify us to -/// open substreams. -const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5); - -/// Implements the `IntoProtocolsHandler` trait of libp2p. -/// -/// Every time a connection with a remote starts, an instance of this struct is created and -/// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a [`NotifsOutHandler`]. -/// -/// See the documentation of [`NotifsOutHandler`] for more information. -pub struct NotifsOutHandlerProto { - /// Name of the protocol to negotiate. 
- protocol_name: Cow<'static, str>, -} - -impl NotifsOutHandlerProto { - /// Builds a new [`NotifsOutHandlerProto`]. Will use the given protocol name for the - /// notifications substream. - pub fn new(protocol_name: impl Into>) -> Self { - NotifsOutHandlerProto { - protocol_name: protocol_name.into(), - } - } -} - -impl IntoProtocolsHandler for NotifsOutHandlerProto { - type Handler = NotifsOutHandler; - - fn inbound_protocol(&self) -> DeniedUpgrade { - DeniedUpgrade - } - - fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { - NotifsOutHandler { - protocol_name: self.protocol_name, - when_connection_open: Instant::now(), - state: State::Disabled, - events_queue: VecDeque::new(), - } - } -} - -/// Handler for an outbound notification substream. -/// -/// When a connection is established, this handler starts in the "disabled" state, meaning that -/// no substream will be open. -/// -/// One can try open a substream by sending an [`NotifsOutHandlerIn::Enable`] message to the -/// handler. Once done, the handler will try to establish then maintain an outbound substream with -/// the remote for the purpose of sending notifications to it. -pub struct NotifsOutHandler { - /// Name of the protocol to negotiate. - protocol_name: Cow<'static, str>, - - /// Relationship with the node we're connected to. - state: State, - - /// When the connection with the remote has been successfully established. - when_connection_open: Instant, - - /// Queue of events to send to the outside. - /// - /// This queue must only ever be modified to insert elements at the back, or remove the first - /// element. - events_queue: VecDeque>, -} - -/// Our relationship with the node we're connected to. -enum State { - /// The handler is disabled and idle. No substream is open. - Disabled, - - /// The handler is disabled. A substream is still open and needs to be closed. 
- /// - /// > **Important**: Having this state means that `poll_close` has been called at least once, - /// > but the `Sink` API is unclear about whether or not the stream can then - /// > be recovered. Because of that, we must never switch from the - /// > `DisabledOpen` state to the `Open` state while keeping the same substream. - DisabledOpen(NotificationsOutSubstream), - - /// The handler is disabled but we are still trying to open a substream with the remote. - /// - /// If the handler gets enabled again, we can immediately switch to `Opening`. - DisabledOpening, - - /// The handler is enabled and we are trying to open a substream with the remote. - Opening { - /// The initial message that we sent. Necessary if we need to re-open a substream. - initial_message: Vec, - }, - - /// The handler is enabled. We have tried opening a substream in the past but the remote - /// refused it. - Refused, - - /// The handler is enabled and substream is open. - Open { - /// Substream that is currently open. - substream: NotificationsOutSubstream, - /// Waker for the last task that got `Poll::Pending` from `poll_ready`, to notify - /// when the open substream closes due to being disabled or encountering an - /// error, i.e. to notify the task as soon as the substream becomes unavailable, - /// without waiting for an underlying I/O task wakeup. - close_waker: Option, - /// The initial message that we sent. Necessary if we need to re-open a substream. - initial_message: Vec, - }, - - /// Poisoned state. Shouldn't be found in the wild. - Poisoned, -} - -/// Event that can be received by a `NotifsOutHandler`. -#[derive(Debug)] -pub enum NotifsOutHandlerIn { - /// Enables the notifications substream for this node. The handler will try to maintain a - /// substream with the remote. - Enable { - /// Initial message to send to remote nodes when we open substreams. - initial_message: Vec, - }, - - /// Disables the notifications substream for this node. This is the default state. 
- Disable, -} - -/// Event that can be emitted by a `NotifsOutHandler`. -#[derive(Debug)] -pub enum NotifsOutHandlerOut { - /// The notifications substream has been accepted by the remote. - Open { - /// Handshake message sent by the remote after we opened the substream. - handshake: Vec, - }, - - /// The notifications substream has been closed by the remote. - Closed, - - /// We tried to open a notifications substream, but the remote refused it. - /// - /// Can only happen if we're in a closed state. - Refused, -} - -impl NotifsOutHandler { - /// Returns true if the substream is currently open. - pub fn is_open(&self) -> bool { - match &self.state { - State::Disabled => false, - State::DisabledOpening => false, - State::DisabledOpen(_) => true, - State::Opening { .. } => false, - State::Refused => false, - State::Open { .. } => true, - State::Poisoned => false, - } - } - - /// Returns `true` if there has been an attempt to open the substream, but the remote refused - /// the substream. - /// - /// Always returns `false` if the handler is in a disabled state. - pub fn is_refused(&self) -> bool { - match &self.state { - State::Disabled => false, - State::DisabledOpening => false, - State::DisabledOpen(_) => false, - State::Opening { .. } => false, - State::Refused => true, - State::Open { .. } => false, - State::Poisoned => false, - } - } - - /// Returns the name of the protocol that we negotiate. - pub fn protocol_name(&self) -> &Cow<'static, str> { - &self.protocol_name - } - - /// Polls whether the outbound substream is ready to send a notification. - /// - /// - Returns `Poll::Pending` if the substream is open but not ready to send a notification. - /// - Returns `Poll::Ready(true)` if the substream is ready to send a notification. - /// - Returns `Poll::Ready(false)` if the substream is closed. - /// - pub fn poll_ready(&mut self, cx: &mut Context) -> Poll { - if let State::Open { substream, close_waker, .. 
} = &mut self.state { - match substream.poll_ready_unpin(cx) { - Poll::Ready(Ok(())) => Poll::Ready(true), - Poll::Ready(Err(_)) => Poll::Ready(false), - Poll::Pending => { - *close_waker = Some(cx.waker().clone()); - Poll::Pending - } - } - } else { - Poll::Ready(false) - } - } - - /// Sends out a notification. - /// - /// If the substream is closed, or not ready to send out a notification yet, then the - /// notification is silently discarded. - /// - /// You are encouraged to call [`NotifsOutHandler::poll_ready`] beforehand to determine - /// whether this will succeed. If `Poll::Ready(true)` is returned, then this method will send - /// out a notification. - pub fn send_or_discard(&mut self, notification: Vec) { - if let State::Open { substream, .. } = &mut self.state { - let _ = substream.start_send_unpin(notification); - } - } -} - -impl ProtocolsHandler for NotifsOutHandler { - type InEvent = NotifsOutHandlerIn; - type OutEvent = NotifsOutHandlerOut; - type Error = void::Void; - type InboundProtocol = DeniedUpgrade; - type OutboundProtocol = NotificationsOut; - type OutboundOpenInfo = (); - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(DeniedUpgrade, ()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - proto: >::Output, - (): () - ) { - // We should never reach here. `proto` is a `Void`. - void::unreachable(proto) - } - - fn inject_fully_negotiated_outbound( - &mut self, - (handshake_msg, substream): >::Output, - _: () - ) { - match mem::replace(&mut self.state, State::Poisoned) { - State::Opening { initial_message } => { - let ev = NotifsOutHandlerOut::Open { handshake: handshake_msg }; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(ev)); - self.state = State::Open { substream, initial_message, close_waker: None }; - }, - // If the handler was disabled while we were negotiating the protocol, immediately - // close it. 
- State::DisabledOpening => self.state = State::DisabledOpen(substream), - - // Any other situation should never happen. - State::Disabled | State::Refused | State::Open { .. } | State::DisabledOpen(_) => - error!("☎️ State mismatch in notifications handler: substream already open"), - State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), - } - } - - fn inject_event(&mut self, message: NotifsOutHandlerIn) { - match message { - NotifsOutHandlerIn::Enable { initial_message } => { - match mem::replace(&mut self.state, State::Poisoned) { - State::Disabled => { - let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone()); - self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), - }); - self.state = State::Opening { initial_message }; - }, - State::DisabledOpening => self.state = State::Opening { initial_message }, - State::DisabledOpen(mut sub) => { - // As documented above, in this state we have already called `poll_close` - // once on the substream, and it is unclear whether the substream can then - // be recovered. When in doubt, let's drop the existing substream and - // open a new one. - if sub.close().now_or_never().is_none() { - warn!( - target: "sub-libp2p", - "📞 Improperly closed outbound notifications substream" - ); - } - - let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone()); - self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), - }); - self.state = State::Opening { initial_message }; - }, - st @ State::Opening { .. } | st @ State::Refused | st @ State::Open { .. 
} => { - debug!(target: "sub-libp2p", - "Tried to enable notifications handler that was already enabled"); - self.state = st; - } - State::Poisoned => error!("Notifications handler in a poisoned state"), - } - } - - NotifsOutHandlerIn::Disable => { - match mem::replace(&mut self.state, State::Poisoned) { - st @ State::Disabled | st @ State::DisabledOpen(_) | st @ State::DisabledOpening => { - debug!(target: "sub-libp2p", - "Tried to disable notifications handler that was already disabled"); - self.state = st; - } - State::Opening { .. } => self.state = State::DisabledOpening, - State::Refused => self.state = State::Disabled, - State::Open { substream, close_waker, .. } => { - if let Some(close_waker) = close_waker { - close_waker.wake(); - } - self.state = State::DisabledOpen(substream) - }, - State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), - } - } - } - } - - fn inject_dial_upgrade_error(&mut self, _: (), _: ProtocolsHandlerUpgrErr) { - match mem::replace(&mut self.state, State::Poisoned) { - State::Disabled => {}, - State::DisabledOpen(_) | State::Refused | State::Open { .. } => - error!("☎️ State mismatch in NotificationsOut"), - State::Opening { .. } => { - self.state = State::Refused; - let ev = NotifsOutHandlerOut::Refused; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(ev)); - }, - State::DisabledOpening => self.state = State::Disabled, - State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), - } - } - - fn connection_keep_alive(&self) -> KeepAlive { - match self.state { - // We have a small grace period of `INITIAL_KEEPALIVE_TIME` during which we keep the - // connection open no matter what, in order to avoid closing and reopening - // connections all the time. - State::Disabled | State::DisabledOpen(_) | State::DisabledOpening => - KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME), - State::Opening { .. } | State::Open { .. 
} => KeepAlive::Yes, - State::Refused | State::Poisoned => KeepAlive::No, - } - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll> { - // Flush the events queue if necessary. - if let Some(event) = self.events_queue.pop_front() { - return Poll::Ready(event) - } - - match &mut self.state { - State::Open { substream, initial_message, close_waker } => - match Sink::poll_flush(Pin::new(substream), cx) { - Poll::Pending | Poll::Ready(Ok(())) => {}, - Poll::Ready(Err(_)) => { - if let Some(close_waker) = close_waker.take() { - close_waker.wake(); - } - - // We try to re-open a substream. - let initial_message = mem::replace(initial_message, Vec::new()); - self.state = State::Opening { initial_message: initial_message.clone() }; - let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message); - self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), - }); - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed)); - } - }, - - State::DisabledOpen(sub) => match Sink::poll_close(Pin::new(sub), cx) { - Poll::Pending => {}, - Poll::Ready(Ok(())) | Poll::Ready(Err(_)) => { - self.state = State::Disabled; - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed)); - }, - }, - - _ => {} - } - - Poll::Pending - } -} - -impl fmt::Debug for NotifsOutHandler { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("NotifsOutHandler") - .field("open", &self.is_open()) - .finish() - } -} From 85134302169ff21305752523f3d10fe0a07195cd Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 11 Nov 2020 17:21:41 +0100 Subject: [PATCH 15/39] Rewrite group.rs entirely [part 2] --- client/network/src/protocol.rs | 2 +- client/network/src/protocol/generic_proto.rs | 2 +- .../src/protocol/generic_proto/handler.rs | 2 - .../protocol/generic_proto/handler/group.rs | 168 ++++++---- 
.../protocol/generic_proto/handler/legacy.rs | 303 ------------------ .../protocol/generic_proto/upgrade/legacy.rs | 13 +- client/network/src/service.rs | 5 +- 7 files changed, 103 insertions(+), 392 deletions(-) delete mode 100644 client/network/src/protocol/generic_proto/handler/legacy.rs diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 1926265330f39..9403e471b0f27 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -63,7 +63,7 @@ pub mod message; pub mod event; pub mod sync; -pub use generic_proto::{NotificationsSink, Ready, NotifsHandlerError, LegacyConnectionKillError}; +pub use generic_proto::{NotificationsSink, Ready, NotifsHandlerError}; const REQUEST_TIMEOUT_SEC: u64 = 40; /// Interval at which we perform time based maintenance diff --git a/client/network/src/protocol/generic_proto.rs b/client/network/src/protocol/generic_proto.rs index 3133471b0d249..4d6e607a146e7 100644 --- a/client/network/src/protocol/generic_proto.rs +++ b/client/network/src/protocol/generic_proto.rs @@ -21,7 +21,7 @@ //! network, then performs the Substrate protocol handling on top. 
pub use self::behaviour::{GenericProto, GenericProtoOut}; -pub use self::handler::{NotifsHandlerError, NotificationsSink, Ready, LegacyConnectionKillError}; +pub use self::handler::{NotifsHandlerError, NotificationsSink, Ready}; mod behaviour; mod handler; diff --git a/client/network/src/protocol/generic_proto/handler.rs b/client/network/src/protocol/generic_proto/handler.rs index fdf73498c82bf..980935387df00 100644 --- a/client/network/src/protocol/generic_proto/handler.rs +++ b/client/network/src/protocol/generic_proto/handler.rs @@ -19,7 +19,5 @@ pub use self::group::{ NotificationsSink, NotifsHandlerError, Ready, NotifsHandlerProto, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut }; -pub use self::legacy::ConnectionKillError as LegacyConnectionKillError; mod group; -mod legacy; diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index 6d5ba68ffabaa..87e4e36ed57f1 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -55,10 +55,10 @@ //! [`NotifsHandlerIn::Open`] has gotten an answer. 
use crate::protocol::generic_proto::{ - handler::legacy::{LegacyProtoHandler, LegacyProtoHandlerProto, LegacyProtoHandlerIn}, upgrade::{ NotificationsIn, NotificationsOut, NotificationsInSubstream, NotificationsOutSubstream, - NotificationsHandshakeError, RegisteredProtocol, UpgradeCollec + NotificationsHandshakeError, RegisteredProtocol, RegisteredProtocolSubstream, + RegisteredProtocolEvent, UpgradeCollec }, }; @@ -80,7 +80,8 @@ use futures::{ }; use log::error; use parking_lot::{Mutex, RwLock}; -use std::{borrow::Cow, collections::VecDeque, cmp, mem, pin::Pin, str, sync::Arc, task::{Context, Poll}, time::Duration}; +use smallvec::SmallVec; +use std::{borrow::Cow, collections::VecDeque, mem, pin::Pin, str, sync::Arc, task::{Context, Poll}, time::Duration}; use wasm_timer::Instant; /// Number of pending notifications in asynchronous contexts. @@ -115,8 +116,8 @@ pub struct NotifsHandlerProto { /// send. out_protocols: Vec<(Cow<'static, str>, Arc>>)>, - /// Prototype for handler for backwards-compatibility. - legacy: LegacyProtoHandlerProto, + /// Configuration for the legacy protocol upgrade. + legacy_protocol: RegisteredProtocol, } /// The actual handler once the connection has been established. @@ -137,12 +138,18 @@ pub struct NotifsHandler { /// Whether we are the connection dialer or listener. endpoint: ConnectedPoint, - /// Handler for backwards-compatibility. - legacy: LegacyProtoHandler, - /// State of this handler. state: State, + /// Configuration for the legacy protocol upgrade. + legacy_protocol: RegisteredProtocol, + + /// The substreams where bidirectional communications happen. + legacy_substreams: SmallVec<[RegisteredProtocolSubstream; 4]>, + + /// Contains substreams which are being shut down. + legacy_shutdown: SmallVec<[RegisteredProtocolSubstream; 4]>, + /// Events to return in priority from `poll`. 
events_queue: VecDeque< ProtocolsHandlerEvent @@ -240,10 +247,10 @@ impl IntoProtocolsHandler for NotifsHandlerProto { .map(|(h, _)| h.clone()) .collect::>(); - SelectUpgrade::new(in_protocols, self.legacy.inbound_protocol()) + SelectUpgrade::new(in_protocols, self.legacy_protocol.clone()) } - fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { + fn into_handler(self, _: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { let num_out_proto = self.out_protocols.len(); NotifsHandler { @@ -251,10 +258,12 @@ impl IntoProtocolsHandler for NotifsHandlerProto { out_protocols: self.out_protocols, endpoint: connected_point.clone(), when_connection_open: Instant::now(), - legacy: self.legacy.into_handler(remote_peer_id, connected_point), state: State::Closed { pending_opening: (0..num_out_proto).map(|_| false).collect(), }, + legacy_protocol: self.legacy_protocol, + legacy_substreams: SmallVec::new(), + legacy_shutdown: SmallVec::new(), events_queue: VecDeque::with_capacity(16), } } @@ -447,8 +456,6 @@ impl<'a> Ready<'a> { pub enum NotifsHandlerError { /// Channel of synchronous notifications is full. SyncNotificationsClogged, - /// Error in legacy protocol. - Legacy(::Error), } impl NotifsHandlerProto { @@ -466,7 +473,7 @@ impl NotifsHandlerProto { /// - Panics if `list` is empty. 
/// pub fn new( - legacy: RegisteredProtocol, + legacy_protocol: RegisteredProtocol, list: impl Into, Arc>>)>>, ) -> Self { let list = list.into(); @@ -485,7 +492,7 @@ impl NotifsHandlerProto { NotifsHandlerProto { in_protocols, out_protocols, - legacy: LegacyProtoHandlerProto::new(legacy), + legacy_protocol, } } } @@ -505,7 +512,7 @@ impl ProtocolsHandler for NotifsHandler { .map(|(h, _)| h.clone()) .collect::>(); - let proto = SelectUpgrade::new(in_protocols, self.legacy.listen_protocol().into_upgrade().1); + let proto = SelectUpgrade::new(in_protocols, self.legacy_protocol.clone()); SubstreamProtocol::new(proto, ()) } @@ -515,6 +522,7 @@ impl ProtocolsHandler for NotifsHandler { (): () ) { match out { + // Received notifications substream. EitherOutput::First(((_remote_handshake, mut proto), num)) => { match &mut self.state { State::Closed { pending_opening } => { @@ -559,8 +567,19 @@ impl ProtocolsHandler for NotifsHandler { }, }; } - EitherOutput::Second(out) => - self.legacy.inject_fully_negotiated_inbound(out, ()), + + // Received legacy substream. + EitherOutput::Second((substream, _handshake)) => { + // Note: while we awknowledge legacy substreams and handle incoming messages, + // it doesn't trigger any `OpenDesired` event as a way to simplify the logic of + // this code. + // Since mid-2019, legacy substreams are supposed to used at the same time as + // notifications substreams, and not in isolation. Nodes that open legacy + // substreams in isolation are considered deprecated. + if self.legacy_substreams.len() <= 4 { + self.legacy_substreams.push(substream); + } + }, } } @@ -680,11 +699,14 @@ impl ProtocolsHandler for NotifsHandler { }, NotifsHandlerIn::Close => { + for mut substream in self.legacy_substreams.drain() { + substream.shutdown(); + self.legacy_shutdown.push(substream); + } + match &mut self.state { State::Open { .. } | State::Opening { .. 
} => { - self.legacy.inject_event(LegacyProtoHandlerIn::Close); - self.state = State::Closed { pending_opening: Vec::new(), }; @@ -714,7 +736,7 @@ impl ProtocolsHandler for NotifsHandler { fn inject_dial_upgrade_error( &mut self, num: usize, - err: ProtocolsHandlerUpgrErr + _: ProtocolsHandlerUpgrErr ) { match &mut self.state { State::Closed { pending_opening } | State::OpenDesired { pending_opening, .. } => { @@ -743,10 +765,12 @@ impl ProtocolsHandler for NotifsHandler { } fn connection_keep_alive(&self) -> KeepAlive { - let legacy = self.legacy.connection_keep_alive(); + if !self.legacy_substreams.is_empty() { + return KeepAlive::Yes; + } + match self.state { - State::Closed { .. } => - cmp::max(legacy, KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME)), + State::Closed { .. } => KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME), State::OpenDesired { .. } | State::Opening { .. } | State::Open { .. } => KeepAlive::Yes, } @@ -895,58 +919,62 @@ impl ProtocolsHandler for NotifsHandler { } } - // TODO: legacy substream - /*// If `self.pending_handshake` is `Some`, we are in a state where the handshake-bearing - // substream (either the legacy substream or the one special-cased as providing the - // handshake) is open but the user isn't aware yet of the substreams being open. - // When that is the case, neither the legacy substream nor the incoming notifications - // substreams should be polled, otherwise there is a risk of receiving messages from them. - if !matches!(self.state, State::Opening { pending_handshake: Some(_) }) { - while let Poll::Ready(ev) = self.legacy.poll(cx) { - match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, .. } => - match *protocol.info() {}, - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { - received_handshake, - .. 
- }) => { - match &mut self.state { - State::Opening { pending_handshake } => { - debug_assert!(pending_handshake.is_none()); - *pending_handshake = Some(received_handshake); - } - // TODO: wrong - _ => debug_assert!(false), - } - - cx.waker().wake_by_ref(); - return Poll::Pending; - }, - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { .. }) => { - match &mut self.state { - State::Open { want_closed, .. } if *want_closed == false => { - *want_closed = true; - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::CloseDesired - )); - } - State::Open { .. } => {} - State::Opening { .. } => {} - State::Closed { .. } => debug_assert!(false), - } - }, - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => { - debug_assert!(!matches!(self.state, State::Open { .. })); + for n in (0..self.legacy_substreams.len()).rev() { + let mut substream = self.legacy_substreams.swap_remove(n); + let poll_outcome = Pin::new(&mut substream).poll_next(cx); + match poll_outcome { + Poll::Pending => self.legacy_substreams.push(substream), + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => { + self.legacy_substreams.push(substream); + if matches!(self.state, State::Open { .. }) { return Poll::Ready(ProtocolsHandlerEvent::Custom( NotifsHandlerOut::CustomMessage { message } )) - }, - ProtocolsHandlerEvent::Close(err) => - return Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::Legacy(err))), + } + }, + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) => { + return Poll::Ready(ProtocolsHandlerEvent::Close( + NotifsHandlerError::SyncNotificationsClogged + )) + } + Poll::Ready(None) | Poll::Ready(Some(Err(_))) => { + if matches!(poll_outcome, Poll::Ready(None)) { + self.legacy_shutdown.push(substream); + } + + if let State::Open { want_closed, .. 
} = &mut self.state { + if !*want_closed { + *want_closed = true; + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CloseDesired + )) + } + } } } - }*/ + } + + shutdown_list(&mut self.legacy_shutdown, cx); Poll::Pending } } + +/// Given a list of substreams, tries to shut them down. The substreams that have been successfully +/// shut down are removed from the list. +fn shutdown_list + (list: &mut SmallVec>>, + cx: &mut Context) +{ + 'outer: for n in (0..list.len()).rev() { + let mut substream = list.swap_remove(n); + loop { + match substream.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(_))) => {} + Poll::Pending => break, + Poll::Ready(Some(Err(_))) | Poll::Ready(None) => continue 'outer, + } + } + list.push(substream); + } +} diff --git a/client/network/src/protocol/generic_proto/handler/legacy.rs b/client/network/src/protocol/generic_proto/handler/legacy.rs deleted file mode 100644 index 58d533aa5a51d..0000000000000 --- a/client/network/src/protocol/generic_proto/handler/legacy.rs +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . 
- -use crate::protocol::generic_proto::upgrade::{RegisteredProtocol, RegisteredProtocolEvent, RegisteredProtocolSubstream}; - -use bytes::BytesMut; -use futures::prelude::*; -use libp2p::core::{ConnectedPoint, PeerId}; -use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade}; -use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, -}; -use smallvec::SmallVec; -use std::{collections::VecDeque, convert::Infallible, error, fmt, io}; -use std::{pin::Pin, task::{Context, Poll}}; - -/// Handler for the legacy substream. -/// -/// The so-called legacy substream is a deprecated way of establishing a Substrate-specific -/// substream in an active connection. -/// -/// Pro-actively opening a legacy substream is no longer supported. Only accepting incoming legacy -/// substreams is possible. As part of the protocol, only the dialing side of a connection -/// (emphasis *connection*, not substream) is allowed to open a legacy substream. -/// -/// # Usage -/// -/// The handler can spontaneously generate `CustomProtocolOpen` and `CustomProtocolClosed` events -/// if the remote opens or closes the substream. Send a `Close` message in order to shut down any -/// active substream. After `Close` has beent sent, a `CustomProtocolClosed` event will be sent -/// back in the near future. -/// -pub struct LegacyProtoHandlerProto { - /// Configuration for the protocol upgrade to negotiate. - protocol: RegisteredProtocol, -} - -impl LegacyProtoHandlerProto { - /// Builds a new `LegacyProtoHandlerProto`. 
- pub fn new(protocol: RegisteredProtocol) -> Self { - LegacyProtoHandlerProto { - protocol, - } - } -} - -impl IntoProtocolsHandler for LegacyProtoHandlerProto { - type Handler = LegacyProtoHandler; - - fn inbound_protocol(&self) -> RegisteredProtocol { - self.protocol.clone() - } - - fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { - LegacyProtoHandler { - protocol: self.protocol, - substreams: SmallVec::new(), - shutdown: SmallVec::new(), - events_queue: VecDeque::new(), - } - } -} - -/// The actual handler once the connection has been established. -pub struct LegacyProtoHandler { - /// Configuration for the protocol upgrade to negotiate. - protocol: RegisteredProtocol, - - /// The substreams where bidirectional communications happen. - substreams: SmallVec<[RegisteredProtocolSubstream; 4]>, - - /// Contains substreams which are being shut down. - shutdown: SmallVec<[RegisteredProtocolSubstream; 4]>, - - /// Queue of events to send to the outside. - /// - /// This queue must only ever be modified to insert elements at the back, or remove the first - /// element. - events_queue: VecDeque< - ProtocolsHandlerEvent - >, -} - -/// Event that can be received by a `LegacyProtoHandler`. -#[derive(Debug)] -pub enum LegacyProtoHandlerIn { - /// The handler should close any existing substream. - Close, -} - -/// Event that can be emitted by a `LegacyProtoHandler`. -#[derive(Debug)] -pub enum LegacyProtoHandlerOut { - /// Opened a custom protocol with the remote. - CustomProtocolOpen { - /// Version of the protocol that has been opened. - version: u8, - /// Handshake message that has been sent to us. - /// This is normally a "Status" message, but this out of the concern of this code. - received_handshake: Vec, - }, - - /// Closed a custom protocol with the remote. - CustomProtocolClosed, - - /// Receives a message on a custom protocol substream. - CustomMessage { - /// Message that has been received. 
- message: BytesMut, - }, -} - -impl LegacyProtoHandler { - /// Polls the state for events. Optionally returns an event to produce. - #[must_use] - fn poll_state(&mut self, cx: &mut Context) - -> Option> { - shutdown_list(&mut self.shutdown, cx); - - for n in (0..self.substreams.len()).rev() { - let mut substream = self.substreams.swap_remove(n); - match Pin::new(&mut substream).poll_next(cx) { - Poll::Pending => self.substreams.push(substream), - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => { - let event = LegacyProtoHandlerOut::CustomMessage { - message - }; - self.substreams.push(substream); - return Some(ProtocolsHandlerEvent::Custom(event)); - }, - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) => { - self.shutdown.push(substream); - if self.substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed; - return Some(ProtocolsHandlerEvent::Custom(event)); - } - } - Poll::Ready(None) => { - self.shutdown.push(substream); - if self.substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed; - return Some(ProtocolsHandlerEvent::Custom(event)); - } - } - Poll::Ready(Some(Err(err))) => { - if self.substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed; - return Some(ProtocolsHandlerEvent::Custom(event)); - } else { - log::debug!(target: "sub-libp2p", "Error on extra substream: {:?}", err); - } - } - } - } - - None - } -} - -impl ProtocolsHandler for LegacyProtoHandler { - type InEvent = LegacyProtoHandlerIn; - type OutEvent = LegacyProtoHandlerOut; - type Error = ConnectionKillError; - type InboundProtocol = RegisteredProtocol; - type OutboundProtocol = RegisteredProtocol; - type OutboundOpenInfo = Infallible; - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(self.protocol.clone(), ()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - (substream, received_handshake): >::Output, - (): () - ) { - if 
self.substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolOpen { - version: substream.protocol_version(), - received_handshake, - }; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); - } - - self.substreams.push(substream); - } - - fn inject_fully_negotiated_outbound( - &mut self, - _: >::Output, - unreachable: Self::OutboundOpenInfo - ) { - match unreachable {} - } - - fn inject_event(&mut self, message: LegacyProtoHandlerIn) { - // Only the `Close` message exists at the moment. - let LegacyProtoHandlerIn::Close = message; - - if !self.substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); - } - - for mut substream in self.substreams.drain() { - substream.shutdown(); - self.shutdown.push(substream); - } - } - - fn inject_dial_upgrade_error( - &mut self, - unreachable: Self::OutboundOpenInfo, - _: ProtocolsHandlerUpgrErr - ) { - match unreachable {} - } - - fn connection_keep_alive(&self) -> KeepAlive { - if self.substreams.is_empty() { - KeepAlive::No - } else { - KeepAlive::Yes - } - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll< - ProtocolsHandlerEvent - > { - // Flush the events queue if necessary. - if let Some(event) = self.events_queue.pop_front() { - return Poll::Ready(event) - } - - // Process all the substreams. - if let Some(event) = self.poll_state(cx) { - return Poll::Ready(event) - } - - Poll::Pending - } -} - -impl fmt::Debug for LegacyProtoHandler { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("LegacyProtoHandler") - .finish() - } -} - -/// Given a list of substreams, tries to shut them down. The substreams that have been successfully -/// shut down are removed from the list. 
-fn shutdown_list - (list: &mut SmallVec>>, - cx: &mut Context) -{ - 'outer: for n in (0..list.len()).rev() { - let mut substream = list.swap_remove(n); - loop { - match substream.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(_))) => {} - Poll::Pending => break, - Poll::Ready(Some(Err(_))) | Poll::Ready(None) => continue 'outer, - } - } - list.push(substream); - } -} - -/// Error returned when switching from normal to disabled. -#[derive(Debug)] -pub struct ConnectionKillError; - -impl error::Error for ConnectionKillError { -} - -impl fmt::Display for ConnectionKillError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Connection kill when switching from normal to disabled") - } -} diff --git a/client/network/src/protocol/generic_proto/upgrade/legacy.rs b/client/network/src/protocol/generic_proto/upgrade/legacy.rs index d425754a85e94..91282d0cf57dd 100644 --- a/client/network/src/protocol/generic_proto/upgrade/legacy.rs +++ b/client/network/src/protocol/generic_proto/upgrade/legacy.rs @@ -91,19 +91,12 @@ pub struct RegisteredProtocolSubstream { requires_poll_flush: bool, /// The underlying substream. inner: stream::Fuse>>, - /// Version of the protocol that was negotiated. - protocol_version: u8, /// If true, we have sent a "remote is clogged" event recently and shouldn't send another one /// unless the buffer empties then fills itself again. clogged_fuse: bool, } impl RegisteredProtocolSubstream { - /// Returns the version of the protocol that was negotiated. - pub fn protocol_version(&self) -> u8 { - self.protocol_version - } - /// Starts a graceful shutdown process on this substream. /// /// Note that "graceful" means that we sent a closing message. 
We don't wait for any @@ -237,7 +230,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, fn upgrade_inbound( self, socket: TSubstream, - info: Self::Info, + _: Self::Info, ) -> Self::Future { Box::pin(async move { let mut framed = { @@ -256,7 +249,6 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, send_queue: VecDeque::new(), requires_poll_flush: false, inner: framed.fuse(), - protocol_version: info.version, clogged_fuse: false, }, received_handshake.to_vec())) }) @@ -273,7 +265,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, fn upgrade_outbound( self, socket: TSubstream, - info: Self::Info, + _: Self::Info, ) -> Self::Future { Box::pin(async move { let mut framed = { @@ -294,7 +286,6 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, send_queue: VecDeque::new(), requires_poll_flush: false, inner: framed.fuse(), - protocol_version: info.version, clogged_fuse: false, }, received_handshake.to_vec())) }) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 93abbbad02495..5fc8485947ff5 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -39,7 +39,7 @@ use crate::{ }, on_demand_layer::AlwaysBadChecker, light_client_handler, block_requests, finality_requests, - protocol::{self, event::Event, NotifsHandlerError, LegacyConnectionKillError, NotificationsSink, Ready, sync::SyncState, PeerInfo, Protocol}, + protocol::{self, event::Event, NotifsHandlerError, NotificationsSink, Ready, sync::SyncState, PeerInfo, Protocol}, transport, ReputationChange, }; use futures::{channel::oneshot, prelude::*}; @@ -1589,9 +1589,6 @@ impl Future for NetworkWorker { Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( EitherError::A(EitherError::A(EitherError::A(EitherError::B( EitherError::A(PingFailure::Timeout)))))))))) => "ping-timeout", - 
Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( - EitherError::A(EitherError::A(EitherError::A(EitherError::A( - NotifsHandlerError::Legacy(LegacyConnectionKillError)))))))))) => "force-closed", Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( EitherError::A(EitherError::A(EitherError::A(EitherError::A( NotifsHandlerError::SyncNotificationsClogged))))))))) => "sync-notifications-clogged", From 470ef2c1c260d0ff346208454eb89ed189544280 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 12 Nov 2020 09:29:03 +0100 Subject: [PATCH 16/39] Remove faulty assertion Because of the asynchronous nature of the behaviour <-> handler communications, it is possible to receive notifications while in the Closing state --- .../src/protocol/generic_proto/behaviour.rs | 59 ++++++++++++------- 1 file changed, 37 insertions(+), 22 deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 2ea6a5c99ccf5..bef9d6396e837 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -1907,33 +1907,48 @@ impl NetworkBehaviour for GenericProto { } NotifsHandlerOut::CustomMessage { message } => { - debug_assert!(self.is_open(&source)); - trace!(target: "sub-libp2p", "Handler({:?}) => Message", source); - trace!(target: "sub-libp2p", "External API <= Message({:?})", source); - let event = GenericProtoOut::LegacyMessage { - peer_id: source, - message, - }; + if self.is_open(&source) { + trace!(target: "sub-libp2p", "Handler({:?}) => Message", source); + trace!(target: "sub-libp2p", "External API <= Message({:?})", source); + let event = GenericProtoOut::LegacyMessage { + peer_id: source, + message, + }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } 
else { + trace!( + target: "sub-libp2p", + "Handler({:?}) => Post-close message", + source, + ); + } } NotifsHandlerOut::Notification { protocol_name, message } => { - debug_assert!(self.is_open(&source)); - trace!( - target: "sub-libp2p", - "Handler({:?}) => Notification({:?})", - source, - protocol_name, - ); - trace!(target: "sub-libp2p", "External API <= Message({:?}, {:?})", protocol_name, source); - let event = GenericProtoOut::Notification { - peer_id: source, - protocol_name, - message, - }; + if self.is_open(&source) { + trace!( + target: "sub-libp2p", + "Handler({:?}) => Notification({:?})", + source, + protocol_name, + ); + trace!(target: "sub-libp2p", "External API <= Message({:?}, {:?})", protocol_name, source); + let event = GenericProtoOut::Notification { + peer_id: source, + protocol_name, + message, + }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } else { + trace!( + target: "sub-libp2p", + "Handler({:?}) => Post-close notification({:?})", + source, + protocol_name, + ); + } } } } From b46016fb030a138cfc73a7d2d9a5c783cd4ca220 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 12 Nov 2020 09:34:57 +0100 Subject: [PATCH 17/39] Don't poll the legacy substream is not Open --- .../protocol/generic_proto/handler/group.rs | 50 ++++++++++--------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index 87e4e36ed57f1..c52e7f4eac5e6 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -919,35 +919,37 @@ impl ProtocolsHandler for NotifsHandler { } } - for n in (0..self.legacy_substreams.len()).rev() { - let mut substream = self.legacy_substreams.swap_remove(n); - let poll_outcome = Pin::new(&mut substream).poll_next(cx); - match 
poll_outcome { - Poll::Pending => self.legacy_substreams.push(substream), - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => { - self.legacy_substreams.push(substream); - if matches!(self.state, State::Open { .. }) { + // The legacy substreams are polled only if the state is `Open`. Otherwise, it would be + // possible to receive notifications that would need to get silently discarded. + if matches!(self.state, State::Open { .. }) { + for n in (0..self.legacy_substreams.len()).rev() { + let mut substream = self.legacy_substreams.swap_remove(n); + let poll_outcome = Pin::new(&mut substream).poll_next(cx); + match poll_outcome { + Poll::Pending => self.legacy_substreams.push(substream), + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => { + self.legacy_substreams.push(substream); return Poll::Ready(ProtocolsHandlerEvent::Custom( NotifsHandlerOut::CustomMessage { message } )) + }, + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) => { + return Poll::Ready(ProtocolsHandlerEvent::Close( + NotifsHandlerError::SyncNotificationsClogged + )) } - }, - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) => { - return Poll::Ready(ProtocolsHandlerEvent::Close( - NotifsHandlerError::SyncNotificationsClogged - )) - } - Poll::Ready(None) | Poll::Ready(Some(Err(_))) => { - if matches!(poll_outcome, Poll::Ready(None)) { - self.legacy_shutdown.push(substream); - } + Poll::Ready(None) | Poll::Ready(Some(Err(_))) => { + if matches!(poll_outcome, Poll::Ready(None)) { + self.legacy_shutdown.push(substream); + } - if let State::Open { want_closed, .. } = &mut self.state { - if !*want_closed { - *want_closed = true; - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::CloseDesired - )) + if let State::Open { want_closed, .. 
} = &mut self.state { + if !*want_closed { + *want_closed = true; + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CloseDesired + )) + } } } } From b2018a609e2e2376db56b85e0a88d7d2851522bb Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 12 Nov 2020 12:05:46 +0100 Subject: [PATCH 18/39] Tolerate when not all substreams are accepted --- .../src/protocol/generic_proto/behaviour.rs | 2 +- .../protocol/generic_proto/handler/group.rs | 132 ++++++++++++++---- 2 files changed, 102 insertions(+), 32 deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index bef9d6396e837..085c1ef581db6 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -1824,7 +1824,7 @@ impl NetworkBehaviour for GenericProto { NotifsHandlerOut::OpenResultErr => { debug!(target: "sub-libp2p", - "Handler({:?}, {:?}) => Failed to open substream with remote", + "Handler({:?}, {:?}) => OpenResultErr", source, connection); let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index c52e7f4eac5e6..ce988959f6f99 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -200,9 +200,19 @@ enum State { /// Vec of the same length as [`NotifsHandler::out_protocols`]. For each protocol, contains /// an outbound substream that has been accepted by the remote. /// - /// Items that contain `None` mean that a substream is still being opened. In other words, - /// this `Vec` is kind of a mirror version of [`State::Closed::pending_opening`]. - out_substreams: Vec>>, + /// Items that contain `None` mean that a substream is still being opened or has been + /// rejected by the remote. 
In other words, this `Vec` is kind of a mirror version of + /// [`State::Closed::pending_opening`]. + /// + /// Items that contain `Some(None)` have been rejected by the remote, most likely because + /// they don't support this protocol. At the time of writing, the external API doesn't + /// distinguish between the different protocols. From the external API's point of view, + /// either all protocols are open or none are open. In reality, light clients in particular + /// don't support for example the GrandPa protocol, and as such will refuse our outgoing + /// attempts. This is problematic in theory, but in practice this is handled properly at a + /// higher level. This flaw will fixed once the outer layers know to differentiate the + /// multiple protocols. + out_substreams: Vec>>>, }, /// Handler is in the "Open" state. @@ -600,7 +610,7 @@ impl ProtocolsHandler for NotifsHandler { } State::Opening { pending_handshake, in_substreams, out_substreams } => { debug_assert!(out_substreams[num].is_none()); - out_substreams[num] = Some(substream); + out_substreams[num] = Some(Some(substream)); if num == 0 { debug_assert!(pending_handshake.is_none()); @@ -620,9 +630,14 @@ impl ProtocolsHandler for NotifsHandler { debug_assert!(pending_handshake.is_some()); let pending_handshake = pending_handshake.take().unwrap_or_default(); + let out_substreams = out_substreams + .drain(..) + .map(|s| s.expect("checked by the if above; qed")) + .collect(); + self.state = State::Open { notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()), - out_substreams: mem::replace(out_substreams, Vec::new()), + out_substreams, in_substreams: mem::replace(in_substreams, Vec::new()), want_closed: false, }; @@ -712,9 +727,9 @@ impl ProtocolsHandler for NotifsHandler { }; if matches!(self.state, State::Opening { .. 
}) { - self.events_queue.push_back( - ProtocolsHandlerEvent::Custom(NotifsHandlerOut::OpenResultErr) - ); + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultErr + )); } }, State::OpenDesired { pending_opening, .. } => { @@ -744,19 +759,61 @@ impl ProtocolsHandler for NotifsHandler { pending_opening[num] = false; } - State::Opening { .. } => { - // TODO: close already-open substreams in a clean way? - let mut pending_opening = (0..self.out_protocols.len()) - .map(|_| true) - .collect::>(); - pending_opening[num] = false; - self.state = State::Closed { - pending_opening, - }; + State::Opening { in_substreams, pending_handshake, out_substreams } => { + // Failing to open a substream isn't considered a failure. Instead, it is marked + // as `Some(None)` and the opening continues. + + out_substreams[num] = Some(None); + + // Some substreams are still being opened. Nothing more to do. + if out_substreams.iter().any(|s| s.is_none()) { + return; + } + + // All substreams have finished being open. + // If the handshake has been received, proceed and report the opening. + + if let Some(pending_handshake) = pending_handshake.take() { + // Open! + let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); + let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); + let notifications_sink = NotificationsSink { + inner: Arc::new(NotificationsSinkInner { + async_channel: FuturesMutex::new(async_tx), + sync_channel: Mutex::new(sync_tx), + }), + }; + + let out_substreams = out_substreams + .drain(..) 
+ .map(|s| s.expect("checked by the if above; qed")) + .collect(); + + self.state = State::Open { + notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()), + out_substreams, + in_substreams: mem::replace(in_substreams, Vec::new()), + want_closed: false, + }; + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultOk { + endpoint: self.endpoint.clone(), + received_handshake: pending_handshake, + notifications_sink + } + )); + + } else { + // Open failure! + self.state = State::Closed { + pending_opening: (0..self.out_protocols.len()).map(|_| false).collect(), + }; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::OpenResultErr - )); + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultErr + )); + } } // No substream is being open when already `Open`. @@ -836,8 +893,7 @@ impl ProtocolsHandler for NotifsHandler { // Poll outbound substreams. match &mut self.state { - State::Open { out_substreams, .. } | - State::Opening { out_substreams, .. } => { + State::Open { out_substreams, want_closed, .. } => { let mut any_closed = false; for substream in out_substreams.iter_mut() { @@ -852,16 +908,30 @@ impl ProtocolsHandler for NotifsHandler { } if any_closed { - if let State::Open { want_closed, .. } = &mut self.state { - if !*want_closed { - *want_closed = true; - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsHandlerOut::CloseDesired)); + if !*want_closed { + *want_closed = true; + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsHandlerOut::CloseDesired)); + } + } + } + + State::Opening { out_substreams, pending_handshake, .. 
} => { + debug_assert!(out_substreams.iter().any(|s| s.is_none())); + + for (num, substream) in out_substreams.iter_mut().enumerate() { + match substream { + None | Some(None) => continue, + Some(Some(substream)) => match Sink::poll_flush(Pin::new(substream), cx) { + Poll::Pending | Poll::Ready(Ok(())) => continue, + Poll::Ready(Err(_)) => {} } - } else if let State::Opening { out_substreams, .. } = &mut self.state { - // TODO: dispose of `in_substreams` in a clean way - let pending_opening = out_substreams.iter().map(|s| s.is_none()).collect(); - self.state = State::Closed { pending_opening }; - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsHandlerOut::OpenResultErr)); + } + + // Reached if the substream has been closed. + *substream = Some(None); + if num == 0 { + // Cancel the handshake. + *pending_handshake = None; } } } From 797fb49e838f01fba581ce18b1c074390294e338 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 12 Nov 2020 12:13:53 +0100 Subject: [PATCH 19/39] Remove TODOs --- client/network/src/protocol/generic_proto/handler/group.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index ce988959f6f99..c81ae7e21631e 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -733,7 +733,6 @@ impl ProtocolsHandler for NotifsHandler { } }, State::OpenDesired { pending_opening, .. } => { - // TODO: close in_substreams in a clean way self.state = State::Closed { pending_opening: mem::replace(pending_opening, Vec::new()), }; @@ -859,7 +858,6 @@ impl ProtocolsHandler for NotifsHandler { }; return Poll::Ready(ProtocolsHandlerEvent::Custom(event)) }, - // TODO: close in a clean way? 
Some(Poll::Ready(None)) | Some(Poll::Ready(Some(Err(_)))) => *substream = None, } From 259ddd74088e53e7c6a9b0a62a8d1573a0063ce3 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 12 Nov 2020 12:49:48 +0100 Subject: [PATCH 20/39] Dummy commit to make CI log interesting things --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index cbb56fcf72679..30a223c863643 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -286,7 +286,7 @@ test-linux-stable-int: - echo "___Logs will be partly shown at the end in case of failure.___" - echo "___Full log will be saved to the job artifacts only in case of failure.___" - WASM_BUILD_NO_COLOR=1 - RUST_LOG=sync=trace,consensus=trace,client=trace,state-db=trace,db=trace,forks=trace,state_db=trace,storage_cache=trace + RUST_LOG=sub-libp2p=trace time cargo test -p node-cli --release --verbose --locked -- --ignored &> ${CI_COMMIT_SHORT_SHA}_int_failure.log - sccache -s From 0675c659d06195c30f8c5bc13e2d88141d57a3ba Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 12 Nov 2020 13:16:37 +0100 Subject: [PATCH 21/39] Try race condition fix --- .../src/protocol/generic_proto/handler/group.rs | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index c81ae7e21631e..38a2f4f260d01 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -550,25 +550,10 @@ impl ProtocolsHandler for NotifsHandler { }; }, State::OpenDesired { in_substreams, .. } => { - if in_substreams[num].is_some() { - // If a substream already exists, silently drop the new one. - // Note that we drop the substream, which will send an equivalent to a - // TCP "RST" to the remote and force-close the substream. It might - // seem like an unclean way to get rid of a substream. 
However, keep - // in mind that it is invalid for the remote to open multiple such - // substreams, and therefore sending a "RST" is the most correct thing - // to do. - return; - } in_substreams[num] = Some(proto); }, State::Opening { in_substreams, .. } | State::Open { in_substreams, .. } => { - if in_substreams[num].is_some() { - // Same remark as above. - return; - } - // We create `handshake_message` on a separate line to be sure // that the lock is released as soon as possible. let handshake_message = self.in_protocols[num].1.read().clone(); From c522ec5f5356385e8c0cc11c98bb91c2123ee1d0 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 12 Nov 2020 13:46:50 +0100 Subject: [PATCH 22/39] Revert "Try race condition fix" This reverts commit 0675c659d06195c30f8c5bc13e2d88141d57a3ba. --- .../src/protocol/generic_proto/handler/group.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index 38a2f4f260d01..c81ae7e21631e 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -550,10 +550,25 @@ impl ProtocolsHandler for NotifsHandler { }; }, State::OpenDesired { in_substreams, .. } => { + if in_substreams[num].is_some() { + // If a substream already exists, silently drop the new one. + // Note that we drop the substream, which will send an equivalent to a + // TCP "RST" to the remote and force-close the substream. It might + // seem like an unclean way to get rid of a substream. However, keep + // in mind that it is invalid for the remote to open multiple such + // substreams, and therefore sending a "RST" is the most correct thing + // to do. + return; + } in_substreams[num] = Some(proto); }, State::Opening { in_substreams, .. } | State::Open { in_substreams, .. } => { + if in_substreams[num].is_some() { + // Same remark as above. 
+ return; + } + // We create `handshake_message` on a separate line to be sure // that the lock is released as soon as possible. let handshake_message = self.in_protocols[num].1.read().clone(); From 57a0042d483bd588865ba71a7f41f1a8812f8147 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 12 Nov 2020 13:49:59 +0100 Subject: [PATCH 23/39] Correctly rebuild pending_opening --- .../network/src/protocol/generic_proto/handler/group.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index c81ae7e21631e..4d12fdeac03f8 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -720,11 +720,16 @@ impl ProtocolsHandler for NotifsHandler { } match &mut self.state { - State::Open { .. } | - State::Opening { .. } => { + State::Open { .. } => { self.state = State::Closed { pending_opening: Vec::new(), }; + }, + State::Opening { out_substreams, .. } => { + let pending_opening = out_substreams.iter().map(|s| s.is_none()).collect(); + self.state = State::Closed { + pending_opening, + }; if matches!(self.state, State::Opening { .. 
}) { self.events_queue.push_back(ProtocolsHandlerEvent::Custom( From 2ee20de268d6aec846a2250716376c56b63b8ab2 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 12 Nov 2020 14:05:34 +0100 Subject: [PATCH 24/39] Minor tweaks --- .../network/src/protocol/generic_proto/handler/group.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index 4d12fdeac03f8..1318bb88965cd 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -709,6 +709,7 @@ impl ProtocolsHandler for NotifsHandler { // As documented, it is forbidden to send an `Open` while there is already // one in the fly. error!(target: "sub-libp2p", "opening already-opened handler"); + debug_assert!(false); }, } }, @@ -731,11 +732,9 @@ impl ProtocolsHandler for NotifsHandler { pending_opening, }; - if matches!(self.state, State::Opening { .. }) { - self.events_queue.push_back(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::OpenResultErr - )); - } + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultErr + )); }, State::OpenDesired { pending_opening, .. 
} => { self.state = State::Closed { From e7852a231f4fc418898767aaa27c9a4358e12e8b Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 12 Nov 2020 14:06:29 +0100 Subject: [PATCH 25/39] Printlns for CI debugging --- client/network/src/protocol/generic_proto/handler/group.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index 1318bb88965cd..c6f5316ca835f 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -722,11 +722,13 @@ impl ProtocolsHandler for NotifsHandler { match &mut self.state { State::Open { .. } => { + println!("is open"); self.state = State::Closed { pending_opening: Vec::new(), }; }, State::Opening { out_substreams, .. } => { + println!("is opening"); let pending_opening = out_substreams.iter().map(|s| s.is_none()).collect(); self.state = State::Closed { pending_opening, @@ -737,11 +739,13 @@ impl ProtocolsHandler for NotifsHandler { )); }, State::OpenDesired { pending_opening, .. } => { + println!("is opendesired"); self.state = State::Closed { pending_opening: mem::replace(pending_opening, Vec::new()), }; } - State::Closed { .. } => {}, + State::Closed { .. } => { + println!("is closed");}, } self.events_queue.push_back( From 639a83c2e220b698463cd09cadca96f104453ea5 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 12 Nov 2020 14:17:53 +0100 Subject: [PATCH 26/39] Revert "Printlns for CI debugging" This reverts commit e7852a231f4fc418898767aaa27c9a4358e12e8b. 
--- client/network/src/protocol/generic_proto/handler/group.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index c6f5316ca835f..1318bb88965cd 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -722,13 +722,11 @@ impl ProtocolsHandler for NotifsHandler { match &mut self.state { State::Open { .. } => { - println!("is open"); self.state = State::Closed { pending_opening: Vec::new(), }; }, State::Opening { out_substreams, .. } => { - println!("is opening"); let pending_opening = out_substreams.iter().map(|s| s.is_none()).collect(); self.state = State::Closed { pending_opening, @@ -739,13 +737,11 @@ impl ProtocolsHandler for NotifsHandler { )); }, State::OpenDesired { pending_opening, .. } => { - println!("is opendesired"); self.state = State::Closed { pending_opening: mem::replace(pending_opening, Vec::new()), }; } - State::Closed { .. } => { - println!("is closed");}, + State::Closed { .. } => {}, } self.events_queue.push_back( From 55bd87f3e2d04243a7cab57c9fffb80f0f41b22f Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 12 Nov 2020 14:18:01 +0100 Subject: [PATCH 27/39] Revert "Dummy commit to make CI log interesting things" This reverts commit 259ddd74088e53e7c6a9b0a62a8d1573a0063ce3. 
--- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 30a223c863643..cbb56fcf72679 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -286,7 +286,7 @@ test-linux-stable-int: - echo "___Logs will be partly shown at the end in case of failure.___" - echo "___Full log will be saved to the job artifacts only in case of failure.___" - WASM_BUILD_NO_COLOR=1 - RUST_LOG=sub-libp2p=trace + RUST_LOG=sync=trace,consensus=trace,client=trace,state-db=trace,db=trace,forks=trace,state_db=trace,storage_cache=trace time cargo test -p node-cli --release --verbose --locked -- --ignored &> ${CI_COMMIT_SHORT_SHA}_int_failure.log - sccache -s From 2de810532fd7440f520500c6d4f517b33603dfab Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 12 Nov 2020 14:19:18 +0100 Subject: [PATCH 28/39] mv group.rs ../handler.rs --- .../src/protocol/generic_proto/handler.rs | 1051 +++++++++++++++- .../protocol/generic_proto/handler/group.rs | 1054 ----------------- 2 files changed, 1041 insertions(+), 1064 deletions(-) delete mode 100644 client/network/src/protocol/generic_proto/handler/group.rs diff --git a/client/network/src/protocol/generic_proto/handler.rs b/client/network/src/protocol/generic_proto/handler.rs index 980935387df00..1318bb88965cd 100644 --- a/client/network/src/protocol/generic_proto/handler.rs +++ b/client/network/src/protocol/generic_proto/handler.rs @@ -1,23 +1,1054 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify +// Substrate is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. 
-// This program is distributed in the hope that it will be useful, +// Substrate is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with this program. If not, see . +// along with Substrate. If not, see . + +//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for both incoming +//! and outgoing substreams for all gossiping protocols together. +//! +//! This is the main implementation of `ProtocolsHandler` in this crate, that handles all the +//! protocols that are Substrate-related and outside of the scope of libp2p. +//! +//! # Usage +//! +//! From an API perspective, the [`NotifsHandler`] is always in one of the following state: +//! +//! - Closed substreams. This is the initial state. +//! - Closed substreams, but remote desires them to be open. +//! - Open substreams. +//! - Open substreams, but remote desires them to be closed. +//! +//! The [`NotifsHandler`] can spontaneously switch between these states: +//! +//! - "Closed substreams" to "Closed substreams but open desired". When that happens, a +//! [`NotifsHandlerOut::OpenDesired`] is emitted. +//! - "Closed substreams but open desired" to "Closed substreams" (i.e. the remote has cancelled +//! their request). When that happens, a [`NotifsHandlerOut::CloseDesired`] is emitted. +//! - "Open substreams" to "Open substreams but close desired". When that happens, a +//! [`NotifsHandlerOut::CloseDesired`] is emitted. +//! +//! The user can instruct the `NotifsHandler` to switch from "closed" to "open" or vice-versa by +//! sending either a [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`]. The `NotifsHandler` +//! 
must answer with [`NotifsHandlerOut::OpenResultOk`] or [`NotifsHandlerOut::OpenResultErr`], or +//! with [`NotifsHandlerOut::CloseResult`]. +//! +//! When a [`NotifsHandlerOut::OpenResultOk`] is emitted, the `NotifsHandler` is now in the open +//! state. When a [`NotifsHandlerOut::OpenResultErr`] or [`NotifsHandlerOut::CloseResult`] is +//! emitted, the `NotifsHandler` is now (or remains) in the closed state. +//! +//! When a [`NotifsHandlerOut::OpenDesired`] is emitted, the user should always send back either a +//! [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`].If this isn't done, the remote will +//! be left in a pending state. +//! +//! It is illegal to send a [`NotifsHandlerIn::Open`] before a previously-emitted +//! [`NotifsHandlerIn::Open`] has gotten an answer. + +use crate::protocol::generic_proto::{ + upgrade::{ + NotificationsIn, NotificationsOut, NotificationsInSubstream, NotificationsOutSubstream, + NotificationsHandshakeError, RegisteredProtocol, RegisteredProtocolSubstream, + RegisteredProtocolEvent, UpgradeCollec + }, +}; -pub use self::group::{ - NotificationsSink, NotifsHandlerError, Ready, NotifsHandlerProto, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut +use bytes::BytesMut; +use libp2p::core::{either::EitherOutput, ConnectedPoint, PeerId}; +use libp2p::core::upgrade::{SelectUpgrade, InboundUpgrade, OutboundUpgrade}; +use libp2p::swarm::{ + ProtocolsHandler, ProtocolsHandlerEvent, + IntoProtocolsHandler, + KeepAlive, + ProtocolsHandlerUpgrErr, + SubstreamProtocol, + NegotiatedSubstream, }; +use futures::{ + channel::mpsc, + lock::{Mutex as FuturesMutex, MutexGuard as FuturesMutexGuard}, + prelude::* +}; +use log::error; +use parking_lot::{Mutex, RwLock}; +use smallvec::SmallVec; +use std::{borrow::Cow, collections::VecDeque, mem, pin::Pin, str, sync::Arc, task::{Context, Poll}, time::Duration}; +use wasm_timer::Instant; + +/// Number of pending notifications in asynchronous contexts. 
+/// See [`NotificationsSink::reserve_notification`] for context. +const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8; + +/// Number of pending notifications in synchronous contexts. +const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048; + +/// Maximum duration to open a substream and receive the handshake message. After that, we +/// consider that we failed to open the substream. +const OPEN_TIMEOUT: Duration = Duration::from_secs(10); + +/// After successfully establishing a connection with the remote, we keep the connection open for +/// at least this amount of time in order to give the rest of the code the chance to notify us to +/// open substreams. +const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5); + +/// Implements the `IntoProtocolsHandler` trait of libp2p. +/// +/// Every time a connection with a remote starts, an instance of this struct is created and +/// sent to a background task dedicated to this connection. Once the connection is established, +/// it is turned into a [`NotifsHandler`]. +/// +/// See the documentation at the module level for more information. +pub struct NotifsHandlerProto { + /// Prototypes for upgrades for inbound substreams, and the message we respond with in the + /// handshake. + in_protocols: Vec<(NotificationsIn, Arc>>)>, + + /// Name of protocols available for outbound substreams, and the initial handshake message we + /// send. + out_protocols: Vec<(Cow<'static, str>, Arc>>)>, + + /// Configuration for the legacy protocol upgrade. + legacy_protocol: RegisteredProtocol, +} + +/// The actual handler once the connection has been established. +/// +/// See the documentation at the module level for more information. +pub struct NotifsHandler { + /// Prototypes for upgrades for inbound substreams, and the message we respond with in the + /// handshake. + in_protocols: Vec<(NotificationsIn, Arc>>)>, + + /// Name of protocols available for outbound substreams, and the initial handshake message we + /// send. 
+ out_protocols: Vec<(Cow<'static, str>, Arc>>)>, + + /// When the connection with the remote has been successfully established. + when_connection_open: Instant, + + /// Whether we are the connection dialer or listener. + endpoint: ConnectedPoint, + + /// State of this handler. + state: State, + + /// Configuration for the legacy protocol upgrade. + legacy_protocol: RegisteredProtocol, + + /// The substreams where bidirectional communications happen. + legacy_substreams: SmallVec<[RegisteredProtocolSubstream; 4]>, + + /// Contains substreams which are being shut down. + legacy_shutdown: SmallVec<[RegisteredProtocolSubstream; 4]>, + + /// Events to return in priority from `poll`. + events_queue: VecDeque< + ProtocolsHandlerEvent + >, +} + +/// See the module-level documentation to learn about the meaning of these variants. +enum State { + /// Handler is in the "Closed" state. + Closed { + /// Vec of the same length as [`NotifsHandler::out_protocols`]. For each protocol, contains + /// a boolean indicating whether an outgoing substream is still in the process of being + /// opened. + pending_opening: Vec, + }, + + /// Handler is in the "Closed" state. A [`NotifsHandlerOut::OpenDesired`] has been emitted. + OpenDesired { + /// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains + /// a substream opened by the remote and that hasn't been accepted/rejected yet. + /// + /// Must always contain at least one `Some`. + in_substreams: Vec>>, + + /// See [`State::Closed::pending_opening`]. + pending_opening: Vec, + }, + + /// Handler is in the "Closed" state, but has received a [`NotifsHandlerIn::Open`] and is + /// consequently trying to open the various notifications substreams. + /// + /// A [`NotifsHandlerOut::OpenResultOk`] or a [`NotifsHandlerOut::OpenResultErr`] event must + /// be emitted when transitionning to respectively [`State::Open`] or [`State::Closed`]. 
+ Opening { + /// In the situation where either the legacy substream has been opened or the + /// handshake-bearing notifications protocol is open, but we haven't sent out any + /// [`NotifsHandlerOut::Open`] event yet, this contains the received handshake waiting to + /// be reported through the external API. + pending_handshake: Option>, + + /// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains + /// a substream opened by the remote and that has been accepted. + /// + /// Contrary to [`State::OpenDesired::in_substreams`], it is possible for this to contain + /// only `None`s. + in_substreams: Vec>>, + + /// Vec of the same length as [`NotifsHandler::out_protocols`]. For each protocol, contains + /// an outbound substream that has been accepted by the remote. + /// + /// Items that contain `None` mean that a substream is still being opened or has been + /// rejected by the remote. In other words, this `Vec` is kind of a mirror version of + /// [`State::Closed::pending_opening`]. + /// + /// Items that contain `Some(None)` have been rejected by the remote, most likely because + /// they don't support this protocol. At the time of writing, the external API doesn't + /// distinguish between the different protocols. From the external API's point of view, + /// either all protocols are open or none are open. In reality, light clients in particular + /// don't support for example the GrandPa protocol, and as such will refuse our outgoing + /// attempts. This is problematic in theory, but in practice this is handled properly at a + /// higher level. This flaw will fixed once the outer layers know to differentiate the + /// multiple protocols. + out_substreams: Vec>>>, + }, + + /// Handler is in the "Open" state. + Open { + /// Contains the two `Receiver`s connected to the [`NotificationsSink`] that has been + /// sent out. The notifications to send out can be pulled from this receivers. 
+ /// We use two different channels in order to have two different channel sizes, but from + /// the receiving point of view, the two channels are the same. + /// The receivers are fused in case the user drops the [`NotificationsSink`] entirely. + notifications_sink_rx: stream::Select< + stream::Fuse>, + stream::Fuse> + >, + + /// Vec of the same length as [`NotifsHandler::out_protocols`]. For each protocol, contains + /// an outbound substream that has been accepted by the remote. + /// + /// On transition to [`State::Open`], all the elements must be `Some`. Elements are + /// switched to `None` only if the remote closes substreams, in which case `want_closed` + /// must be true. + out_substreams: Vec>>, + + /// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains + /// a substream opened by the remote and that has been accepted. + /// + /// Contrary to [`State::OpenDesired::in_substreams`], it is possible for this to contain + /// only `None`s. + in_substreams: Vec>>, + + /// If true, at least one substream in [`State::Open::out_substreams`] has been closed or + /// reset by the remote and a [`NotifsHandlerOut::CloseDesired`] message has been sent + /// out. 
+ want_closed: bool, + }, +} + +impl IntoProtocolsHandler for NotifsHandlerProto { + type Handler = NotifsHandler; + + fn inbound_protocol(&self) -> SelectUpgrade, RegisteredProtocol> { + let in_protocols = self.in_protocols.iter() + .map(|(h, _)| h.clone()) + .collect::>(); + + SelectUpgrade::new(in_protocols, self.legacy_protocol.clone()) + } + + fn into_handler(self, _: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { + let num_out_proto = self.out_protocols.len(); + + NotifsHandler { + in_protocols: self.in_protocols, + out_protocols: self.out_protocols, + endpoint: connected_point.clone(), + when_connection_open: Instant::now(), + state: State::Closed { + pending_opening: (0..num_out_proto).map(|_| false).collect(), + }, + legacy_protocol: self.legacy_protocol, + legacy_substreams: SmallVec::new(), + legacy_shutdown: SmallVec::new(), + events_queue: VecDeque::with_capacity(16), + } + } +} + +/// Event that can be received by a `NotifsHandler`. +#[derive(Debug, Clone)] +pub enum NotifsHandlerIn { + /// Instruct the handler to open the notification substreams. + /// + /// Must always be answered by a [`NotifsHandlerOut::OpenResultOk`] or a + /// [`NotifsHandlerOut::OpenResultErr`] event. + /// + /// Importantly, it is forbidden to send a [`NotifsHandlerIn::Open`] while a previous one is + /// already in the fly. It is however possible if a `Close` is still in the fly. + Open, + + /// Instruct the handler to close the notification substreams, or reject any pending incoming + /// substream request. + /// + /// Must always be answered by a [`NotifsHandlerOut::CloseResult`] event. + Close, +} + +/// Event that can be emitted by a `NotifsHandler`. +#[derive(Debug)] +pub enum NotifsHandlerOut { + /// Acknowledges a [`NotifsHandlerIn::Open`]. + OpenResultOk { + /// The endpoint of the connection that is open for custom protocols. + endpoint: ConnectedPoint, + /// Handshake that was sent to us. 
+ /// This is normally a "Status" message, but this out of the concern of this code. + received_handshake: Vec, + /// How notifications can be sent to this node. + notifications_sink: NotificationsSink, + }, + + /// Acknowledges a [`NotifsHandlerIn::Open`]. The remote has refused the attempt to open + /// notification substreams. + OpenResultErr, + + /// Acknowledges a [`NotifsHandlerIn::Close`]. + CloseResult, + + /// The remote would like the substreams to be open. Send a [`NotifsHandlerIn::Open`] or a + /// [`NotifsHandlerIn::Close`] in order to either accept or deny this request. If a + /// [`NotifsHandlerIn::Open`] or [`NotifsHandlerIn::Close`] has been sent before and has not + /// yet been acknowledged by a matching [`NotifsHandlerOut`], then you don't need to a send + /// another [`NotifsHandlerIn`]. + OpenDesired, + + /// The remote would like the substreams to be closed. Send a [`NotifsHandlerIn::Close`] in + /// order to close them. If a [`NotifsHandlerIn::Close`] has been sent before and has not yet + /// been acknowledged by a [`NotifsHandlerOut::CloseResult`], then you don't need to a send + /// another one. + CloseDesired, + + /// Received a non-gossiping message on the legacy substream. + /// + /// Can only happen when the handler is in the open state. + CustomMessage { + /// Message that has been received. + /// + /// Keep in mind that this can be a `ConsensusMessage` message, which then contains a + /// notification. + message: BytesMut, + }, + + /// Received a message on a custom protocol substream. + /// + /// Can only happen when the handler is in the open state. + Notification { + /// Name of the protocol of the message. + protocol_name: Cow<'static, str>, + + /// Message that has been received. + message: BytesMut, + }, +} + +/// Sink connected directly to the node background task. Allows sending notifications to the peer. +/// +/// Can be cloned in order to obtain multiple references to the same peer. 
+#[derive(Debug, Clone)] +pub struct NotificationsSink { + inner: Arc, +} + +#[derive(Debug)] +struct NotificationsSinkInner { + /// Sender to use in asynchronous contexts. Uses an asynchronous mutex. + async_channel: FuturesMutex>, + /// Sender to use in synchronous contexts. Uses a synchronous mutex. + /// This channel has a large capacity and is meant to be used in contexts where + /// back-pressure cannot be properly exerted. + /// It will be removed in a future version. + sync_channel: Mutex>, +} + +/// Message emitted through the [`NotificationsSink`] and processed by the background task +/// dedicated to the peer. +#[derive(Debug)] +enum NotificationsSinkMessage { + /// Message emitted by [`NotificationsSink::reserve_notification`] and + /// [`NotificationsSink::write_notification_now`]. + Notification { + protocol_name: Cow<'static, str>, + message: Vec, + }, + + /// Must close the connection. + ForceClose, +} + +impl NotificationsSink { + /// Sends a notification to the peer. + /// + /// If too many messages are already buffered, the notification is silently discarded and the + /// connection to the peer will be closed shortly after. + /// + /// The protocol name is expected to be checked ahead of calling this method. It is a logic + /// error to send a notification using an unknown protocol. + /// + /// This method will be removed in a future version. + pub fn send_sync_notification<'a>( + &'a self, + protocol_name: Cow<'static, str>, + message: impl Into> + ) { + let mut lock = self.inner.sync_channel.lock(); + let result = lock.try_send(NotificationsSinkMessage::Notification { + protocol_name, + message: message.into() + }); + + if result.is_err() { + // Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the + // buffer, and therefore that `try_send` will succeed. 
+ let _result2 = lock.clone().try_send(NotificationsSinkMessage::ForceClose); + debug_assert!(_result2.map(|()| true).unwrap_or_else(|err| err.is_disconnected())); + } + } + + /// Wait until the remote is ready to accept a notification. + /// + /// Returns an error in the case where the connection is closed. + /// + /// The protocol name is expected to be checked ahead of calling this method. It is a logic + /// error to send a notification using an unknown protocol. + pub async fn reserve_notification<'a>(&'a self, protocol_name: Cow<'static, str>) -> Result, ()> { + let mut lock = self.inner.async_channel.lock().await; + + let poll_ready = future::poll_fn(|cx| lock.poll_ready(cx)).await; + if poll_ready.is_ok() { + Ok(Ready { protocol_name: protocol_name, lock }) + } else { + Err(()) + } + } +} + +/// Notification slot is reserved and the notification can actually be sent. +#[must_use] +#[derive(Debug)] +pub struct Ready<'a> { + /// Guarded channel. The channel inside is guaranteed to not be full. + lock: FuturesMutexGuard<'a, mpsc::Sender>, + /// Name of the protocol. Should match one of the protocols passed at initialization. + protocol_name: Cow<'static, str>, +} + +impl<'a> Ready<'a> { + /// Consumes this slots reservation and actually queues the notification. + /// + /// Returns an error if the substream has been closed. + pub fn send( + mut self, + notification: impl Into> + ) -> Result<(), ()> { + self.lock.start_send(NotificationsSinkMessage::Notification { + protocol_name: self.protocol_name, + message: notification.into(), + }).map_err(|_| ()) + } +} + +/// Error specific to the collection of protocols. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum NotifsHandlerError { + /// Channel of synchronous notifications is full. + SyncNotificationsClogged, +} + +impl NotifsHandlerProto { + /// Builds a new handler. + /// + /// `list` is a list of notification protocols names, and the message to send as part of the + /// handshake. 
At the moment, the message is always the same whether we open a substream + /// ourselves or respond to handshake from the remote. + /// + /// The first protocol in `list` is special-cased as the protocol that contains the handshake + /// to report through the [`NotifsHandlerOut::Open`] event. + /// + /// # Panic + /// + /// - Panics if `list` is empty. + /// + pub fn new( + legacy_protocol: RegisteredProtocol, + list: impl Into, Arc>>)>>, + ) -> Self { + let list = list.into(); + assert!(!list.is_empty()); + + let out_protocols = list + .clone() + .into_iter() + .collect(); + + let in_protocols = list.clone() + .into_iter() + .map(|(proto_name, msg)| (NotificationsIn::new(proto_name), msg)) + .collect(); + + NotifsHandlerProto { + in_protocols, + out_protocols, + legacy_protocol, + } + } +} + +impl ProtocolsHandler for NotifsHandler { + type InEvent = NotifsHandlerIn; + type OutEvent = NotifsHandlerOut; + type Error = NotifsHandlerError; + type InboundProtocol = SelectUpgrade, RegisteredProtocol>; + type OutboundProtocol = NotificationsOut; + // Index within the `out_protocols`. + type OutboundOpenInfo = usize; + type InboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + let in_protocols = self.in_protocols.iter() + .map(|(h, _)| h.clone()) + .collect::>(); + + let proto = SelectUpgrade::new(in_protocols, self.legacy_protocol.clone()); + SubstreamProtocol::new(proto, ()) + } + + fn inject_fully_negotiated_inbound( + &mut self, + out: >::Output, + (): () + ) { + match out { + // Received notifications substream. 
+ EitherOutput::First(((_remote_handshake, mut proto), num)) => { + match &mut self.state { + State::Closed { pending_opening } => { + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenDesired + )); + + let mut in_substreams = (0..self.in_protocols.len()) + .map(|_| None) + .collect::>(); + in_substreams[num] = Some(proto); + self.state = State::OpenDesired { + in_substreams, + pending_opening: mem::replace(pending_opening, Vec::new()), + }; + }, + State::OpenDesired { in_substreams, .. } => { + if in_substreams[num].is_some() { + // If a substream already exists, silently drop the new one. + // Note that we drop the substream, which will send an equivalent to a + // TCP "RST" to the remote and force-close the substream. It might + // seem like an unclean way to get rid of a substream. However, keep + // in mind that it is invalid for the remote to open multiple such + // substreams, and therefore sending a "RST" is the most correct thing + // to do. + return; + } + in_substreams[num] = Some(proto); + }, + State::Opening { in_substreams, .. } | + State::Open { in_substreams, .. } => { + if in_substreams[num].is_some() { + // Same remark as above. + return; + } + + // We create `handshake_message` on a separate line to be sure + // that the lock is released as soon as possible. + let handshake_message = self.in_protocols[num].1.read().clone(); + proto.send_handshake(handshake_message); + in_substreams[num] = Some(proto); + }, + }; + } + + // Received legacy substream. + EitherOutput::Second((substream, _handshake)) => { + // Note: while we awknowledge legacy substreams and handle incoming messages, + // it doesn't trigger any `OpenDesired` event as a way to simplify the logic of + // this code. + // Since mid-2019, legacy substreams are supposed to used at the same time as + // notifications substreams, and not in isolation. Nodes that open legacy + // substreams in isolation are considered deprecated. 
+ if self.legacy_substreams.len() <= 4 { + self.legacy_substreams.push(substream); + } + }, + } + } + + fn inject_fully_negotiated_outbound( + &mut self, + (handshake, substream): >::Output, + num: Self::OutboundOpenInfo + ) { + match &mut self.state { + State::Closed { pending_opening } | + State::OpenDesired { pending_opening, .. } => { + debug_assert!(pending_opening[num]); + pending_opening[num] = false; + } + State::Open { .. } => { + error!(target: "sub-libp2p", "☎️ State mismatch in notifications handler"); + debug_assert!(false); + } + State::Opening { pending_handshake, in_substreams, out_substreams } => { + debug_assert!(out_substreams[num].is_none()); + out_substreams[num] = Some(Some(substream)); + + if num == 0 { + debug_assert!(pending_handshake.is_none()); + *pending_handshake = Some(handshake); + } + + if !out_substreams.iter().any(|s| s.is_none()) { + let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); + let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); + let notifications_sink = NotificationsSink { + inner: Arc::new(NotificationsSinkInner { + async_channel: FuturesMutex::new(async_tx), + sync_channel: Mutex::new(sync_tx), + }), + }; + + debug_assert!(pending_handshake.is_some()); + let pending_handshake = pending_handshake.take().unwrap_or_default(); + + let out_substreams = out_substreams + .drain(..) 
+ .map(|s| s.expect("checked by the if above; qed")) + .collect(); + + self.state = State::Open { + notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()), + out_substreams, + in_substreams: mem::replace(in_substreams, Vec::new()), + want_closed: false, + }; + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultOk { + endpoint: self.endpoint.clone(), + received_handshake: pending_handshake, + notifications_sink + } + )); + } + } + } + } + + fn inject_event(&mut self, message: NotifsHandlerIn) { + match message { + NotifsHandlerIn::Open => { + match &mut self.state { + State::Closed { .. } | State::OpenDesired { .. } => { + let (pending_opening, mut in_substreams) = match &mut self.state { + State::Closed { pending_opening } => (pending_opening, None), + State::OpenDesired { pending_opening, in_substreams } => + (pending_opening, Some(mem::replace(in_substreams, Vec::new()))), + _ => unreachable!() + }; + + for (n, is_pending) in pending_opening.iter().enumerate() { + if *is_pending { + continue; + } + + let proto = NotificationsOut::new( + self.out_protocols[n].0.clone(), + self.out_protocols[n].1.read().clone() + ); + + self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(proto, n) + .with_timeout(OPEN_TIMEOUT), + }); + } + + if let Some(in_substreams) = in_substreams.as_mut() { + for (num, substream) in in_substreams.iter_mut().enumerate() { + let substream = match substream.as_mut() { + Some(s) => s, + None => continue, + }; + + let handshake_message = self.in_protocols[num].1.read().clone(); + substream.send_handshake(handshake_message); + } + } + + self.state = State::Opening { + pending_handshake: None, + in_substreams: if let Some(in_substreams) = in_substreams { + in_substreams + } else { + (0..self.in_protocols.len()).map(|_| None).collect() + }, + out_substreams: (0..self.out_protocols.len()).map(|_| None).collect(), + }; + }, + 
State::Opening { .. } | + State::Open { .. } => { + // As documented, it is forbidden to send an `Open` while there is already + // one in the fly. + error!(target: "sub-libp2p", "opening already-opened handler"); + debug_assert!(false); + }, + } + }, + + NotifsHandlerIn::Close => { + for mut substream in self.legacy_substreams.drain() { + substream.shutdown(); + self.legacy_shutdown.push(substream); + } + + match &mut self.state { + State::Open { .. } => { + self.state = State::Closed { + pending_opening: Vec::new(), + }; + }, + State::Opening { out_substreams, .. } => { + let pending_opening = out_substreams.iter().map(|s| s.is_none()).collect(); + self.state = State::Closed { + pending_opening, + }; + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultErr + )); + }, + State::OpenDesired { pending_opening, .. } => { + self.state = State::Closed { + pending_opening: mem::replace(pending_opening, Vec::new()), + }; + } + State::Closed { .. } => {}, + } + + self.events_queue.push_back( + ProtocolsHandlerEvent::Custom(NotifsHandlerOut::CloseResult) + ); + }, + } + } + + fn inject_dial_upgrade_error( + &mut self, + num: usize, + _: ProtocolsHandlerUpgrErr + ) { + match &mut self.state { + State::Closed { pending_opening } | State::OpenDesired { pending_opening, .. } => { + debug_assert!(pending_opening[num]); + pending_opening[num] = false; + } + + State::Opening { in_substreams, pending_handshake, out_substreams } => { + // Failing to open a substream isn't considered a failure. Instead, it is marked + // as `Some(None)` and the opening continues. + + out_substreams[num] = Some(None); + + // Some substreams are still being opened. Nothing more to do. + if out_substreams.iter().any(|s| s.is_none()) { + return; + } + + // All substreams have finished being open. + // If the handshake has been received, proceed and report the opening. + + if let Some(pending_handshake) = pending_handshake.take() { + // Open! 
+ let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); + let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); + let notifications_sink = NotificationsSink { + inner: Arc::new(NotificationsSinkInner { + async_channel: FuturesMutex::new(async_tx), + sync_channel: Mutex::new(sync_tx), + }), + }; + + let out_substreams = out_substreams + .drain(..) + .map(|s| s.expect("checked by the if above; qed")) + .collect(); + + self.state = State::Open { + notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()), + out_substreams, + in_substreams: mem::replace(in_substreams, Vec::new()), + want_closed: false, + }; + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultOk { + endpoint: self.endpoint.clone(), + received_handshake: pending_handshake, + notifications_sink + } + )); + + } else { + // Open failure! + self.state = State::Closed { + pending_opening: (0..self.out_protocols.len()).map(|_| false).collect(), + }; + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultErr + )); + } + } + + // No substream is being open when already `Open`. + State::Open { .. } => debug_assert!(false), + } + } + + fn connection_keep_alive(&self) -> KeepAlive { + if !self.legacy_substreams.is_empty() { + return KeepAlive::Yes; + } + + match self.state { + State::Closed { .. } => KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME), + State::OpenDesired { .. } | State::Opening { .. } | State::Open { .. } => + KeepAlive::Yes, + } + } + + fn poll( + &mut self, + cx: &mut Context, + ) -> Poll< + ProtocolsHandlerEvent + > { + if let Some(ev) = self.events_queue.pop_front() { + return Poll::Ready(ev); + } + + // Poll inbound substreams. + // Inbound substreams being closed is always tolerated, except for the `OpenDesired` state + // which might need to be switched back to `Closed`. + match &mut self.state { + State::Closed { .. 
} => {} + State::Open { in_substreams, .. } => { + for (num, substream) in in_substreams.iter_mut().enumerate() { + match substream.as_mut().map(|s| Stream::poll_next(Pin::new(s), cx)) { + None | Some(Poll::Pending) => continue, + Some(Poll::Ready(Some(Ok(message)))) => { + let event = NotifsHandlerOut::Notification { + message, + protocol_name: self.in_protocols[num].0.protocol_name().clone(), + }; + return Poll::Ready(ProtocolsHandlerEvent::Custom(event)) + }, + Some(Poll::Ready(None)) | Some(Poll::Ready(Some(Err(_)))) => + *substream = None, + } + } + } + + State::OpenDesired { in_substreams, .. } | + State::Opening { in_substreams, .. } => { + for substream in in_substreams { + match substream.as_mut().map(|s| NotificationsInSubstream::poll_process(Pin::new(s), cx)) { + None | Some(Poll::Pending) => continue, + Some(Poll::Ready(Ok(void))) => match void {}, + Some(Poll::Ready(Err(_))) => *substream = None, + } + } + } + } + + // Since the previous block might have closed inbound substreams, make sure that we can + // stay in `OpenDesired` state. + if let State::OpenDesired { in_substreams, pending_opening } = &mut self.state { + if !in_substreams.iter().any(|s| s.is_some()) { + self.state = State::Closed { + pending_opening: mem::replace(pending_opening, Vec::new()), + }; + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CloseDesired + )) + } + } + + // Poll outbound substreams. + match &mut self.state { + State::Open { out_substreams, want_closed, .. } => { + let mut any_closed = false; + + for substream in out_substreams.iter_mut() { + match substream.as_mut().map(|s| Sink::poll_flush(Pin::new(s), cx)) { + None | Some(Poll::Pending) | Some(Poll::Ready(Ok(()))) => continue, + Some(Poll::Ready(Err(_))) => {} + }; + + // Reached if the substream has been closed. 
+ *substream = None; + any_closed = true; + } + + if any_closed { + if !*want_closed { + *want_closed = true; + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsHandlerOut::CloseDesired)); + } + } + } + + State::Opening { out_substreams, pending_handshake, .. } => { + debug_assert!(out_substreams.iter().any(|s| s.is_none())); + + for (num, substream) in out_substreams.iter_mut().enumerate() { + match substream { + None | Some(None) => continue, + Some(Some(substream)) => match Sink::poll_flush(Pin::new(substream), cx) { + Poll::Pending | Poll::Ready(Ok(())) => continue, + Poll::Ready(Err(_)) => {} + } + } + + // Reached if the substream has been closed. + *substream = Some(None); + if num == 0 { + // Cancel the handshake. + *pending_handshake = None; + } + } + } + + State::Closed { .. } | + State::OpenDesired { .. } => {} + } + + if let State::Open { notifications_sink_rx, out_substreams, .. } = &mut self.state { + 'poll_notifs_sink: loop { + // Before we poll the notifications sink receiver, check that all the notification + // channels are ready to send a message. + // TODO: it is planned that in the future we switch to one `NotificationsSink` per + // protocol, in which case each sink should wait only for its corresponding handler + // to be ready, and not all handlers + // see https://github.com/paritytech/substrate/issues/5670 + for substream in out_substreams.iter_mut() { + match substream.as_mut().map(|s| s.poll_ready_unpin(cx)) { + None | Some(Poll::Ready(_)) => {}, + Some(Poll::Pending) => break 'poll_notifs_sink + } + } + + // Now that all substreams are ready for a message, grab what to send. 
+ let message = match notifications_sink_rx.poll_next_unpin(cx) { + Poll::Ready(Some(msg)) => msg, + Poll::Ready(None) | Poll::Pending => break, + }; + + match message { + NotificationsSinkMessage::Notification { + protocol_name, + message + } => { + if let Some(pos) = self.out_protocols.iter().position(|(n, _)| *n == protocol_name) { + if let Some(substream) = out_substreams[pos].as_mut() { + let _ = substream.start_send_unpin(message); + continue 'poll_notifs_sink; + } + + } else { + log::warn!( + target: "sub-libp2p", + "Tried to send a notification on non-registered protocol: {:?}", + protocol_name + ); + } + } + NotificationsSinkMessage::ForceClose => { + return Poll::Ready( + ProtocolsHandlerEvent::Close(NotifsHandlerError::SyncNotificationsClogged) + ); + } + } + } + } + + // The legacy substreams are polled only if the state is `Open`. Otherwise, it would be + // possible to receive notifications that would need to get silently discarded. + if matches!(self.state, State::Open { .. }) { + for n in (0..self.legacy_substreams.len()).rev() { + let mut substream = self.legacy_substreams.swap_remove(n); + let poll_outcome = Pin::new(&mut substream).poll_next(cx); + match poll_outcome { + Poll::Pending => self.legacy_substreams.push(substream), + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => { + self.legacy_substreams.push(substream); + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CustomMessage { message } + )) + }, + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) => { + return Poll::Ready(ProtocolsHandlerEvent::Close( + NotifsHandlerError::SyncNotificationsClogged + )) + } + Poll::Ready(None) | Poll::Ready(Some(Err(_))) => { + if matches!(poll_outcome, Poll::Ready(None)) { + self.legacy_shutdown.push(substream); + } + + if let State::Open { want_closed, .. 
} = &mut self.state { + if !*want_closed { + *want_closed = true; + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CloseDesired + )) + } + } + } + } + } + } + + shutdown_list(&mut self.legacy_shutdown, cx); + + Poll::Pending + } +} -mod group; +/// Given a list of substreams, tries to shut them down. The substreams that have been successfully +/// shut down are removed from the list. +fn shutdown_list + (list: &mut SmallVec>>, + cx: &mut Context) +{ + 'outer: for n in (0..list.len()).rev() { + let mut substream = list.swap_remove(n); + loop { + match substream.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(_))) => {} + Poll::Pending => break, + Poll::Ready(Some(Err(_))) | Poll::Ready(None) => continue 'outer, + } + } + list.push(substream); + } +} diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs deleted file mode 100644 index 1318bb88965cd..0000000000000 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ /dev/null @@ -1,1054 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for both incoming -//! and outgoing substreams for all gossiping protocols together. -//! -//! 
This is the main implementation of `ProtocolsHandler` in this crate, that handles all the -//! protocols that are Substrate-related and outside of the scope of libp2p. -//! -//! # Usage -//! -//! From an API perspective, the [`NotifsHandler`] is always in one of the following state: -//! -//! - Closed substreams. This is the initial state. -//! - Closed substreams, but remote desires them to be open. -//! - Open substreams. -//! - Open substreams, but remote desires them to be closed. -//! -//! The [`NotifsHandler`] can spontaneously switch between these states: -//! -//! - "Closed substreams" to "Closed substreams but open desired". When that happens, a -//! [`NotifsHandlerOut::OpenDesired`] is emitted. -//! - "Closed substreams but open desired" to "Closed substreams" (i.e. the remote has cancelled -//! their request). When that happens, a [`NotifsHandlerOut::CloseDesired`] is emitted. -//! - "Open substreams" to "Open substreams but close desired". When that happens, a -//! [`NotifsHandlerOut::CloseDesired`] is emitted. -//! -//! The user can instruct the `NotifsHandler` to switch from "closed" to "open" or vice-versa by -//! sending either a [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`]. The `NotifsHandler` -//! must answer with [`NotifsHandlerOut::OpenResultOk`] or [`NotifsHandlerOut::OpenResultErr`], or -//! with [`NotifsHandlerOut::CloseResult`]. -//! -//! When a [`NotifsHandlerOut::OpenResultOk`] is emitted, the `NotifsHandler` is now in the open -//! state. When a [`NotifsHandlerOut::OpenResultErr`] or [`NotifsHandlerOut::CloseResult`] is -//! emitted, the `NotifsHandler` is now (or remains) in the closed state. -//! -//! When a [`NotifsHandlerOut::OpenDesired`] is emitted, the user should always send back either a -//! [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`].If this isn't done, the remote will -//! be left in a pending state. -//! -//! It is illegal to send a [`NotifsHandlerIn::Open`] before a previously-emitted -//! 
[`NotifsHandlerIn::Open`] has gotten an answer. - -use crate::protocol::generic_proto::{ - upgrade::{ - NotificationsIn, NotificationsOut, NotificationsInSubstream, NotificationsOutSubstream, - NotificationsHandshakeError, RegisteredProtocol, RegisteredProtocolSubstream, - RegisteredProtocolEvent, UpgradeCollec - }, -}; - -use bytes::BytesMut; -use libp2p::core::{either::EitherOutput, ConnectedPoint, PeerId}; -use libp2p::core::upgrade::{SelectUpgrade, InboundUpgrade, OutboundUpgrade}; -use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, -}; -use futures::{ - channel::mpsc, - lock::{Mutex as FuturesMutex, MutexGuard as FuturesMutexGuard}, - prelude::* -}; -use log::error; -use parking_lot::{Mutex, RwLock}; -use smallvec::SmallVec; -use std::{borrow::Cow, collections::VecDeque, mem, pin::Pin, str, sync::Arc, task::{Context, Poll}, time::Duration}; -use wasm_timer::Instant; - -/// Number of pending notifications in asynchronous contexts. -/// See [`NotificationsSink::reserve_notification`] for context. -const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8; - -/// Number of pending notifications in synchronous contexts. -const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048; - -/// Maximum duration to open a substream and receive the handshake message. After that, we -/// consider that we failed to open the substream. -const OPEN_TIMEOUT: Duration = Duration::from_secs(10); - -/// After successfully establishing a connection with the remote, we keep the connection open for -/// at least this amount of time in order to give the rest of the code the chance to notify us to -/// open substreams. -const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5); - -/// Implements the `IntoProtocolsHandler` trait of libp2p. 
-/// -/// Every time a connection with a remote starts, an instance of this struct is created and -/// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a [`NotifsHandler`]. -/// -/// See the documentation at the module level for more information. -pub struct NotifsHandlerProto { - /// Prototypes for upgrades for inbound substreams, and the message we respond with in the - /// handshake. - in_protocols: Vec<(NotificationsIn, Arc>>)>, - - /// Name of protocols available for outbound substreams, and the initial handshake message we - /// send. - out_protocols: Vec<(Cow<'static, str>, Arc>>)>, - - /// Configuration for the legacy protocol upgrade. - legacy_protocol: RegisteredProtocol, -} - -/// The actual handler once the connection has been established. -/// -/// See the documentation at the module level for more information. -pub struct NotifsHandler { - /// Prototypes for upgrades for inbound substreams, and the message we respond with in the - /// handshake. - in_protocols: Vec<(NotificationsIn, Arc>>)>, - - /// Name of protocols available for outbound substreams, and the initial handshake message we - /// send. - out_protocols: Vec<(Cow<'static, str>, Arc>>)>, - - /// When the connection with the remote has been successfully established. - when_connection_open: Instant, - - /// Whether we are the connection dialer or listener. - endpoint: ConnectedPoint, - - /// State of this handler. - state: State, - - /// Configuration for the legacy protocol upgrade. - legacy_protocol: RegisteredProtocol, - - /// The substreams where bidirectional communications happen. - legacy_substreams: SmallVec<[RegisteredProtocolSubstream; 4]>, - - /// Contains substreams which are being shut down. - legacy_shutdown: SmallVec<[RegisteredProtocolSubstream; 4]>, - - /// Events to return in priority from `poll`. 
- events_queue: VecDeque< - ProtocolsHandlerEvent - >, -} - -/// See the module-level documentation to learn about the meaning of these variants. -enum State { - /// Handler is in the "Closed" state. - Closed { - /// Vec of the same length as [`NotifsHandler::out_protocols`]. For each protocol, contains - /// a boolean indicating whether an outgoing substream is still in the process of being - /// opened. - pending_opening: Vec, - }, - - /// Handler is in the "Closed" state. A [`NotifsHandlerOut::OpenDesired`] has been emitted. - OpenDesired { - /// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains - /// a substream opened by the remote and that hasn't been accepted/rejected yet. - /// - /// Must always contain at least one `Some`. - in_substreams: Vec>>, - - /// See [`State::Closed::pending_opening`]. - pending_opening: Vec, - }, - - /// Handler is in the "Closed" state, but has received a [`NotifsHandlerIn::Open`] and is - /// consequently trying to open the various notifications substreams. - /// - /// A [`NotifsHandlerOut::OpenResultOk`] or a [`NotifsHandlerOut::OpenResultErr`] event must - /// be emitted when transitionning to respectively [`State::Open`] or [`State::Closed`]. - Opening { - /// In the situation where either the legacy substream has been opened or the - /// handshake-bearing notifications protocol is open, but we haven't sent out any - /// [`NotifsHandlerOut::Open`] event yet, this contains the received handshake waiting to - /// be reported through the external API. - pending_handshake: Option>, - - /// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains - /// a substream opened by the remote and that has been accepted. - /// - /// Contrary to [`State::OpenDesired::in_substreams`], it is possible for this to contain - /// only `None`s. - in_substreams: Vec>>, - - /// Vec of the same length as [`NotifsHandler::out_protocols`]. 
For each protocol, contains - /// an outbound substream that has been accepted by the remote. - /// - /// Items that contain `None` mean that a substream is still being opened or has been - /// rejected by the remote. In other words, this `Vec` is kind of a mirror version of - /// [`State::Closed::pending_opening`]. - /// - /// Items that contain `Some(None)` have been rejected by the remote, most likely because - /// they don't support this protocol. At the time of writing, the external API doesn't - /// distinguish between the different protocols. From the external API's point of view, - /// either all protocols are open or none are open. In reality, light clients in particular - /// don't support for example the GrandPa protocol, and as such will refuse our outgoing - /// attempts. This is problematic in theory, but in practice this is handled properly at a - /// higher level. This flaw will fixed once the outer layers know to differentiate the - /// multiple protocols. - out_substreams: Vec>>>, - }, - - /// Handler is in the "Open" state. - Open { - /// Contains the two `Receiver`s connected to the [`NotificationsSink`] that has been - /// sent out. The notifications to send out can be pulled from this receivers. - /// We use two different channels in order to have two different channel sizes, but from - /// the receiving point of view, the two channels are the same. - /// The receivers are fused in case the user drops the [`NotificationsSink`] entirely. - notifications_sink_rx: stream::Select< - stream::Fuse>, - stream::Fuse> - >, - - /// Vec of the same length as [`NotifsHandler::out_protocols`]. For each protocol, contains - /// an outbound substream that has been accepted by the remote. - /// - /// On transition to [`State::Open`], all the elements must be `Some`. Elements are - /// switched to `None` only if the remote closes substreams, in which case `want_closed` - /// must be true. 
- out_substreams: Vec>>, - - /// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains - /// a substream opened by the remote and that has been accepted. - /// - /// Contrary to [`State::OpenDesired::in_substreams`], it is possible for this to contain - /// only `None`s. - in_substreams: Vec>>, - - /// If true, at least one substream in [`State::Open::out_substreams`] has been closed or - /// reset by the remote and a [`NotifsHandlerOut::CloseDesired`] message has been sent - /// out. - want_closed: bool, - }, -} - -impl IntoProtocolsHandler for NotifsHandlerProto { - type Handler = NotifsHandler; - - fn inbound_protocol(&self) -> SelectUpgrade, RegisteredProtocol> { - let in_protocols = self.in_protocols.iter() - .map(|(h, _)| h.clone()) - .collect::>(); - - SelectUpgrade::new(in_protocols, self.legacy_protocol.clone()) - } - - fn into_handler(self, _: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { - let num_out_proto = self.out_protocols.len(); - - NotifsHandler { - in_protocols: self.in_protocols, - out_protocols: self.out_protocols, - endpoint: connected_point.clone(), - when_connection_open: Instant::now(), - state: State::Closed { - pending_opening: (0..num_out_proto).map(|_| false).collect(), - }, - legacy_protocol: self.legacy_protocol, - legacy_substreams: SmallVec::new(), - legacy_shutdown: SmallVec::new(), - events_queue: VecDeque::with_capacity(16), - } - } -} - -/// Event that can be received by a `NotifsHandler`. -#[derive(Debug, Clone)] -pub enum NotifsHandlerIn { - /// Instruct the handler to open the notification substreams. - /// - /// Must always be answered by a [`NotifsHandlerOut::OpenResultOk`] or a - /// [`NotifsHandlerOut::OpenResultErr`] event. - /// - /// Importantly, it is forbidden to send a [`NotifsHandlerIn::Open`] while a previous one is - /// already in the fly. It is however possible if a `Close` is still in the fly. 
- Open, - - /// Instruct the handler to close the notification substreams, or reject any pending incoming - /// substream request. - /// - /// Must always be answered by a [`NotifsHandlerOut::CloseResult`] event. - Close, -} - -/// Event that can be emitted by a `NotifsHandler`. -#[derive(Debug)] -pub enum NotifsHandlerOut { - /// Acknowledges a [`NotifsHandlerIn::Open`]. - OpenResultOk { - /// The endpoint of the connection that is open for custom protocols. - endpoint: ConnectedPoint, - /// Handshake that was sent to us. - /// This is normally a "Status" message, but this out of the concern of this code. - received_handshake: Vec, - /// How notifications can be sent to this node. - notifications_sink: NotificationsSink, - }, - - /// Acknowledges a [`NotifsHandlerIn::Open`]. The remote has refused the attempt to open - /// notification substreams. - OpenResultErr, - - /// Acknowledges a [`NotifsHandlerIn::Close`]. - CloseResult, - - /// The remote would like the substreams to be open. Send a [`NotifsHandlerIn::Open`] or a - /// [`NotifsHandlerIn::Close`] in order to either accept or deny this request. If a - /// [`NotifsHandlerIn::Open`] or [`NotifsHandlerIn::Close`] has been sent before and has not - /// yet been acknowledged by a matching [`NotifsHandlerOut`], then you don't need to a send - /// another [`NotifsHandlerIn`]. - OpenDesired, - - /// The remote would like the substreams to be closed. Send a [`NotifsHandlerIn::Close`] in - /// order to close them. If a [`NotifsHandlerIn::Close`] has been sent before and has not yet - /// been acknowledged by a [`NotifsHandlerOut::CloseResult`], then you don't need to a send - /// another one. - CloseDesired, - - /// Received a non-gossiping message on the legacy substream. - /// - /// Can only happen when the handler is in the open state. - CustomMessage { - /// Message that has been received. - /// - /// Keep in mind that this can be a `ConsensusMessage` message, which then contains a - /// notification. 
- message: BytesMut, - }, - - /// Received a message on a custom protocol substream. - /// - /// Can only happen when the handler is in the open state. - Notification { - /// Name of the protocol of the message. - protocol_name: Cow<'static, str>, - - /// Message that has been received. - message: BytesMut, - }, -} - -/// Sink connected directly to the node background task. Allows sending notifications to the peer. -/// -/// Can be cloned in order to obtain multiple references to the same peer. -#[derive(Debug, Clone)] -pub struct NotificationsSink { - inner: Arc, -} - -#[derive(Debug)] -struct NotificationsSinkInner { - /// Sender to use in asynchronous contexts. Uses an asynchronous mutex. - async_channel: FuturesMutex>, - /// Sender to use in synchronous contexts. Uses a synchronous mutex. - /// This channel has a large capacity and is meant to be used in contexts where - /// back-pressure cannot be properly exerted. - /// It will be removed in a future version. - sync_channel: Mutex>, -} - -/// Message emitted through the [`NotificationsSink`] and processed by the background task -/// dedicated to the peer. -#[derive(Debug)] -enum NotificationsSinkMessage { - /// Message emitted by [`NotificationsSink::reserve_notification`] and - /// [`NotificationsSink::write_notification_now`]. - Notification { - protocol_name: Cow<'static, str>, - message: Vec, - }, - - /// Must close the connection. - ForceClose, -} - -impl NotificationsSink { - /// Sends a notification to the peer. - /// - /// If too many messages are already buffered, the notification is silently discarded and the - /// connection to the peer will be closed shortly after. - /// - /// The protocol name is expected to be checked ahead of calling this method. It is a logic - /// error to send a notification using an unknown protocol. - /// - /// This method will be removed in a future version. 
- pub fn send_sync_notification<'a>( - &'a self, - protocol_name: Cow<'static, str>, - message: impl Into> - ) { - let mut lock = self.inner.sync_channel.lock(); - let result = lock.try_send(NotificationsSinkMessage::Notification { - protocol_name, - message: message.into() - }); - - if result.is_err() { - // Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the - // buffer, and therefore that `try_send` will succeed. - let _result2 = lock.clone().try_send(NotificationsSinkMessage::ForceClose); - debug_assert!(_result2.map(|()| true).unwrap_or_else(|err| err.is_disconnected())); - } - } - - /// Wait until the remote is ready to accept a notification. - /// - /// Returns an error in the case where the connection is closed. - /// - /// The protocol name is expected to be checked ahead of calling this method. It is a logic - /// error to send a notification using an unknown protocol. - pub async fn reserve_notification<'a>(&'a self, protocol_name: Cow<'static, str>) -> Result, ()> { - let mut lock = self.inner.async_channel.lock().await; - - let poll_ready = future::poll_fn(|cx| lock.poll_ready(cx)).await; - if poll_ready.is_ok() { - Ok(Ready { protocol_name: protocol_name, lock }) - } else { - Err(()) - } - } -} - -/// Notification slot is reserved and the notification can actually be sent. -#[must_use] -#[derive(Debug)] -pub struct Ready<'a> { - /// Guarded channel. The channel inside is guaranteed to not be full. - lock: FuturesMutexGuard<'a, mpsc::Sender>, - /// Name of the protocol. Should match one of the protocols passed at initialization. - protocol_name: Cow<'static, str>, -} - -impl<'a> Ready<'a> { - /// Consumes this slots reservation and actually queues the notification. - /// - /// Returns an error if the substream has been closed. 
- pub fn send( - mut self, - notification: impl Into> - ) -> Result<(), ()> { - self.lock.start_send(NotificationsSinkMessage::Notification { - protocol_name: self.protocol_name, - message: notification.into(), - }).map_err(|_| ()) - } -} - -/// Error specific to the collection of protocols. -#[derive(Debug, derive_more::Display, derive_more::Error)] -pub enum NotifsHandlerError { - /// Channel of synchronous notifications is full. - SyncNotificationsClogged, -} - -impl NotifsHandlerProto { - /// Builds a new handler. - /// - /// `list` is a list of notification protocols names, and the message to send as part of the - /// handshake. At the moment, the message is always the same whether we open a substream - /// ourselves or respond to handshake from the remote. - /// - /// The first protocol in `list` is special-cased as the protocol that contains the handshake - /// to report through the [`NotifsHandlerOut::Open`] event. - /// - /// # Panic - /// - /// - Panics if `list` is empty. - /// - pub fn new( - legacy_protocol: RegisteredProtocol, - list: impl Into, Arc>>)>>, - ) -> Self { - let list = list.into(); - assert!(!list.is_empty()); - - let out_protocols = list - .clone() - .into_iter() - .collect(); - - let in_protocols = list.clone() - .into_iter() - .map(|(proto_name, msg)| (NotificationsIn::new(proto_name), msg)) - .collect(); - - NotifsHandlerProto { - in_protocols, - out_protocols, - legacy_protocol, - } - } -} - -impl ProtocolsHandler for NotifsHandler { - type InEvent = NotifsHandlerIn; - type OutEvent = NotifsHandlerOut; - type Error = NotifsHandlerError; - type InboundProtocol = SelectUpgrade, RegisteredProtocol>; - type OutboundProtocol = NotificationsOut; - // Index within the `out_protocols`. 
- type OutboundOpenInfo = usize; - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - let in_protocols = self.in_protocols.iter() - .map(|(h, _)| h.clone()) - .collect::>(); - - let proto = SelectUpgrade::new(in_protocols, self.legacy_protocol.clone()); - SubstreamProtocol::new(proto, ()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - out: >::Output, - (): () - ) { - match out { - // Received notifications substream. - EitherOutput::First(((_remote_handshake, mut proto), num)) => { - match &mut self.state { - State::Closed { pending_opening } => { - self.events_queue.push_back(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::OpenDesired - )); - - let mut in_substreams = (0..self.in_protocols.len()) - .map(|_| None) - .collect::>(); - in_substreams[num] = Some(proto); - self.state = State::OpenDesired { - in_substreams, - pending_opening: mem::replace(pending_opening, Vec::new()), - }; - }, - State::OpenDesired { in_substreams, .. } => { - if in_substreams[num].is_some() { - // If a substream already exists, silently drop the new one. - // Note that we drop the substream, which will send an equivalent to a - // TCP "RST" to the remote and force-close the substream. It might - // seem like an unclean way to get rid of a substream. However, keep - // in mind that it is invalid for the remote to open multiple such - // substreams, and therefore sending a "RST" is the most correct thing - // to do. - return; - } - in_substreams[num] = Some(proto); - }, - State::Opening { in_substreams, .. } | - State::Open { in_substreams, .. } => { - if in_substreams[num].is_some() { - // Same remark as above. - return; - } - - // We create `handshake_message` on a separate line to be sure - // that the lock is released as soon as possible. - let handshake_message = self.in_protocols[num].1.read().clone(); - proto.send_handshake(handshake_message); - in_substreams[num] = Some(proto); - }, - }; - } - - // Received legacy substream. 
- EitherOutput::Second((substream, _handshake)) => { - // Note: while we awknowledge legacy substreams and handle incoming messages, - // it doesn't trigger any `OpenDesired` event as a way to simplify the logic of - // this code. - // Since mid-2019, legacy substreams are supposed to used at the same time as - // notifications substreams, and not in isolation. Nodes that open legacy - // substreams in isolation are considered deprecated. - if self.legacy_substreams.len() <= 4 { - self.legacy_substreams.push(substream); - } - }, - } - } - - fn inject_fully_negotiated_outbound( - &mut self, - (handshake, substream): >::Output, - num: Self::OutboundOpenInfo - ) { - match &mut self.state { - State::Closed { pending_opening } | - State::OpenDesired { pending_opening, .. } => { - debug_assert!(pending_opening[num]); - pending_opening[num] = false; - } - State::Open { .. } => { - error!(target: "sub-libp2p", "☎️ State mismatch in notifications handler"); - debug_assert!(false); - } - State::Opening { pending_handshake, in_substreams, out_substreams } => { - debug_assert!(out_substreams[num].is_none()); - out_substreams[num] = Some(Some(substream)); - - if num == 0 { - debug_assert!(pending_handshake.is_none()); - *pending_handshake = Some(handshake); - } - - if !out_substreams.iter().any(|s| s.is_none()) { - let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); - let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); - let notifications_sink = NotificationsSink { - inner: Arc::new(NotificationsSinkInner { - async_channel: FuturesMutex::new(async_tx), - sync_channel: Mutex::new(sync_tx), - }), - }; - - debug_assert!(pending_handshake.is_some()); - let pending_handshake = pending_handshake.take().unwrap_or_default(); - - let out_substreams = out_substreams - .drain(..) 
- .map(|s| s.expect("checked by the if above; qed")) - .collect(); - - self.state = State::Open { - notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()), - out_substreams, - in_substreams: mem::replace(in_substreams, Vec::new()), - want_closed: false, - }; - - self.events_queue.push_back(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::OpenResultOk { - endpoint: self.endpoint.clone(), - received_handshake: pending_handshake, - notifications_sink - } - )); - } - } - } - } - - fn inject_event(&mut self, message: NotifsHandlerIn) { - match message { - NotifsHandlerIn::Open => { - match &mut self.state { - State::Closed { .. } | State::OpenDesired { .. } => { - let (pending_opening, mut in_substreams) = match &mut self.state { - State::Closed { pending_opening } => (pending_opening, None), - State::OpenDesired { pending_opening, in_substreams } => - (pending_opening, Some(mem::replace(in_substreams, Vec::new()))), - _ => unreachable!() - }; - - for (n, is_pending) in pending_opening.iter().enumerate() { - if *is_pending { - continue; - } - - let proto = NotificationsOut::new( - self.out_protocols[n].0.clone(), - self.out_protocols[n].1.read().clone() - ); - - self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto, n) - .with_timeout(OPEN_TIMEOUT), - }); - } - - if let Some(in_substreams) = in_substreams.as_mut() { - for (num, substream) in in_substreams.iter_mut().enumerate() { - let substream = match substream.as_mut() { - Some(s) => s, - None => continue, - }; - - let handshake_message = self.in_protocols[num].1.read().clone(); - substream.send_handshake(handshake_message); - } - } - - self.state = State::Opening { - pending_handshake: None, - in_substreams: if let Some(in_substreams) = in_substreams { - in_substreams - } else { - (0..self.in_protocols.len()).map(|_| None).collect() - }, - out_substreams: (0..self.out_protocols.len()).map(|_| None).collect(), - }; - }, - 
State::Opening { .. } | - State::Open { .. } => { - // As documented, it is forbidden to send an `Open` while there is already - // one in the fly. - error!(target: "sub-libp2p", "opening already-opened handler"); - debug_assert!(false); - }, - } - }, - - NotifsHandlerIn::Close => { - for mut substream in self.legacy_substreams.drain() { - substream.shutdown(); - self.legacy_shutdown.push(substream); - } - - match &mut self.state { - State::Open { .. } => { - self.state = State::Closed { - pending_opening: Vec::new(), - }; - }, - State::Opening { out_substreams, .. } => { - let pending_opening = out_substreams.iter().map(|s| s.is_none()).collect(); - self.state = State::Closed { - pending_opening, - }; - - self.events_queue.push_back(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::OpenResultErr - )); - }, - State::OpenDesired { pending_opening, .. } => { - self.state = State::Closed { - pending_opening: mem::replace(pending_opening, Vec::new()), - }; - } - State::Closed { .. } => {}, - } - - self.events_queue.push_back( - ProtocolsHandlerEvent::Custom(NotifsHandlerOut::CloseResult) - ); - }, - } - } - - fn inject_dial_upgrade_error( - &mut self, - num: usize, - _: ProtocolsHandlerUpgrErr - ) { - match &mut self.state { - State::Closed { pending_opening } | State::OpenDesired { pending_opening, .. } => { - debug_assert!(pending_opening[num]); - pending_opening[num] = false; - } - - State::Opening { in_substreams, pending_handshake, out_substreams } => { - // Failing to open a substream isn't considered a failure. Instead, it is marked - // as `Some(None)` and the opening continues. - - out_substreams[num] = Some(None); - - // Some substreams are still being opened. Nothing more to do. - if out_substreams.iter().any(|s| s.is_none()) { - return; - } - - // All substreams have finished being open. - // If the handshake has been received, proceed and report the opening. - - if let Some(pending_handshake) = pending_handshake.take() { - // Open! 
- let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); - let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); - let notifications_sink = NotificationsSink { - inner: Arc::new(NotificationsSinkInner { - async_channel: FuturesMutex::new(async_tx), - sync_channel: Mutex::new(sync_tx), - }), - }; - - let out_substreams = out_substreams - .drain(..) - .map(|s| s.expect("checked by the if above; qed")) - .collect(); - - self.state = State::Open { - notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()), - out_substreams, - in_substreams: mem::replace(in_substreams, Vec::new()), - want_closed: false, - }; - - self.events_queue.push_back(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::OpenResultOk { - endpoint: self.endpoint.clone(), - received_handshake: pending_handshake, - notifications_sink - } - )); - - } else { - // Open failure! - self.state = State::Closed { - pending_opening: (0..self.out_protocols.len()).map(|_| false).collect(), - }; - - self.events_queue.push_back(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::OpenResultErr - )); - } - } - - // No substream is being open when already `Open`. - State::Open { .. } => debug_assert!(false), - } - } - - fn connection_keep_alive(&self) -> KeepAlive { - if !self.legacy_substreams.is_empty() { - return KeepAlive::Yes; - } - - match self.state { - State::Closed { .. } => KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME), - State::OpenDesired { .. } | State::Opening { .. } | State::Open { .. } => - KeepAlive::Yes, - } - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll< - ProtocolsHandlerEvent - > { - if let Some(ev) = self.events_queue.pop_front() { - return Poll::Ready(ev); - } - - // Poll inbound substreams. - // Inbound substreams being closed is always tolerated, except for the `OpenDesired` state - // which might need to be switched back to `Closed`. - match &mut self.state { - State::Closed { .. 
} => {} - State::Open { in_substreams, .. } => { - for (num, substream) in in_substreams.iter_mut().enumerate() { - match substream.as_mut().map(|s| Stream::poll_next(Pin::new(s), cx)) { - None | Some(Poll::Pending) => continue, - Some(Poll::Ready(Some(Ok(message)))) => { - let event = NotifsHandlerOut::Notification { - message, - protocol_name: self.in_protocols[num].0.protocol_name().clone(), - }; - return Poll::Ready(ProtocolsHandlerEvent::Custom(event)) - }, - Some(Poll::Ready(None)) | Some(Poll::Ready(Some(Err(_)))) => - *substream = None, - } - } - } - - State::OpenDesired { in_substreams, .. } | - State::Opening { in_substreams, .. } => { - for substream in in_substreams { - match substream.as_mut().map(|s| NotificationsInSubstream::poll_process(Pin::new(s), cx)) { - None | Some(Poll::Pending) => continue, - Some(Poll::Ready(Ok(void))) => match void {}, - Some(Poll::Ready(Err(_))) => *substream = None, - } - } - } - } - - // Since the previous block might have closed inbound substreams, make sure that we can - // stay in `OpenDesired` state. - if let State::OpenDesired { in_substreams, pending_opening } = &mut self.state { - if !in_substreams.iter().any(|s| s.is_some()) { - self.state = State::Closed { - pending_opening: mem::replace(pending_opening, Vec::new()), - }; - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::CloseDesired - )) - } - } - - // Poll outbound substreams. - match &mut self.state { - State::Open { out_substreams, want_closed, .. } => { - let mut any_closed = false; - - for substream in out_substreams.iter_mut() { - match substream.as_mut().map(|s| Sink::poll_flush(Pin::new(s), cx)) { - None | Some(Poll::Pending) | Some(Poll::Ready(Ok(()))) => continue, - Some(Poll::Ready(Err(_))) => {} - }; - - // Reached if the substream has been closed. 
- *substream = None; - any_closed = true; - } - - if any_closed { - if !*want_closed { - *want_closed = true; - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsHandlerOut::CloseDesired)); - } - } - } - - State::Opening { out_substreams, pending_handshake, .. } => { - debug_assert!(out_substreams.iter().any(|s| s.is_none())); - - for (num, substream) in out_substreams.iter_mut().enumerate() { - match substream { - None | Some(None) => continue, - Some(Some(substream)) => match Sink::poll_flush(Pin::new(substream), cx) { - Poll::Pending | Poll::Ready(Ok(())) => continue, - Poll::Ready(Err(_)) => {} - } - } - - // Reached if the substream has been closed. - *substream = Some(None); - if num == 0 { - // Cancel the handshake. - *pending_handshake = None; - } - } - } - - State::Closed { .. } | - State::OpenDesired { .. } => {} - } - - if let State::Open { notifications_sink_rx, out_substreams, .. } = &mut self.state { - 'poll_notifs_sink: loop { - // Before we poll the notifications sink receiver, check that all the notification - // channels are ready to send a message. - // TODO: it is planned that in the future we switch to one `NotificationsSink` per - // protocol, in which case each sink should wait only for its corresponding handler - // to be ready, and not all handlers - // see https://github.com/paritytech/substrate/issues/5670 - for substream in out_substreams.iter_mut() { - match substream.as_mut().map(|s| s.poll_ready_unpin(cx)) { - None | Some(Poll::Ready(_)) => {}, - Some(Poll::Pending) => break 'poll_notifs_sink - } - } - - // Now that all substreams are ready for a message, grab what to send. 
- let message = match notifications_sink_rx.poll_next_unpin(cx) { - Poll::Ready(Some(msg)) => msg, - Poll::Ready(None) | Poll::Pending => break, - }; - - match message { - NotificationsSinkMessage::Notification { - protocol_name, - message - } => { - if let Some(pos) = self.out_protocols.iter().position(|(n, _)| *n == protocol_name) { - if let Some(substream) = out_substreams[pos].as_mut() { - let _ = substream.start_send_unpin(message); - continue 'poll_notifs_sink; - } - - } else { - log::warn!( - target: "sub-libp2p", - "Tried to send a notification on non-registered protocol: {:?}", - protocol_name - ); - } - } - NotificationsSinkMessage::ForceClose => { - return Poll::Ready( - ProtocolsHandlerEvent::Close(NotifsHandlerError::SyncNotificationsClogged) - ); - } - } - } - } - - // The legacy substreams are polled only if the state is `Open`. Otherwise, it would be - // possible to receive notifications that would need to get silently discarded. - if matches!(self.state, State::Open { .. }) { - for n in (0..self.legacy_substreams.len()).rev() { - let mut substream = self.legacy_substreams.swap_remove(n); - let poll_outcome = Pin::new(&mut substream).poll_next(cx); - match poll_outcome { - Poll::Pending => self.legacy_substreams.push(substream), - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => { - self.legacy_substreams.push(substream); - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::CustomMessage { message } - )) - }, - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) => { - return Poll::Ready(ProtocolsHandlerEvent::Close( - NotifsHandlerError::SyncNotificationsClogged - )) - } - Poll::Ready(None) | Poll::Ready(Some(Err(_))) => { - if matches!(poll_outcome, Poll::Ready(None)) { - self.legacy_shutdown.push(substream); - } - - if let State::Open { want_closed, .. 
} = &mut self.state { - if !*want_closed { - *want_closed = true; - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::CloseDesired - )) - } - } - } - } - } - } - - shutdown_list(&mut self.legacy_shutdown, cx); - - Poll::Pending - } -} - -/// Given a list of substreams, tries to shut them down. The substreams that have been successfully -/// shut down are removed from the list. -fn shutdown_list - (list: &mut SmallVec>>, - cx: &mut Context) -{ - 'outer: for n in (0..list.len()).rev() { - let mut substream = list.swap_remove(n); - loop { - match substream.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(_))) => {} - Poll::Pending => break, - Poll::Ready(Some(Err(_))) | Poll::Ready(None) => continue 'outer, - } - } - list.push(substream); - } -} From 1e5cd4f9d9303b26b61c944dcbbe194d2c7771f7 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 13 Nov 2020 15:26:52 +0100 Subject: [PATCH 29/39] Apply suggestions from code review Co-authored-by: Max Inden --- .../src/protocol/generic_proto/behaviour.rs | 14 ++++++-------- .../network/src/protocol/generic_proto/handler.rs | 6 +++--- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 085c1ef581db6..6bf0b2b64e9f5 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -750,7 +750,6 @@ impl GenericProto { }); *connec_state = ConnectionState::Opening; *occ_entry.into_mut() = PeerState::Enabled { connections }; - } else { // If no connection is available, switch to `DisabledPendingEnable` in order // to try again later. @@ -1273,7 +1272,6 @@ impl NetworkBehaviour for GenericProto { } else if no_desired_left { // If no connection is `OpenDesired` anymore, switch to `Disabled`. 
*entry.get_mut() = PeerState::Disabled { connections, banned_until }; - } else { *entry.get_mut() = PeerState::Incoming { connections, banned_until }; } @@ -1698,7 +1696,7 @@ impl NetworkBehaviour for GenericProto { } } else { - // List of open connections wasn't empty before but not it is. + // List of open connections wasn't empty before but now it is. if !connections.iter().any(|(_, s)| matches!(s, ConnectionState::Opening)) { debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); self.peerset.dropped(source.clone()); @@ -1749,9 +1747,9 @@ impl NetworkBehaviour for GenericProto { { *connec_state = ConnectionState::Closed; } else { - debug_assert!(false); error!(target: "sub-libp2p", "CloseResult: State mismatch in the custom protos handler"); + debug_assert!(false); } }, @@ -1806,9 +1804,9 @@ impl NetworkBehaviour for GenericProto { { *connec_state = ConnectionState::Closing; } else { - debug_assert!(false); error!(target: "sub-libp2p", "OpenResultOk State mismatch in the custom protos handler"); + debug_assert!(false); } } @@ -1873,9 +1871,9 @@ impl NetworkBehaviour for GenericProto { { *connec_state = ConnectionState::Closing; } else { - debug_assert!(false); error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler"); + debug_assert!(false); } *entry.into_mut() = PeerState::Disabled { connections, banned_until }; @@ -1886,9 +1884,9 @@ impl NetworkBehaviour for GenericProto { { *connec_state = ConnectionState::Closing; } else { - debug_assert!(false); error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler"); + debug_assert!(false); } *entry.into_mut() = PeerState::DisabledPendingEnable { @@ -1919,7 +1917,7 @@ impl NetworkBehaviour for GenericProto { } else { trace!( target: "sub-libp2p", - "Handler({:?}) => Post-close message", + "Handler({:?}) => Post-close message. 
Dropping message.", source, ); } diff --git a/client/network/src/protocol/generic_proto/handler.rs b/client/network/src/protocol/generic_proto/handler.rs index 1318bb88965cd..5778cb3804b00 100644 --- a/client/network/src/protocol/generic_proto/handler.rs +++ b/client/network/src/protocol/generic_proto/handler.rs @@ -22,7 +22,7 @@ //! //! # Usage //! -//! From an API perspective, the [`NotifsHandler`] is always in one of the following state: +//! From an API perspective, the [`NotifsHandler`] is always in one of the following state (see [`State`]): //! //! - Closed substreams. This is the initial state. //! - Closed substreams, but remote desires them to be open. @@ -412,7 +412,7 @@ impl NotificationsSink { if result.is_err() { // Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the - // buffer, and therefore that `try_send` will succeed. + // buffer, and therefore `try_send` will succeed. let _result2 = lock.clone().try_send(NotificationsSinkMessage::ForceClose); debug_assert!(_result2.map(|()| true).unwrap_or_else(|err| err.is_disconnected())); } @@ -583,7 +583,7 @@ impl ProtocolsHandler for NotifsHandler { // Note: while we awknowledge legacy substreams and handle incoming messages, // it doesn't trigger any `OpenDesired` event as a way to simplify the logic of // this code. - // Since mid-2019, legacy substreams are supposed to used at the same time as + // Since mid-2019, legacy substreams are supposed to be used at the same time as // notifications substreams, and not in isolation. Nodes that open legacy // substreams in isolation are considered deprecated. 
if self.legacy_substreams.len() <= 4 { From 556ab6f90cd267c998afae681aa00fac691afe70 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 13 Nov 2020 15:37:10 +0100 Subject: [PATCH 30/39] Banned => Backoff --- .../src/protocol/generic_proto/behaviour.rs | 146 +++++++++--------- 1 file changed, 73 insertions(+), 73 deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 6bf0b2b64e9f5..e1fc586060f30 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -63,10 +63,10 @@ use wasm_timer::Instant; /// been asked to attribute an inbound slot. /// /// In addition to these states, there also exists a "banning" system. If we fail to dial a peer, -/// we "ban" it for a few seconds. If the PSM requests connecting to a peer that is currently -/// "banned", the next dialing attempt is delayed until after the ban expires. However, the PSM +/// we back-off for a few seconds. If the PSM requests connecting to a peer that is currently +/// backed-off, the next dialing attempt is delayed until after the ban expires. However, the PSM /// will still consider the peer to be connected. This "ban" is thus not a ban in a strict sense: -/// if a "banned" peer tries to connect, the connection is accepted. A ban only delays dialing +/// if a backed-off peer tries to connect, the connection is accepted. A ban only delays dialing /// attempts. /// /// There may be multiple connections to a peer. The status of a peer on @@ -151,10 +151,10 @@ enum PeerState { /// The peer misbehaved. If the PSM wants us to connect to this peer, we will add an artificial /// delay to the connection. - Banned { + Backoff { /// When the ban expires. For clean-up purposes. References an entry in `delays`. timer: DelayId, - /// Until when the peer is banned. + /// Until when the peer is backed-off. 
timer_deadline: Instant, }, @@ -176,14 +176,14 @@ enum PeerState { Disabled { /// If `Some`, any connection request from the peerset to this peer is delayed until the /// given `Instant`. - banned_until: Option, + backoff_until: Option, /// List of connections with this peer, and their state. connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, }, /// We are connected to this peer. The peerset has requested a connection to this peer, but - /// it is currently in a "banned" phase. The state will switch to `Enabled` once the timer + /// it is currently in a "backed-off" phase. The state will switch to `Enabled` once the timer /// expires. /// /// The handler is either in the closed state, or a `Close` message has been sent to it and @@ -212,7 +212,7 @@ enum PeerState { /// peer. Incoming { /// If `Some`, any dial attempts to this peer are delayed until the given `Instant`. - banned_until: Option, + backoff_until: Option, /// List of connections with this peer, and their state. connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, @@ -238,7 +238,7 @@ impl PeerState { }) .next(), PeerState::Poisoned => None, - PeerState::Banned { .. } => None, + PeerState::Backoff { .. } => None, PeerState::PendingRequest { .. } => None, PeerState::Requested => None, PeerState::Disabled { .. } => None, @@ -251,7 +251,7 @@ impl PeerState { fn is_requested(&self) -> bool { match self { PeerState::Poisoned => false, - PeerState::Banned { .. } => false, + PeerState::Backoff { .. } => false, PeerState::PendingRequest { .. } => true, PeerState::Requested => true, PeerState::Disabled { .. } => false, @@ -469,7 +469,7 @@ impl GenericProto { st @ PeerState::Disabled { .. } => *entry.into_mut() = st, st @ PeerState::Requested => *entry.into_mut() = st, st @ PeerState::PendingRequest { .. } => *entry.into_mut() = st, - st @ PeerState::Banned { .. } => *entry.into_mut() = st, + st @ PeerState::Backoff { .. 
} => *entry.into_mut() = st, // DisabledPendingEnable => Disabled. PeerState::DisabledPendingEnable { @@ -479,14 +479,14 @@ impl GenericProto { } => { debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); self.peerset.dropped(peer_id.clone()); - let banned_until = Some(if let Some(ban) = ban { + let backoff_until = Some(if let Some(ban) = ban { cmp::max(timer_deadline, Instant::now() + ban) } else { timer_deadline }); *entry.into_mut() = PeerState::Disabled { connections, - banned_until + backoff_until } }, @@ -532,16 +532,16 @@ impl GenericProto { debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_)))); debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::Opening))); - let banned_until = ban.map(|dur| Instant::now() + dur); + let backoff_until = ban.map(|dur| Instant::now() + dur); *entry.into_mut() = PeerState::Disabled { connections, - banned_until + backoff_until } }, // Incoming => Disabled. // Ongoing opening requests from the remote are rejected. - PeerState::Incoming { mut connections, banned_until } => { + PeerState::Incoming { mut connections, backoff_until } => { let inc = if let Some(inc) = self.incoming.iter_mut() .find(|i| i.peer_id == *entry.key() && i.alive) { inc @@ -565,7 +565,7 @@ impl GenericProto { *connec_state = ConnectionState::Closing; } - let banned_until = match (banned_until, ban) { + let backoff_until = match (backoff_until, ban) { (Some(a), Some(b)) => Some(cmp::max(a, Instant::now() + b)), (Some(a), None) => Some(a), (None, Some(b)) => Some(Instant::now() + b), @@ -575,7 +575,7 @@ impl GenericProto { debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesired))); *entry.into_mut() = PeerState::Disabled { connections, - banned_until + backoff_until } }, @@ -599,7 +599,7 @@ impl GenericProto { Some(PeerState::Incoming { .. }) => false, Some(PeerState::Requested) => false, Some(PeerState::PendingRequest { .. 
}) => false, - Some(PeerState::Banned { .. }) => false, + Some(PeerState::Backoff { .. }) => false, Some(PeerState::Poisoned) => false, } } @@ -688,8 +688,8 @@ impl GenericProto { let now = Instant::now(); match mem::replace(occ_entry.get_mut(), PeerState::Poisoned) { - // Banned (not expired) => PendingRequest - PeerState::Banned { ref timer, ref timer_deadline } if *timer_deadline > now => { + // Backoff (not expired) => PendingRequest + PeerState::Backoff { ref timer, ref timer_deadline } if *timer_deadline > now => { let peer_id = occ_entry.key().clone(); debug!(target: "sub-libp2p", "PSM => Connect({:?}): Will start to connect at \ until {:?}", peer_id, timer_deadline); @@ -699,8 +699,8 @@ impl GenericProto { }; }, - // Banned (expired) => Requested - PeerState::Banned { .. } => { + // Backoff (expired) => Requested + PeerState::Backoff { .. } => { debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", occ_entry.key()); debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", occ_entry.key()); self.events.push_back(NetworkBehaviourAction::DialPeer { @@ -713,15 +713,15 @@ impl GenericProto { // Disabled (with non-expired ban) => DisabledPendingEnable PeerState::Disabled { connections, - banned_until: Some(ref banned) - } if *banned > now => { + backoff_until: Some(ref backoff) + } if *backoff > now => { let peer_id = occ_entry.key().clone(); - debug!(target: "sub-libp2p", "PSM => Connect({:?}): But peer is banned until {:?}", - peer_id, banned); + debug!(target: "sub-libp2p", "PSM => Connect({:?}): But peer is backed-off until {:?}", + peer_id, backoff); let delay_id = self.next_delay_id; self.next_delay_id.0 += 1; - let delay = futures_timer::Delay::new(*banned - now); + let delay = futures_timer::Delay::new(*backoff - now); self.delays.push(async move { delay.await; (delay_id, peer_id) @@ -730,12 +730,12 @@ impl GenericProto { *occ_entry.into_mut() = PeerState::DisabledPendingEnable { connections, timer: delay_id, - timer_deadline: *banned, + 
timer_deadline: *backoff, }; }, // Disabled => Enabled - PeerState::Disabled { mut connections, banned_until } => { + PeerState::Disabled { mut connections, backoff_until } => { // The first element of `closed` is chosen to open the notifications substream. if let Some((connec_id, connec_state)) = connections.iter_mut() .find(|(_, s)| matches!(s, ConnectionState::Closed)) @@ -764,8 +764,8 @@ impl GenericProto { let timer_deadline = { let base = now + Duration::from_secs(5); - if let Some(banned_until) = banned_until { - cmp::max(base, banned_until) + if let Some(backoff_until) = backoff_until { + cmp::max(base, backoff_until) } else { base } @@ -857,7 +857,7 @@ impl GenericProto { }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { - st @ PeerState::Disabled { .. } | st @ PeerState::Banned { .. } => { + st @ PeerState::Disabled { .. } | st @ PeerState::Backoff { .. } => { debug!(target: "sub-libp2p", "PSM => Drop({:?}): Already disabled.", entry.key()); *entry.into_mut() = st; }, @@ -870,7 +870,7 @@ impl GenericProto { entry.key()); *entry.into_mut() = PeerState::Disabled { connections, - banned_until: Some(timer_deadline), + backoff_until: Some(timer_deadline), }; }, @@ -913,7 +913,7 @@ impl GenericProto { *connec_state = ConnectionState::Closing; } - *entry.into_mut() = PeerState::Disabled { connections, banned_until: None } + *entry.into_mut() = PeerState::Disabled { connections, backoff_until: None } }, // Requested => Ø @@ -925,10 +925,10 @@ impl GenericProto { entry.remove(); }, - // PendingRequest => Banned + // PendingRequest => Backoff PeerState::PendingRequest { timer, timer_deadline } => { debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected", entry.key()); - *entry.into_mut() = PeerState::Banned { timer, timer_deadline } + *entry.into_mut() = PeerState::Backoff { timer, timer_deadline } }, // Invalid state transitions. 
@@ -1034,7 +1034,7 @@ impl GenericProto { match mem::replace(state, PeerState::Poisoned) { // Incoming => Disabled - PeerState::Incoming { mut connections, banned_until } => { + PeerState::Incoming { mut connections, backoff_until } => { debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Rejecting connections.", index, incoming.peer_id); @@ -1051,7 +1051,7 @@ impl GenericProto { *connec_state = ConnectionState::Closing; } - *state = PeerState::Disabled { connections, banned_until }; + *state = PeerState::Disabled { connections, backoff_until }; } peer => error!(target: "sub-libp2p", "State mismatch in libp2p: Expected alive incoming. Got {:?}.", @@ -1100,10 +1100,10 @@ impl NetworkBehaviour for GenericProto { } // Poisoned gets inserted above if the entry was missing. - // Ø | Banned => Disabled + // Ø | Backoff => Disabled st @ &mut PeerState::Poisoned | - st @ &mut PeerState::Banned { .. } => { - let banned_until = if let PeerState::Banned { timer_deadline, .. } = st { + st @ &mut PeerState::Backoff { .. } => { + let backoff_until = if let PeerState::Backoff { timer_deadline, .. 
} = st { Some(*timer_deadline) } else { None @@ -1114,7 +1114,7 @@ impl NetworkBehaviour for GenericProto { let mut connections = SmallVec::new(); connections.push((*conn, ConnectionState::Closed)); - *st = PeerState::Disabled { connections, banned_until }; + *st = PeerState::Disabled { connections, backoff_until }; } // In all other states, add this new connection to the list of closed inactive @@ -1141,8 +1141,8 @@ impl NetworkBehaviour for GenericProto { }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { - // Disabled => Disabled | Banned | Ø - PeerState::Disabled { mut connections, banned_until } => { + // Disabled => Disabled | Backoff | Ø + PeerState::Disabled { mut connections, backoff_until } => { debug!(target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}): Disabled.", peer_id, *conn); if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { @@ -1154,7 +1154,7 @@ impl NetworkBehaviour for GenericProto { } if connections.is_empty() { - if let Some(until) = banned_until { + if let Some(until) = backoff_until { let now = Instant::now(); if until > now { let delay_id = self.next_delay_id; @@ -1166,7 +1166,7 @@ impl NetworkBehaviour for GenericProto { (delay_id, peer_id) }.boxed()); - *entry.get_mut() = PeerState::Banned { + *entry.get_mut() = PeerState::Backoff { timer: delay_id, timer_deadline: until, }; @@ -1177,11 +1177,11 @@ impl NetworkBehaviour for GenericProto { entry.remove(); } } else { - *entry.get_mut() = PeerState::Disabled { connections, banned_until }; + *entry.get_mut() = PeerState::Disabled { connections, backoff_until }; } }, - // DisabledPendingEnable => DisabledPendingEnable | Banned + // DisabledPendingEnable => DisabledPendingEnable | Backoff PeerState::DisabledPendingEnable { mut connections, timer_deadline, timer } => { debug!( target: "sub-libp2p", @@ -1200,7 +1200,7 @@ impl NetworkBehaviour for GenericProto { if connections.is_empty() { debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); 
self.peerset.dropped(peer_id.clone()); - *entry.get_mut() = PeerState::Banned { timer, timer_deadline }; + *entry.get_mut() = PeerState::Backoff { timer, timer_deadline }; } else { *entry.get_mut() = PeerState::DisabledPendingEnable { @@ -1209,8 +1209,8 @@ impl NetworkBehaviour for GenericProto { } }, - // Incoming => Incoming | Disabled | Banned | Ø - PeerState::Incoming { mut connections, banned_until } => { + // Incoming => Incoming | Disabled | Backoff | Ø + PeerState::Incoming { mut connections, backoff_until } => { debug!( target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}): OpenDesired.", @@ -1246,7 +1246,7 @@ impl NetworkBehaviour for GenericProto { } if connections.is_empty() { - if let Some(until) = banned_until { + if let Some(until) = backoff_until { let now = Instant::now(); if until > now { let delay_id = self.next_delay_id; @@ -1258,7 +1258,7 @@ impl NetworkBehaviour for GenericProto { (delay_id, peer_id) }.boxed()); - *entry.get_mut() = PeerState::Banned { + *entry.get_mut() = PeerState::Backoff { timer: delay_id, timer_deadline: until, }; @@ -1271,14 +1271,14 @@ impl NetworkBehaviour for GenericProto { } else if no_desired_left { // If no connection is `OpenDesired` anymore, switch to `Disabled`. - *entry.get_mut() = PeerState::Disabled { connections, banned_until }; + *entry.get_mut() = PeerState::Disabled { connections, backoff_until }; } else { - *entry.get_mut() = PeerState::Incoming { connections, banned_until }; + *entry.get_mut() = PeerState::Incoming { connections, backoff_until }; } } - // Enabled => Enabled | Banned - // Peers are always banned when disconnecting while Enabled. + // Enabled => Enabled | Backoff + // Peers are always backed-off when disconnecting while Enabled. 
PeerState::Enabled { mut connections } => { debug!( target: "sub-libp2p", @@ -1340,7 +1340,7 @@ impl NetworkBehaviour for GenericProto { (delay_id, peer_id) }.boxed()); - *entry.get_mut() = PeerState::Banned { + *entry.get_mut() = PeerState::Backoff { timer: delay_id, timer_deadline: Instant::now() + Duration::from_secs(ban_dur), }; @@ -1353,7 +1353,7 @@ impl NetworkBehaviour for GenericProto { *entry.get_mut() = PeerState::Disabled { connections, - banned_until: None + backoff_until: None }; } else { @@ -1363,7 +1363,7 @@ impl NetworkBehaviour for GenericProto { PeerState::Requested | PeerState::PendingRequest { .. } | - PeerState::Banned { .. } => { + PeerState::Backoff { .. } => { // This is a serious bug either in this state machine or in libp2p. error!(target: "sub-libp2p", "`inject_connection_closed` called for unknown peer {}", @@ -1388,7 +1388,7 @@ impl NetworkBehaviour for GenericProto { if let Entry::Occupied(mut entry) = self.peers.entry(peer_id.clone()) { match mem::replace(entry.get_mut(), PeerState::Poisoned) { // The peer is not in our list. - st @ PeerState::Banned { .. } => { + st @ PeerState::Backoff { .. 
} => { trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); *entry.into_mut() = st; }, @@ -1417,7 +1417,7 @@ impl NetworkBehaviour for GenericProto { (delay_id, peer_id) }.boxed()); - *entry.into_mut() = PeerState::Banned { + *entry.into_mut() = PeerState::Backoff { timer: delay_id, timer_deadline: now + ban_duration, }; @@ -1465,7 +1465,7 @@ impl NetworkBehaviour for GenericProto { match mem::replace(entry.get_mut(), PeerState::Poisoned) { // Incoming => Incoming - PeerState::Incoming { mut connections, banned_until } => { + PeerState::Incoming { mut connections, backoff_until } => { debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesired))); if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { @@ -1489,7 +1489,7 @@ impl NetworkBehaviour for GenericProto { debug_assert!(false); } - *entry.into_mut() = PeerState::Incoming { connections, banned_until }; + *entry.into_mut() = PeerState::Incoming { connections, backoff_until }; }, PeerState::Enabled { mut connections } => { @@ -1527,7 +1527,7 @@ impl NetworkBehaviour for GenericProto { }, // Disabled => Disabled | Incoming - PeerState::Disabled { mut connections, banned_until } => { + PeerState::Disabled { mut connections, backoff_until } => { if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { if let ConnectionState::Closed = *connec_state { *connec_state = ConnectionState::OpenDesired; @@ -1550,7 +1550,7 @@ impl NetworkBehaviour for GenericProto { incoming_id, }); - *entry.into_mut() = PeerState::Incoming { connections, banned_until }; + *entry.into_mut() = PeerState::Incoming { connections, backoff_until }; } else { // Connections in `OpeningAndClosing` are in a Closed phase, and @@ -1596,7 +1596,7 @@ impl NetworkBehaviour for GenericProto { *entry.into_mut() = PeerState::Incoming { connections, - banned_until: Some(timer_deadline), + backoff_until: Some(timer_deadline), }; } else { @@ 
-1701,7 +1701,7 @@ impl NetworkBehaviour for GenericProto { debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); self.peerset.dropped(source.clone()); *entry.into_mut() = PeerState::Disabled { - connections, banned_until: None + connections, backoff_until: None }; } else { *entry.into_mut() = PeerState::Enabled { connections }; @@ -1859,13 +1859,13 @@ impl NetworkBehaviour for GenericProto { *entry.into_mut() = PeerState::Disabled { connections, - banned_until: None + backoff_until: None }; } else { *entry.into_mut() = PeerState::Enabled { connections }; } }, - PeerState::Disabled { mut connections, banned_until } => { + PeerState::Disabled { mut connections, backoff_until } => { if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| *c == connection && matches!(s, ConnectionState::OpeningAndClosing)) { @@ -1876,7 +1876,7 @@ impl NetworkBehaviour for GenericProto { debug_assert!(false); } - *entry.into_mut() = PeerState::Disabled { connections, banned_until }; + *entry.into_mut() = PeerState::Disabled { connections, backoff_until }; }, PeerState::DisabledPendingEnable { mut connections, timer, timer_deadline } => { if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| @@ -1999,7 +1999,7 @@ impl NetworkBehaviour for GenericProto { }; match peer_state { - PeerState::Banned { timer, .. } if *timer == delay_id => { + PeerState::Backoff { timer, .. 
} if *timer == delay_id => { debug!(target: "sub-libp2p", "Libp2p <= Clean up ban of {:?} from the state", peer_id); self.peers.remove(&peer_id); } From 0e5f130287bc29288ce23778443e4133da35cb40 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 13 Nov 2020 15:41:53 +0100 Subject: [PATCH 31/39] Mention the actual PeerStates --- client/network/src/protocol/generic_proto/behaviour.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index e1fc586060f30..10314c601d177 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -53,14 +53,14 @@ use wasm_timer::Instant; /// /// In the state machine below, each `PeerId` is attributed one of these states: /// -/// - No open connection, but requested by the peerset. Currently dialing. -/// - Has open TCP connection(s) unbeknownst to the peerset. No substream is open. -/// - Has open TCP connection(s), acknowledged by the peerset. +/// - `Requested`: No open connection, but requested by the peerset. Currently dialing. +/// - `Disabled`: Has open TCP connection(s) unbeknownst to the peerset. No substream is open. +/// - `Enabled`: Has open TCP connection(s), acknowledged by the peerset. /// - Notifications substreams are open on at least one connection, and external /// API has been notified. /// - Notifications substreams aren't open. -/// - Has open TCP connection(s) and remote would like to open substreams. Peerset has -/// been asked to attribute an inbound slot. +/// - `Incoming`: Has open TCP connection(s) and remote would like to open substreams. +/// Peerset has been asked to attribute an inbound slot. /// /// In addition to these states, there also exists a "banning" system. If we fail to dial a peer, /// we back-off for a few seconds. 
If the PSM requests connecting to a peer that is currently From ca31ed38de3a83c3e114d4d7e9a48d3767c87d22 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 13 Nov 2020 15:42:01 +0100 Subject: [PATCH 32/39] OpenDesired -> OpenDesiredByRemote --- .../src/protocol/generic_proto/handler.rs | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/client/network/src/protocol/generic_proto/handler.rs b/client/network/src/protocol/generic_proto/handler.rs index 5778cb3804b00..b2bc8a7472cbe 100644 --- a/client/network/src/protocol/generic_proto/handler.rs +++ b/client/network/src/protocol/generic_proto/handler.rs @@ -167,7 +167,7 @@ enum State { }, /// Handler is in the "Closed" state. A [`NotifsHandlerOut::OpenDesired`] has been emitted. - OpenDesired { + OpenDesiredByRemote { /// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains /// a substream opened by the remote and that hasn't been accepted/rejected yet. /// @@ -193,8 +193,8 @@ enum State { /// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains /// a substream opened by the remote and that has been accepted. /// - /// Contrary to [`State::OpenDesired::in_substreams`], it is possible for this to contain - /// only `None`s. + /// Contrary to [`State::OpenDesiredByRemote::in_substreams`], it is possible for this to + /// contain only `None`s. in_substreams: Vec>>, /// Vec of the same length as [`NotifsHandler::out_protocols`]. For each protocol, contains @@ -238,8 +238,8 @@ enum State { /// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains /// a substream opened by the remote and that has been accepted. /// - /// Contrary to [`State::OpenDesired::in_substreams`], it is possible for this to contain - /// only `None`s. + /// Contrary to [`State::OpenDesiredByRemote::in_substreams`], it is possible for this to + /// contain only `None`s. 
in_substreams: Vec>>, /// If true, at least one substream in [`State::Open::out_substreams`] has been closed or @@ -544,12 +544,12 @@ impl ProtocolsHandler for NotifsHandler { .map(|_| None) .collect::>(); in_substreams[num] = Some(proto); - self.state = State::OpenDesired { + self.state = State::OpenDesiredByRemote { in_substreams, pending_opening: mem::replace(pending_opening, Vec::new()), }; }, - State::OpenDesired { in_substreams, .. } => { + State::OpenDesiredByRemote { in_substreams, .. } => { if in_substreams[num].is_some() { // If a substream already exists, silently drop the new one. // Note that we drop the substream, which will send an equivalent to a @@ -600,7 +600,7 @@ impl ProtocolsHandler for NotifsHandler { ) { match &mut self.state { State::Closed { pending_opening } | - State::OpenDesired { pending_opening, .. } => { + State::OpenDesiredByRemote { pending_opening, .. } => { debug_assert!(pending_opening[num]); pending_opening[num] = false; } @@ -658,10 +658,10 @@ impl ProtocolsHandler for NotifsHandler { match message { NotifsHandlerIn::Open => { match &mut self.state { - State::Closed { .. } | State::OpenDesired { .. } => { + State::Closed { .. } | State::OpenDesiredByRemote { .. } => { let (pending_opening, mut in_substreams) = match &mut self.state { State::Closed { pending_opening } => (pending_opening, None), - State::OpenDesired { pending_opening, in_substreams } => + State::OpenDesiredByRemote { pending_opening, in_substreams } => (pending_opening, Some(mem::replace(in_substreams, Vec::new()))), _ => unreachable!() }; @@ -736,7 +736,7 @@ impl ProtocolsHandler for NotifsHandler { NotifsHandlerOut::OpenResultErr )); }, - State::OpenDesired { pending_opening, .. } => { + State::OpenDesiredByRemote { pending_opening, .. 
} => { self.state = State::Closed { pending_opening: mem::replace(pending_opening, Vec::new()), }; @@ -757,7 +757,7 @@ impl ProtocolsHandler for NotifsHandler { _: ProtocolsHandlerUpgrErr ) { match &mut self.state { - State::Closed { pending_opening } | State::OpenDesired { pending_opening, .. } => { + State::Closed { pending_opening } | State::OpenDesiredByRemote { pending_opening, .. } => { debug_assert!(pending_opening[num]); pending_opening[num] = false; } @@ -831,7 +831,7 @@ impl ProtocolsHandler for NotifsHandler { match self.state { State::Closed { .. } => KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME), - State::OpenDesired { .. } | State::Opening { .. } | State::Open { .. } => + State::OpenDesiredByRemote { .. } | State::Opening { .. } | State::Open { .. } => KeepAlive::Yes, } } @@ -868,7 +868,7 @@ impl ProtocolsHandler for NotifsHandler { } } - State::OpenDesired { in_substreams, .. } | + State::OpenDesiredByRemote { in_substreams, .. } | State::Opening { in_substreams, .. } => { for substream in in_substreams { match substream.as_mut().map(|s| NotificationsInSubstream::poll_process(Pin::new(s), cx)) { @@ -882,7 +882,7 @@ impl ProtocolsHandler for NotifsHandler { // Since the previous block might have closed inbound substreams, make sure that we can // stay in `OpenDesired` state. - if let State::OpenDesired { in_substreams, pending_opening } = &mut self.state { + if let State::OpenDesiredByRemote { in_substreams, pending_opening } = &mut self.state { if !in_substreams.iter().any(|s| s.is_some()) { self.state = State::Closed { pending_opening: mem::replace(pending_opening, Vec::new()), @@ -939,7 +939,7 @@ impl ProtocolsHandler for NotifsHandler { } State::Closed { .. } | - State::OpenDesired { .. } => {} + State::OpenDesiredByRemote { .. } => {} } if let State::Open { notifications_sink_rx, out_substreams, .. 
} = &mut self.state { From 7578c9ec89db5177c2d7fec35c4d75b5caef2af0 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 13 Nov 2020 18:44:16 +0100 Subject: [PATCH 33/39] OpeningThenClosing --- .../src/protocol/generic_proto/behaviour.rs | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 10314c601d177..67ddbe07e3699 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -280,7 +280,7 @@ enum ConnectionState { /// Connection is in the `Closed` state but a [`NotifsHandlerIn::Open`] message then a /// [`NotifsHandlerIn::Close`] message has been sent. An `OpenResultOk`/`OpenResultErr` message /// followed with a `CloseResult` message are expected. - OpeningAndClosing, + OpeningThenClosing, /// Connection is in the `Closed` state, but a [`NotifsHandlerOut::OpenDesired`] message has /// been received, meaning that the remote wants to open a substream. @@ -526,7 +526,7 @@ impl GenericProto { handler: NotifyHandler::One(*connec_id), event: NotifsHandlerIn::Close, }); - *connec_state = ConnectionState::OpeningAndClosing; + *connec_state = ConnectionState::OpeningThenClosing; } debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_)))); @@ -754,7 +754,7 @@ impl GenericProto { // If no connection is available, switch to `DisabledPendingEnable` in order // to try again later. 
debug_assert!(connections.iter().any(|(_, s)| { - matches!(s, ConnectionState::OpeningAndClosing | ConnectionState::Closing) + matches!(s, ConnectionState::OpeningThenClosing | ConnectionState::Closing) })); debug!( target: "sub-libp2p", @@ -898,7 +898,7 @@ impl GenericProto { handler: NotifyHandler::One(*connec_id), event: NotifsHandlerIn::Close, }); - *connec_state = ConnectionState::OpeningAndClosing; + *connec_state = ConnectionState::OpeningThenClosing; } for (connec_id, connec_state) in connections.iter_mut() @@ -1472,13 +1472,13 @@ impl NetworkBehaviour for GenericProto { if let ConnectionState::Closed = *connec_state { *connec_state = ConnectionState::OpenDesired; } else { - // Connections in `OpeningAndClosing` state are in a Closed phase, + // Connections in `OpeningThenClosing` state are in a Closed phase, // and as such can emit `OpenDesired` messages. // Since an `Open` and a `Close` messages have already been sent, // there is nothing much that can be done about this anyway. debug_assert!(matches!( connec_state, - ConnectionState::OpeningAndClosing + ConnectionState::OpeningThenClosing )); } } else { @@ -1506,7 +1506,7 @@ impl NetworkBehaviour for GenericProto { }); *connec_state = ConnectionState::Opening; } else { - // Connections in `OpeningAndClosing` and `Opening` are in a Closed + // Connections in `OpeningThenClosing` and `Opening` are in a Closed // phase, and as such can emit `OpenDesired` messages. // Since an `Open` message haS already been sent, there is nothing // more to do. @@ -1553,12 +1553,12 @@ impl NetworkBehaviour for GenericProto { *entry.into_mut() = PeerState::Incoming { connections, backoff_until }; } else { - // Connections in `OpeningAndClosing` are in a Closed phase, and + // Connections in `OpeningThenClosing` are in a Closed phase, and // as such can emit `OpenDesired` messages. // We ignore them. 
debug_assert!(matches!( connec_state, - ConnectionState::OpeningAndClosing + ConnectionState::OpeningThenClosing )); } } else { @@ -1600,12 +1600,12 @@ impl NetworkBehaviour for GenericProto { }; } else { - // Connections in `OpeningAndClosing` are in a Closed phase, and + // Connections in `OpeningThenClosing` are in a Closed phase, and // as such can emit `OpenDesired` messages. // We ignore them. debug_assert!(matches!( connec_state, - ConnectionState::OpeningAndClosing + ConnectionState::OpeningThenClosing )); *entry.into_mut() = PeerState::DisabledPendingEnable { connections, @@ -1787,7 +1787,7 @@ impl NetworkBehaviour for GenericProto { } *connec_state = ConnectionState::Open(notifications_sink); } else if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| - *c == connection && matches!(s, ConnectionState::OpeningAndClosing)) + *c == connection && matches!(s, ConnectionState::OpeningThenClosing)) { *connec_state = ConnectionState::Closing; } else { @@ -1800,7 +1800,7 @@ impl NetworkBehaviour for GenericProto { Some(PeerState::DisabledPendingEnable { connections, .. }) | Some(PeerState::Disabled { connections, .. 
}) => { if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| - *c == connection && matches!(s, ConnectionState::OpeningAndClosing)) + *c == connection && matches!(s, ConnectionState::OpeningThenClosing)) { *connec_state = ConnectionState::Closing; } else { @@ -1842,7 +1842,7 @@ impl NetworkBehaviour for GenericProto { { *connec_state = ConnectionState::Closed; } else if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| - *c == connection && matches!(s, ConnectionState::OpeningAndClosing)) + *c == connection && matches!(s, ConnectionState::OpeningThenClosing)) { *connec_state = ConnectionState::Closing; } else { @@ -1867,7 +1867,7 @@ impl NetworkBehaviour for GenericProto { }, PeerState::Disabled { mut connections, backoff_until } => { if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| - *c == connection && matches!(s, ConnectionState::OpeningAndClosing)) + *c == connection && matches!(s, ConnectionState::OpeningThenClosing)) { *connec_state = ConnectionState::Closing; } else { @@ -1880,7 +1880,7 @@ impl NetworkBehaviour for GenericProto { }, PeerState::DisabledPendingEnable { mut connections, timer, timer_deadline } => { if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| - *c == connection && matches!(s, ConnectionState::OpeningAndClosing)) + *c == connection && matches!(s, ConnectionState::OpeningThenClosing)) { *connec_state = ConnectionState::Closing; } else { From 3c4ed8f7aa341092635d036a90904be17b0e1cec Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 13 Nov 2020 18:44:55 +0100 Subject: [PATCH 34/39] Add doc links to PeerState --- client/network/src/protocol/generic_proto/behaviour.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 67ddbe07e3699..077e465355c52 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ 
b/client/network/src/protocol/generic_proto/behaviour.rs @@ -53,13 +53,14 @@ use wasm_timer::Instant; /// /// In the state machine below, each `PeerId` is attributed one of these states: /// -/// - `Requested`: No open connection, but requested by the peerset. Currently dialing. -/// - `Disabled`: Has open TCP connection(s) unbeknownst to the peerset. No substream is open. -/// - `Enabled`: Has open TCP connection(s), acknowledged by the peerset. +/// - [`PeerState::Requested`]: No open connection, but requested by the peerset. Currently dialing. +/// - [`PeerState::Disabled`]: Has open TCP connection(s) unbeknownst to the peerset. No substream +/// is open. +/// - [`PeerState::Enabled`]: Has open TCP connection(s), acknowledged by the peerset. /// - Notifications substreams are open on at least one connection, and external /// API has been notified. /// - Notifications substreams aren't open. -/// - `Incoming`: Has open TCP connection(s) and remote would like to open substreams. +/// - [`PeerState::Incoming`]: Has open TCP connection(s) and remote would like to open substreams. /// Peerset has been asked to attribute an inbound slot. /// /// In addition to these states, there also exists a "banning" system. 
If we fail to dial a peer, From 61fc5bb1cfa19366b34b285ad78267976ff988c6 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 13 Nov 2020 18:45:19 +0100 Subject: [PATCH 35/39] Simplify increment logic --- client/network/src/protocol/generic_proto/behaviour.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 077e465355c52..994ecf1c822fa 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -1534,13 +1534,7 @@ impl NetworkBehaviour for GenericProto { *connec_state = ConnectionState::OpenDesired; let incoming_id = self.next_incoming_index; - self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { - Some(v) => v, - None => { - error!(target: "sub-libp2p", "Overflow in next_incoming_index"); - return - } - }; + self.next_incoming_index.0 += 1; debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", source, incoming_id); From 222fee2b6ffd564f2fb3ab299d70941351dc0d25 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 13 Nov 2020 18:45:56 +0100 Subject: [PATCH 36/39] One more debug_assert --- client/network/src/protocol/generic_proto/behaviour.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 994ecf1c822fa..691540fd4b185 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -737,6 +737,10 @@ impl GenericProto { // Disabled => Enabled PeerState::Disabled { mut connections, backoff_until } => { + debug_assert!(!connections.iter().any(|(_, s)| { + matches!(s, ConnectionState::Open(_)) + })); + // The first element of `closed` is chosen to open the notifications substream. 
if let Some((connec_id, connec_state)) = connections.iter_mut() .find(|(_, s)| matches!(s, ConnectionState::Closed)) From 4d909708ece9ca19126f5ddd73d4abfa9614f9fd Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 13 Nov 2020 18:46:16 +0100 Subject: [PATCH 37/39] debug_assert! --- client/network/src/protocol/generic_proto/behaviour.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 691540fd4b185..b17c50bd55633 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -1828,6 +1828,7 @@ impl NetworkBehaviour for GenericProto { entry } else { error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler"); + debug_assert!(false); return }; @@ -1845,9 +1846,9 @@ impl NetworkBehaviour for GenericProto { { *connec_state = ConnectionState::Closing; } else { - debug_assert!(false); error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler"); + debug_assert!(false); } if !connections.iter().any(|(_, s)| From 9d3c981535b17c37709eae5f115ba7962c9e3d26 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 13 Nov 2020 18:47:38 +0100 Subject: [PATCH 38/39] OpenDesiredByRemote --- .../src/protocol/generic_proto/behaviour.rs | 76 ++++++++++--------- .../src/protocol/generic_proto/handler.rs | 20 ++--- 2 files changed, 49 insertions(+), 47 deletions(-) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index b17c50bd55633..a427c56ed40cc 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -207,10 +207,10 @@ enum PeerState { connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, }, - /// We are connected to this peer. 
We have received an `OpenDesired` from one of the handlers - /// and forwarded that request to the peerset. The connection handlers are waiting for a - /// response, i.e. to be opened or closed based on whether the peerset accepts or rejects the - /// peer. + /// We are connected to this peer. We have received an `OpenDesiredByRemote` from one of the + /// handlers and forwarded that request to the peerset. The connection handlers are waiting for + /// a response, i.e. to be opened or closed based on whether the peerset accepts or rejects + /// the peer. Incoming { /// If `Some`, any dial attempts to this peer are delayed until the given `Instant`. backoff_until: Option, @@ -283,9 +283,9 @@ enum ConnectionState { /// followed with a `CloseResult` message are expected. OpeningThenClosing, - /// Connection is in the `Closed` state, but a [`NotifsHandlerOut::OpenDesired`] message has - /// been received, meaning that the remote wants to open a substream. - OpenDesired, + /// Connection is in the `Closed` state, but a [`NotifsHandlerOut::OpenDesiredByRemote`] + /// message has been received, meaning that the remote wants to open a substream. + OpenDesiredByRemote, /// Connection is in the `Open` state. 
/// @@ -555,7 +555,7 @@ impl GenericProto { inc.alive = false; for (connec_id, connec_state) in connections.iter_mut() - .filter(|(_, s)| matches!(s, ConnectionState::OpenDesired)) + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) { debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", peer_id, *connec_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { @@ -573,7 +573,7 @@ impl GenericProto { (None, None) => None, }; - debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesired))); + debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); *entry.into_mut() = PeerState::Disabled { connections, backoff_until @@ -805,9 +805,9 @@ impl GenericProto { incoming for incoming peer") } - debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesired))); + debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); for (connec_id, connec_state) in connections.iter_mut() - .filter(|(_, s)| matches!(s, ConnectionState::OpenDesired)) + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) { debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", occ_entry.key(), *connec_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { @@ -988,9 +988,9 @@ impl GenericProto { debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Enabling connections.", index, incoming.peer_id); - debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesired))); + debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); for (connec_id, connec_state) in connections.iter_mut() - .filter(|(_, s)| matches!(s, ConnectionState::OpenDesired)) + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) { debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", incoming.peer_id, *connec_id); 
self.events.push_back(NetworkBehaviourAction::NotifyHandler { @@ -1043,9 +1043,9 @@ impl GenericProto { debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Rejecting connections.", index, incoming.peer_id); - debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesired))); + debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); for (connec_id, connec_state) in connections.iter_mut() - .filter(|(_, s)| matches!(s, ConnectionState::OpenDesired)) + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) { debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", incoming.peer_id, connec_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { @@ -1218,11 +1218,11 @@ impl NetworkBehaviour for GenericProto { PeerState::Incoming { mut connections, backoff_until } => { debug!( target: "sub-libp2p", - "Libp2p => Disconnected({}, {:?}): OpenDesired.", + "Libp2p => Disconnected({}, {:?}): OpenDesiredByRemote.", peer_id, *conn ); - debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesired))); + debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { connections.remove(pos); @@ -1232,9 +1232,11 @@ impl NetworkBehaviour for GenericProto { "inject_connection_closed: State mismatch in the custom protos handler"); } - let no_desired_left = !connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesired)); + let no_desired_left = !connections.iter().any(|(_, s)| { + matches!(s, ConnectionState::OpenDesiredByRemote) + }); - // If no connection is `OpenDesired` anymore, clean up the peerset incoming + // If no connection is `OpenDesiredByRemote` anymore, clean up the peerset incoming // request. if no_desired_left { // In the incoming state, we don't report "Dropped". 
Instead we will just @@ -1275,7 +1277,7 @@ impl NetworkBehaviour for GenericProto { } } else if no_desired_left { - // If no connection is `OpenDesired` anymore, switch to `Disabled`. + // If no connection is `OpenDesiredByRemote` anymore, switch to `Disabled`. *entry.get_mut() = PeerState::Disabled { connections, backoff_until }; } else { *entry.get_mut() = PeerState::Incoming { connections, backoff_until }; @@ -1455,15 +1457,15 @@ impl NetworkBehaviour for GenericProto { event: NotifsHandlerOut, ) { match event { - NotifsHandlerOut::OpenDesired => { + NotifsHandlerOut::OpenDesiredByRemote => { debug!(target: "sub-libp2p", - "Handler({:?}, {:?}]) => OpenDesired", + "Handler({:?}, {:?}]) => OpenDesiredByRemote", source, connection); let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { entry } else { - error!(target: "sub-libp2p", "OpenDesired: State mismatch in the custom protos handler"); + error!(target: "sub-libp2p", "OpenDesiredByRemote: State mismatch in the custom protos handler"); debug_assert!(false); return }; @@ -1472,13 +1474,13 @@ impl NetworkBehaviour for GenericProto { // Incoming => Incoming PeerState::Incoming { mut connections, backoff_until } => { debug_assert!(connections.iter().any(|(_, s)| - matches!(s, ConnectionState::OpenDesired))); + matches!(s, ConnectionState::OpenDesiredByRemote))); if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { if let ConnectionState::Closed = *connec_state { - *connec_state = ConnectionState::OpenDesired; + *connec_state = ConnectionState::OpenDesiredByRemote; } else { // Connections in `OpeningThenClosing` state are in a Closed phase, - // and as such can emit `OpenDesired` messages. + // and as such can emit `OpenDesiredByRemote` messages. // Since an `Open` and a `Close` messages have already been sent, // there is nothing much that can be done about this anyway. 
debug_assert!(matches!( @@ -1489,7 +1491,7 @@ impl NetworkBehaviour for GenericProto { } else { error!( target: "sub-libp2p", - "OpenDesired: State mismatch in the custom protos handler" + "OpenDesiredByRemote: State mismatch in the custom protos handler" ); debug_assert!(false); } @@ -1512,18 +1514,18 @@ impl NetworkBehaviour for GenericProto { *connec_state = ConnectionState::Opening; } else { // Connections in `OpeningThenClosing` and `Opening` are in a Closed - // phase, and as such can emit `OpenDesired` messages. + // phase, and as such can emit `OpenDesiredByRemote` messages. // Since an `Open` message haS already been sent, there is nothing // more to do. debug_assert!(matches!( connec_state, - ConnectionState::OpenDesired | ConnectionState::Opening + ConnectionState::OpenDesiredByRemote | ConnectionState::Opening )); } } else { error!( target: "sub-libp2p", - "OpenDesired: State mismatch in the custom protos handler" + "OpenDesiredByRemote: State mismatch in the custom protos handler" ); debug_assert!(false); } @@ -1535,7 +1537,7 @@ impl NetworkBehaviour for GenericProto { PeerState::Disabled { mut connections, backoff_until } => { if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { if let ConnectionState::Closed = *connec_state { - *connec_state = ConnectionState::OpenDesired; + *connec_state = ConnectionState::OpenDesiredByRemote; let incoming_id = self.next_incoming_index; self.next_incoming_index.0 += 1; @@ -1553,7 +1555,7 @@ impl NetworkBehaviour for GenericProto { } else { // Connections in `OpeningThenClosing` are in a Closed phase, and - // as such can emit `OpenDesired` messages. + // as such can emit `OpenDesiredByRemote` messages. // We ignore them. 
debug_assert!(matches!( connec_state, @@ -1563,7 +1565,7 @@ impl NetworkBehaviour for GenericProto { } else { error!( target: "sub-libp2p", - "OpenDesired: State mismatch in the custom protos handler" + "OpenDesiredByRemote: State mismatch in the custom protos handler" ); debug_assert!(false); } @@ -1573,7 +1575,7 @@ impl NetworkBehaviour for GenericProto { PeerState::DisabledPendingEnable { mut connections, timer, timer_deadline } => { if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { if let ConnectionState::Closed = *connec_state { - *connec_state = ConnectionState::OpenDesired; + *connec_state = ConnectionState::OpenDesiredByRemote; let incoming_id = self.next_incoming_index; self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { @@ -1600,7 +1602,7 @@ impl NetworkBehaviour for GenericProto { } else { // Connections in `OpeningThenClosing` are in a Closed phase, and - // as such can emit `OpenDesired` messages. + // as such can emit `OpenDesiredByRemote` messages. // We ignore them. debug_assert!(matches!( connec_state, @@ -1615,7 +1617,7 @@ impl NetworkBehaviour for GenericProto { } else { error!( target: "sub-libp2p", - "OpenDesired: State mismatch in the custom protos handler" + "OpenDesiredByRemote: State mismatch in the custom protos handler" ); debug_assert!(false); } @@ -1623,7 +1625,7 @@ impl NetworkBehaviour for GenericProto { state => { error!(target: "sub-libp2p", - "OpenDesired: Unexpected state in the custom protos handler: {:?}", + "OpenDesiredByRemote: Unexpected state in the custom protos handler: {:?}", state); debug_assert!(false); return diff --git a/client/network/src/protocol/generic_proto/handler.rs b/client/network/src/protocol/generic_proto/handler.rs index b2bc8a7472cbe..0272261f67d57 100644 --- a/client/network/src/protocol/generic_proto/handler.rs +++ b/client/network/src/protocol/generic_proto/handler.rs @@ -32,7 +32,7 @@ //! 
The [`NotifsHandler`] can spontaneously switch between these states: //! //! - "Closed substreams" to "Closed substreams but open desired". When that happens, a -//! [`NotifsHandlerOut::OpenDesired`] is emitted. +//! [`NotifsHandlerOut::OpenDesiredByRemote`] is emitted. //! - "Closed substreams but open desired" to "Closed substreams" (i.e. the remote has cancelled //! their request). When that happens, a [`NotifsHandlerOut::CloseDesired`] is emitted. //! - "Open substreams" to "Open substreams but close desired". When that happens, a @@ -47,7 +47,7 @@ //! state. When a [`NotifsHandlerOut::OpenResultErr`] or [`NotifsHandlerOut::CloseResult`] is //! emitted, the `NotifsHandler` is now (or remains) in the closed state. //! -//! When a [`NotifsHandlerOut::OpenDesired`] is emitted, the user should always send back either a +//! When a [`NotifsHandlerOut::OpenDesiredByRemote`] is emitted, the user should always send back either a //! [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`].If this isn't done, the remote will //! be left in a pending state. //! @@ -166,7 +166,7 @@ enum State { pending_opening: Vec, }, - /// Handler is in the "Closed" state. A [`NotifsHandlerOut::OpenDesired`] has been emitted. + /// Handler is in the "Closed" state. A [`NotifsHandlerOut::OpenDesiredByRemote`] has been emitted. OpenDesiredByRemote { /// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains /// a substream opened by the remote and that hasn't been accepted/rejected yet. @@ -324,7 +324,7 @@ pub enum NotifsHandlerOut { /// [`NotifsHandlerIn::Open`] or [`NotifsHandlerIn::Close`] has been sent before and has not /// yet been acknowledged by a matching [`NotifsHandlerOut`], then you don't need to a send /// another [`NotifsHandlerIn`]. - OpenDesired, + OpenDesiredByRemote, /// The remote would like the substreams to be closed. Send a [`NotifsHandlerIn::Close`] in /// order to close them. 
If a [`NotifsHandlerIn::Close`] has been sent before and has not yet @@ -537,7 +537,7 @@ impl ProtocolsHandler for NotifsHandler { match &mut self.state { State::Closed { pending_opening } => { self.events_queue.push_back(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::OpenDesired + NotifsHandlerOut::OpenDesiredByRemote )); let mut in_substreams = (0..self.in_protocols.len()) @@ -581,8 +581,8 @@ impl ProtocolsHandler for NotifsHandler { // Received legacy substream. EitherOutput::Second((substream, _handshake)) => { // Note: while we awknowledge legacy substreams and handle incoming messages, - // it doesn't trigger any `OpenDesired` event as a way to simplify the logic of - // this code. + // it doesn't trigger any `OpenDesiredByRemote` event as a way to simplify the + // logic of this code. // Since mid-2019, legacy substreams are supposed to be used at the same time as // notifications substreams, and not in isolation. Nodes that open legacy // substreams in isolation are considered deprecated. @@ -847,8 +847,8 @@ impl ProtocolsHandler for NotifsHandler { } // Poll inbound substreams. - // Inbound substreams being closed is always tolerated, except for the `OpenDesired` state - // which might need to be switched back to `Closed`. + // Inbound substreams being closed is always tolerated, except for the + // `OpenDesiredByRemote` state which might need to be switched back to `Closed`. match &mut self.state { State::Closed { .. } => {} State::Open { in_substreams, .. } => { @@ -881,7 +881,7 @@ impl ProtocolsHandler for NotifsHandler { } // Since the previous block might have closed inbound substreams, make sure that we can - // stay in `OpenDesired` state. + // stay in `OpenDesiredByRemote` state. 
if let State::OpenDesiredByRemote { in_substreams, pending_opening } = &mut self.state { if !in_substreams.iter().any(|s| s.is_some()) { self.state = State::Closed { From ae8724261febee5c55b7fb9a9841d2456245482c Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 16 Nov 2020 09:49:22 +0100 Subject: [PATCH 39/39] Update client/network/src/protocol/generic_proto/behaviour.rs Co-authored-by: Max Inden --- client/network/src/protocol/generic_proto/behaviour.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index a427c56ed40cc..f84aead47283a 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -1831,6 +1831,7 @@ impl NetworkBehaviour for GenericProto { } else { error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler"); debug_assert!(false); + debug_assert!(false); return };